@ben-manes
Created July 26, 2012 08:54
package com.googlecode.concurrentlinkedhashmap.benchmark;

import com.google.caliper.Runner;
import com.google.caliper.SimpleBenchmark;
import org.testng.annotations.Parameters;
import org.testng.annotations.Test;

import java.util.Queue;
import java.util.concurrent.ConcurrentLinkedQueue;

/**
 * This benchmark evaluates single-threaded performance of the buffer
 * approaches.
 *
 * @author [email protected] (Ben Manes)
 */
public class BufferBenchmark extends SimpleBenchmark {
  Queue<Integer> clq;
  MPSCQueue<Integer> mpsc;
  LockFreeQueue<Integer> lfq;

  @Override
  protected void setUp() {
    mpsc = new MPSCQueue<Integer>();
    lfq = new LockFreeQueue<Integer>();
    clq = new ConcurrentLinkedQueue<Integer>();
  }

  public int timeMPSCQueue(final int reps) {
    int dummy = 0;
    while (dummy < reps) {
      mpsc.enqueue(dummy);
      mpsc.dequeue();
      dummy++;
    }
    return dummy;
  }

  public int timeLockFreeQueue(final int reps) {
    int dummy = 0;
    while (dummy < reps) {
      lfq.enq(dummy);
      lfq.deq();
      dummy++;
    }
    return dummy;
  }

  public int timeConcurrentLinkedQueue(final int reps) {
    int dummy = 0;
    while (dummy < reps) {
      clq.add(dummy);
      clq.poll();
      dummy++;
    }
    return dummy;
  }

  @Test(groups = "caliper")
  @Parameters({"warmupMillis", "runMillis", "timeUnit"})
  public static void benchmark(String warmupMillis, String runMillis, String timeUnit) {
    String[] args = {
        "--warmupMillis", warmupMillis,
        "--runMillis", runMillis,
        "--timeUnit", timeUnit
    };
    Runner.main(BufferBenchmark.class, args);
  }
}
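For reference, the benchmark can also be launched without TestNG by invoking the Caliper runner directly. The following is a minimal sketch, not part of the original gist: it reuses the same Runner.main call and the flag values from the run shown below, and the method would be added inside BufferBenchmark.

// Hypothetical standalone entry point for BufferBenchmark (not in the original gist);
// it forwards the same flags that the TestNG-driven benchmark() method passes to Caliper.
public static void main(String[] args) {
  Runner.main(BufferBenchmark.class, new String[] {
      "--warmupMillis", "10000",
      "--runMillis", "5000",
      "--timeUnit", "ns"
  });
}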
Execution with params:
- warmupMillis: 10000
- runMillis: 5000
- timeUnit: ns
[Caliper Benchmark]
Running BufferBenchmark
 0% Scenario{vm=java, trial=0, benchmark=MPSCQueue} 436.18 ns; σ=1.74 ns @ 3 trials
33% Scenario{vm=java, trial=0, benchmark=LockFreeQueue} 702.80 ns; σ=9.64 ns @ 10 trials
67% Scenario{vm=java, trial=0, benchmark=ConcurrentLinkedQueue} 34.99 ns; σ=0.06 ns @ 3 trials

            benchmark    ns linear runtime
            MPSCQueue 436.2 ==================
        LockFreeQueue 702.8 ==============================
ConcurrentLinkedQueue  35.0 =
package com.googlecode.concurrentlinkedhashmap.benchmark;

import java.util.concurrent.atomic.AtomicReference;

/**
 * Lock-free queue.
 * Based on Michael and Scott, http://doi.acm.org/10.1145/248052.248106
 *
 * @author Maurice Herlihy
 */
public class LockFreeQueue<T> {
  private final AtomicReference<Node> head;
  private final AtomicReference<Node> tail;

  /**
   * Create a new object of this class.
   */
  public LockFreeQueue() {
    Node sentinel = new Node(null);
    head = new AtomicReference<Node>(sentinel);
    tail = new AtomicReference<Node>(sentinel);
  }

  /**
   * Enqueue an item.
   *
   * @param value Item to enqueue.
   */
  public void enq(T value) {
    Node node = new Node(value);
    while (true) { // keep trying
      Node last = tail.get(); // read tail
      Node next = last.next.get(); // read next
      // are they consistent?
      if (last == tail.get()) {
        if (next == null) { // was tail the last node?
          // try to link node to end of list
          if (last.next.compareAndSet(next, node)) {
            // enq done, try to advance tail
            tail.compareAndSet(last, node);
            return;
          }
        } else { // tail was not the last node
          // try to swing tail to next node
          tail.compareAndSet(last, next);
        }
      }
    }
  }

  /**
   * Dequeue an item.
   *
   * @return Item at the head of the queue, or {@code null} if the queue is empty.
   */
  public T deq() {
    while (true) {
      Node first = head.get();
      Node last = tail.get();
      Node next = first.next.get();
      // are they consistent?
      if (first == head.get()) {
        if (first == last) { // is queue empty or tail falling behind?
          if (next == null) { // is queue empty?
            return null;
          }
          // tail is behind, try to advance
          tail.compareAndSet(last, next);
        } else {
          T value = next.value; // read value before dequeuing
          if (head.compareAndSet(first, next)) {
            return value;
          }
        }
      }
    }
  }

  /**
   * Items are kept in a list of nodes.
   */
  public class Node {
    /**
     * Item kept by this node.
     */
    public T value;

    /**
     * Next node in the queue.
     */
    public AtomicReference<Node> next;

    /**
     * Create a new node.
     */
    public Node(T value) {
      this.next = new AtomicReference<Node>(null);
      this.value = value;
    }
  }
}
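As a quick sanity check of the API above, here is a minimal single-threaded usage sketch (not part of the original gist; the class name is illustrative only). It exercises the same enq/deq calls that timeLockFreeQueue uses and relies on deq() returning null when the queue is empty.

// Hypothetical usage sketch for LockFreeQueue.
public final class LockFreeQueueExample {
  public static void main(String[] args) {
    LockFreeQueue<Integer> queue = new LockFreeQueue<Integer>();
    queue.enq(1);
    queue.enq(2);
    System.out.println(queue.deq()); // 1, FIFO order
    System.out.println(queue.deq()); // 2
    System.out.println(queue.deq()); // null; deq() neither blocks nor throws when empty
  }
}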
package com.googlecode.concurrentlinkedhashmap.benchmark;

import java.util.concurrent.atomic.AtomicReference;

/**
 * A multiple-producer, single-consumer queue. Each node's AtomicReference holds the
 * link to its successor; {@code head} points at the most recently enqueued node and
 * {@code tail} at the last node consumed (initially a shared sentinel).
 */
class MPSCQueue<E> {
  Node<E> tail = new Node<E>(null);
  final AtomicReference<Node<E>> head = new AtomicReference<Node<E>>(tail);

  void enqueue(E e) {
    Node<E> node = new Node<E>(e);
    // Atomically swap the new node in as the head, then link the previous head to it.
    // The link is published lazily; the consumer spins until it becomes visible.
    head.getAndSet(node).lazySet(node);
  }

  E dequeue() {
    for (;;) {
      // The single consumer reads the successor link of the last consumed node. It is
      // null while the queue appears empty (or a producer has swapped head but not yet
      // published its link), so this busy-waits until an element is available.
      Node<E> next = tail.get();
      if (next != null) {
        tail = next;
        return next.value;
      }
    }
  }

  /** A node that doubles as the atomic reference to its successor. */
  static class Node<T> extends AtomicReference<Node<T>> {
    private static final long serialVersionUID = 1L;
    final T value;

    Node(T value) {
      this.value = value;
    }
  }
}
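To illustrate the intended multi-producer, single-consumer contract, here is a small usage sketch (not part of the original gist; the class name is illustrative only): any thread may call enqueue, but dequeue is meant for a single consumer thread and busy-waits until an element is available.

// Hypothetical usage sketch for MPSCQueue: one producer thread, one consumer thread.
public final class MPSCQueueExample {
  public static void main(String[] args) throws InterruptedException {
    final MPSCQueue<Integer> queue = new MPSCQueue<Integer>();
    Thread producer = new Thread(new Runnable() {
      @Override public void run() {
        for (int i = 0; i < 5; i++) {
          queue.enqueue(i);
        }
      }
    });
    producer.start();
    // The single consumer spins in dequeue() until the producer publishes each link.
    for (int i = 0; i < 5; i++) {
      System.out.println(queue.dequeue());
    }
    producer.join();
  }
}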
@plokhotnyuk

I wouldn't trust benchmarking frameworks so blindly...
Here is a simple test, with its output, that shows better performance for MPSCQueue:

private static final int ITERATIONS = 100000000;

public static void main(String[] args) {
  BufferBenchmark bufferBenchmark = new BufferBenchmark();
  bufferBenchmark.setUp();
  for (int i = 0; i < 3; i++) {
    final long t = System.nanoTime();
    bufferBenchmark.timeMPSCQueue(ITERATIONS);
    final long d = System.nanoTime() - t;
    System.out.printf("MPSCQueue[%d]: %,d ns/op\n", i, d / ITERATIONS);
  }
  for (int i = 0; i < 3; i++) {
    final long t = System.nanoTime();
    bufferBenchmark.timeConcurrentLinkedQueue(ITERATIONS);
    final long d = System.nanoTime() - t;
    System.out.printf("ConcurrentLinkedQueue[%d]: %,d ns/op\n", i, d / ITERATIONS);
  }
}

MPSCQueue[0]: 18 ns/op
MPSCQueue[1]: 16 ns/op
MPSCQueue[2]: 14 ns/op
ConcurrentLinkedQueue[0]: 35 ns/op
ConcurrentLinkedQueue[1]: 35 ns/op
ConcurrentLinkedQueue[2]: 35 ns/op
