/*
 * lock_fairness_test.c
 *
 * test program to observe the unfair behavior of mutex/rwlock/POSIX semaphore.
 *
 * NOTE:
 * The unfairness is not necessarily a bug, since POSIX doesn't say anything
 * about fairness, and the critical section shouldn't be this long anyway.
 *
 * When the lock behaves fairly, this program prints the elapsed time
 * nearly every 2 seconds.  e.g.
 *
 * CentOS 7.3.1611/x86_64 (1 CPU core, 2 threads) with the -q option:
 *
 * $ cc -pthread -Wall -o lock_fairness_test lock_fairness_test.c -lm
 * $ ./lock_fairness_test -q 30000000
 * running in queuelock mode
 * running 30000000 loops
 * timer/sec=1.99542e+09
 * 2.046851367 0.000051369 36
 * 4.055690931 0.000043529 71
 * 6.064471415 0.000043287 106
 * 8.072942444 0.000066593 141
 * 10.080625325 0.000032040 176
 * 12.084942512 0.000062311 211
 * 14.092002205 0.000044712 246
 * 16.098744026 0.000043810 281
 * 18.103785603 0.000033713 316
 * 20.108970296 0.000033250 351
 * 22.114094569 0.000056179 386
 * 24.123946326 0.000052503 421
 * 26.128291622 0.000032100 456
 * 28.133075676 0.000033145 491
 * 30.138027817 0.000043689 526
 * 32.145002003 0.000067513 561
 * 34.149647974 0.000043448 596
 * 36.155416246 0.000044015 631
 * 38.161070762 0.000032407 666
 * 40.168976007 0.000032889 701
 * 42.174912415 0.000051632 736
 * 44.179967304 0.000044475 771
 * 46.185357263 0.000032337 806
 * 48.192743172 0.000032530 841
 * 50.200680518 0.000033077 876
 * 52.206143774 0.000032990 911
 * 54.214794951 0.000029218 946
 * 56.220743811 0.000053392 981
 * 58.228527500 0.000044976 1016
 * 60.233673756 0.000031663 1051
 * 62.237223814 0.000043707 1086
 * 64.243293277 0.000045816 1121
 * ^C
 * count: 1150
 * average length of critical section (second): 0.0567577
 * standard deviation: 0.000229203
 * minimum: 0.0565204
 * maximum: 0.0602164
 *
 * Unfair behavior is observed on machines which have multiple CPU threads:
 * e.g.
 *
 * CentOS 7.3.1611/x86_64 (1 CPU core, 2 threads):
 * $ cc -pthread -Wall -o lock_fairness_test lock_fairness_test.c -lm
 * $ ./lock_fairness_test 30000000
 * running in mutex mode
 * running 30000000 loops
 * timer/sec=3.39251e+09
 * 23.530055013 0.000014799 368
 * 65.902219952 0.000008506 1033
 * 81.935830791 0.000008124 1283
 * 83.976186182 0.000017008 1315
 * 97.990706136 0.000009499 1535
 * 100.025443035 0.000011114 1567
 * 102.059949221 0.000014984 1599
 * 106.007334228 0.000061578 1661
 * ^C
 * count: 1679
 * average length of critical section (second): 0.0638063
 * standard deviation: 0.00363152
 * minimum: 0.0611446
 * maximum: 0.165686
 *
 * If you cannot observe this unfair behavior, please increase the 30000000
 * argument to something larger.
 *
 * As the number of CPU cores increases, it becomes reproducible with a
 * shorter critical section.
 * e.g.
 *
 * RHEL 7.3/x86_64 (14 CPU cores, 28 threads):
 * $ cc -pthread -Wall -o lock_fairness_test lock_fairness_test.c -lm
 * $ ./lock_fairness_test 300000
 * running in mutex mode
 * running 300000 loops
 * timer/sec=1.99542e+09
 * 8.666709875 0.000010589 15018
 * 11.783726435 0.000006059 20422
 * 14.757883946 0.000007563 25615
 * 70.831735475 0.000014931 122701
 * ^C
 * count: 138571
 * average length of critical section (second): 0.000575703
 * standard deviation: 2.58348e-05
 * minimum: 0.000563828
 * maximum: 0.000954344
 *
 * If the critical section is short enough (i.e. if the argument of the
 * lock_fairness_test program is small enough -- e.g. 300 instead of
 * 30000000), this unfairness won't be observed.
 *
 * The locking type can be specified as follows:
 *
 *   -m   mutex (default)                  ... often unfair
 *   -r   rwlock                           ... often unfair
 *   -s   POSIX semaphore                  ... often unfair
 *   -c   condlock (mutex + condition variable)
 *   -t   ticket lock                      ... fair, but has a performance
 *                                             issue under lock contention
 *   -f   ticket lock w/ multiple condvars ... to mitigate the performance
 *                                             issue
 *        XXX a large enough number should be specified as cond_number;
 *        it depends on the number of threads in contention.
 *   -q   queue lock                       ... fair, and doesn't have the
 *                                             performance issue
 *
 * --
 * E-Mail: <soda at sra.co.jp>
 */
#define _GNU_SOURCE /* for <sched.h>: SCHED_IDLE */

#include <pthread.h>
#include <semaphore.h>
#include <stdbool.h>
#include <assert.h>
#include <string.h>
#include <signal.h>
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
#include <sched.h>
#include <errno.h>
#include <time.h>
#include <math.h>

#define SLEEP_PERIOD 2000000000LL /* nanosec. i.e. 2.0 sec */
#define YIELD_PERIOD   20000000LL /* nanosec. i.e. 20 millisec */
#define LOOP_DEFAULT 300LL

/* variables which are set before the compute thread is created */
static unsigned long long loop = LOOP_DEFAULT;
static struct timespec start_time;
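/*
 * "zero" is always 0, but it is computed at runtime (zero = loop - loop
 * in main()), so the compiler cannot constant-fold the busy loop in
 * compute_thread() away.
 */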
static unsigned long long zero;

static void
timespec_sub(struct timespec *t1, const struct timespec *t2)
{
    t1->tv_sec -= t2->tv_sec;
    if (t1->tv_nsec < t2->tv_nsec) {
        t1->tv_sec--;
        t1->tv_nsec += 1000000000 - t2->tv_nsec;
    } else {
        t1->tv_nsec -= t2->tv_nsec;
    }
}

struct statistics {
    size_t n;
    double temporary_average;
    double square_sum;
    double minimum, maximum;
};

static void
statistics_init(struct statistics *s)
{
    s->n = 0;
    s->minimum = s->maximum = 0;
    s->temporary_average = 0;
    s->square_sum = 0;
}
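/*
 * one step of Welford's online algorithm: updates the running mean and
 * the sum of squared deviations without storing the samples themselves.
 */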
static void
statistics_add(struct statistics *s, double x)
{
    if (++s->n == 1) {
        s->minimum = s->maximum = x;
    } else {
        if (s->minimum > x)
            s->minimum = x;
        if (s->maximum < x)
            s->maximum = x;
    }
    x -= s->temporary_average;
    s->temporary_average += x / s->n;
    s->square_sum += (s->n - 1) * x * x / s->n;
}

static void
statistics_result(const struct statistics *s,
    int *np,
    double *minp, double *maxp, double *averagep, double *standard_deviationp)
{
    *np = s->n;
    *minp = s->minimum;
    *maxp = s->maximum;
    *averagep = s->temporary_average;
    *standard_deviationp = sqrt(s->square_sum / (s->n - 1));
}
#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))

typedef unsigned long long timerval_t;
double timerval_calibration;

unsigned long long
get_cycles(void)
{
    return __builtin_ia32_rdtsc();
}

#define gettimerval(tp)        (*(tp) = get_cycles())
#define timerval_second(tp)    (*(tp) * timerval_calibration)
#define timerval_sub(t1p, t2p) ((*(t1p) - *(t2p)) * timerval_calibration)
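/*
 * estimate the TSC frequency against CLOCK_REALTIME over a ~3 second
 * interval; this assumes an invariant (constant-rate) TSC.
 */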
void
timerval_calibrate(void)
{
    timerval_t t1, t2;
    struct timespec s1, s2;

    /* warming up */
    gettimerval(&t1);
    clock_gettime(CLOCK_REALTIME, &s1);

    gettimerval(&t1);
    clock_gettime(CLOCK_REALTIME, &s1);
    sleep(3);
    gettimerval(&t2);
    clock_gettime(CLOCK_REALTIME, &s2);

    timerval_calibration =
        ((s2.tv_sec - s1.tv_sec) +
         (s2.tv_nsec - s1.tv_nsec) * .000000001) /
        (t2 - t1);
    fprintf(stderr, "timer/sec=%g\n", 1.0 / timerval_calibration);
}

#else /* ! (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) */

typedef struct timespec timerval_t;

#define gettimerval(t1)     clock_gettime(CLOCK_REALTIME, t1)
#define timerval_second(t1) ((double)(t1)->tv_sec \
                            + (double)(t1)->tv_nsec * .000000001)
#define timerval_sub(t1, t2) \
    (((double)(t1)->tv_sec - (double)(t2)->tv_sec) \
     + ((double)(t1)->tv_nsec - (double)(t2)->tv_nsec) * .000000001)

void
timerval_calibrate(void)
{}

#endif /* ! (defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__))) */
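/*
 * "condlock" (-c): a plain lock built from a mutex and a condition
 * variable.  pthread_cond_signal() wakes an unspecified waiter, so no
 * FIFO order is guaranteed.
 */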
struct condlock {
    pthread_mutex_t mutex;
    pthread_cond_t unlocked;
    bool lock_held;
};

static void
condlock_init(struct condlock *cl)
{
    int rv;

    rv = pthread_mutex_init(&cl->mutex, NULL);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_init() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    rv = pthread_cond_init(&cl->unlocked, NULL);
    if (rv != 0) {
        fprintf(stderr, "pthread_cond_init() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    cl->lock_held = false;
}

static bool
condlock_trylock(struct condlock *cl)
{
    int rv;
    bool locked;

    rv = pthread_mutex_lock(&cl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    if (cl->lock_held) {
        locked = false;
    } else {
        locked = true;
        cl->lock_held = true;
    }
    rv = pthread_mutex_unlock(&cl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    return (locked);
}

static void
condlock_lock(struct condlock *cl)
{
    int rv;

    rv = pthread_mutex_lock(&cl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    while (cl->lock_held) {
        rv = pthread_cond_wait(&cl->unlocked, &cl->mutex);
        if (rv != 0) {
            fprintf(stderr, "pthread_cond_wait() in %s(): %s\n",
                __func__, strerror(rv));
            exit(EXIT_FAILURE);
        }
    }
    cl->lock_held = true;
    rv = pthread_mutex_unlock(&cl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
condlock_unlock(struct condlock *cl)
{
    int rv;

    rv = pthread_mutex_lock(&cl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    if (!cl->lock_held) {
        fprintf(stderr, "condlock_unlock: unexpected error\n");
        exit(EXIT_FAILURE);
    }
    cl->lock_held = false;
    rv = pthread_cond_signal(&cl->unlocked);
    if (rv != 0) {
        fprintf(stderr, "pthread_cond_signal() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    rv = pthread_mutex_unlock(&cl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

/*
 * ticket lock implementation from:
 * https://stackoverflow.com/questions/6449732/fair-critical-section-linux
 * by caf (https://stackoverflow.com/users/134633/caf)
 * 2011-06-23 12:24
 */
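/*
 * each locker takes a ticket (queue_tail++) and waits until queue_head
 * reaches its ticket; unlock advances queue_head and broadcasts, which
 * wakes every waiter just to let one of them proceed.
 */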
struct ticketlock {
    pthread_mutex_t mutex;
    pthread_cond_t unlocked;
    unsigned long queue_head, queue_tail;
};

static void
ticketlock_init(struct ticketlock *tl)
{
    int rv;

    rv = pthread_mutex_init(&tl->mutex, NULL);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_init() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    rv = pthread_cond_init(&tl->unlocked, NULL);
    if (rv != 0) {
        fprintf(stderr, "pthread_cond_init() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    tl->queue_head = tl->queue_tail = 0;
}

static bool
ticketlock_trylock(struct ticketlock *tl)
{
    int rv;
    bool locked;

    rv = pthread_mutex_lock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    locked = tl->queue_tail == tl->queue_head;
    if (locked)
        tl->queue_tail++;
    rv = pthread_mutex_unlock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    return (locked);
}

static void
ticketlock_lock(struct ticketlock *tl)
{
    int rv;
    unsigned long queue_me;

    rv = pthread_mutex_lock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    queue_me = tl->queue_tail++;
    while (queue_me != tl->queue_head) {
        rv = pthread_cond_wait(&tl->unlocked, &tl->mutex);
        if (rv != 0) {
            fprintf(stderr, "pthread_cond_wait() in %s(): %s\n",
                __func__, strerror(rv));
            exit(EXIT_FAILURE);
        }
    }
    rv = pthread_mutex_unlock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
ticketlock_unlock(struct ticketlock *tl)
{
    int rv;

    rv = pthread_mutex_lock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    tl->queue_head++;
    rv = pthread_cond_broadcast(&tl->unlocked);
    if (rv != 0) {
        fprintf(stderr, "pthread_cond_broadcast() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    rv = pthread_mutex_unlock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

/*
 * ticketlock was nearly 100x slower than mutex in contention.
 * try to make it faster
 */
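/*
 * same ticket scheme, but waiters are distributed over cond_number
 * condition variables (hashed by ticket number), so unlock can usually
 * signal just the next ticket holder instead of broadcasting to everyone.
 */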
struct fasterticketlock {
    pthread_mutex_t mutex;
    unsigned long queue_head, queue_tail;
    pthread_cond_t *unlocked;
    int cond_number;
};

static void
fasterticketlock_init(struct fasterticketlock *tl, int cond_number)
{
    int rv, i;

    rv = pthread_mutex_init(&tl->mutex, NULL);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_init() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    tl->queue_head = tl->queue_tail = 0;
    tl->unlocked = malloc(sizeof(*tl->unlocked) * cond_number);
    if (tl->unlocked == NULL) {
        fprintf(stderr, "malloc() in %s(): out of memory\n", __func__);
        exit(EXIT_FAILURE);
    }
    for (i = 0; i < cond_number; i++) {
        rv = pthread_cond_init(&tl->unlocked[i], NULL);
        if (rv != 0) {
            fprintf(stderr, "pthread_cond_init() in %s():%d: %s\n",
                __func__, i, strerror(rv));
            exit(EXIT_FAILURE);
        }
    }
    tl->cond_number = cond_number;
}

static bool
fasterticketlock_trylock(struct fasterticketlock *tl)
{
    int rv;
    bool locked;

    rv = pthread_mutex_lock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    locked = tl->queue_tail == tl->queue_head;
    if (locked)
        tl->queue_tail++;
    rv = pthread_mutex_unlock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    return (locked);
}

static void
fasterticketlock_lock(struct fasterticketlock *tl)
{
    int rv;
    unsigned long queue_me;

    rv = pthread_mutex_lock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    queue_me = tl->queue_tail++;
    if (queue_me != tl->queue_head) {
        for (;;) {
            rv = pthread_cond_wait(
                &tl->unlocked[queue_me % tl->cond_number],
                &tl->mutex);
            if (rv != 0) {
                fprintf(stderr,
                    "pthread_cond_wait() in %s(): %s\n",
                    __func__, strerror(rv));
                exit(EXIT_FAILURE);
            }
            if (queue_me == tl->queue_head)
                break;
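            /*
             * woken, but another ticket hashed to this condvar is
             * the real target: pass the signal along so it isn't
             * lost.
             */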
            rv = pthread_cond_signal(
                &tl->unlocked[queue_me % tl->cond_number]);
            if (rv != 0) {
                fprintf(stderr,
                    "pthread_cond_signal() in %s(): %s\n",
                    __func__, strerror(rv));
                exit(EXIT_FAILURE);
            }
        }
    }
    rv = pthread_mutex_unlock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
fasterticketlock_unlock(struct fasterticketlock *tl)
{
    int rv;

    rv = pthread_mutex_lock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    tl->queue_head++;
    rv = pthread_cond_signal(
        &tl->unlocked[tl->queue_head % tl->cond_number]);
    if (rv != 0) {
        fprintf(stderr, "pthread_cond_signal() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    rv = pthread_mutex_unlock(&tl->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}
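/*
 * "queuelock" (-q): strict FIFO.  Each waiter is linked into a queue and
 * sleeps on its own condition variable, so unlock wakes exactly the
 * oldest waiter -- in the spirit of an MCS-style queue lock, built on
 * pthreads primitives.
 */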
struct queuelock_waiter {
    struct queuelock_waiter *next;
    pthread_cond_t unlocked;
};

struct queuelock {
    pthread_mutex_t mutex;
    struct queuelock_waiter *head, **tail;
    bool locked;
};

static void
queuelock_init(struct queuelock *ql)
{
    int rv;

    rv = pthread_mutex_init(&ql->mutex, NULL);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_init() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    ql->head = NULL;
    ql->tail = &ql->head;
    ql->locked = false;
}

static bool
queuelock_trylock(struct queuelock *ql)
{
    int rv;
    bool locked;

    rv = pthread_mutex_lock(&ql->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    /* unlocked AND there is no waiter */
    locked = !ql->locked && ql->head == NULL;
    if (locked)
        ql->locked = true;
    rv = pthread_mutex_unlock(&ql->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    return (locked);
}

static void
queuelock_lock(struct queuelock *ql)
{
    int rv;
    struct queuelock_waiter me;

    rv = pthread_mutex_lock(&ql->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    if (ql->locked || ql->head != NULL) {
        rv = pthread_cond_init(&me.unlocked, NULL);
        if (rv != 0) {
            fprintf(stderr, "pthread_cond_init() in %s(): %s\n",
                __func__, strerror(rv));
            exit(EXIT_FAILURE);
        }
        me.next = NULL;
        *ql->tail = &me;
        ql->tail = &me.next;
        do {
            rv = pthread_cond_wait(&me.unlocked, &ql->mutex);
            if (rv != 0) {
                fprintf(stderr,
                    "pthread_cond_wait() in %s(): %s\n",
                    __func__, strerror(rv));
                exit(EXIT_FAILURE);
            }
            /* the "ql->head != &me" test is necessary to
             * survive spurious wakeups */
        } while (ql->locked || ql->head != &me);
        ql->head = me.next;
        if (ql->head == NULL)
            ql->tail = &ql->head;
        rv = pthread_cond_destroy(&me.unlocked);
        if (rv != 0) {
            fprintf(stderr, "pthread_cond_destroy() in %s(): %s\n",
                __func__, strerror(rv));
            exit(EXIT_FAILURE);
        }
    }
    ql->locked = true;
    rv = pthread_mutex_unlock(&ql->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
queuelock_unlock(struct queuelock *ql)
{
    int rv;

    rv = pthread_mutex_lock(&ql->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
    assert(ql->locked);
    ql->locked = false;
    if (ql->head != NULL) {
        rv = pthread_cond_signal(&ql->head->unlocked);
        if (rv != 0) {
            fprintf(stderr, "pthread_cond_signal() in %s(): %s\n",
                __func__, strerror(rv));
            exit(EXIT_FAILURE);
        }
    }
    rv = pthread_mutex_unlock(&ql->mutex);
    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            __func__, strerror(rv));
        exit(EXIT_FAILURE);
    }
}
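/*
 * a small vtable abstraction so the test driver can treat every lock
 * type uniformly through trylock/lock/unlock function pointers.
 */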
struct lock_object {
    union {
        struct condlock cl;
        struct ticketlock tl;
        struct fasterticketlock fl;
        struct queuelock ql;
        pthread_mutex_t mutex;
        pthread_rwlock_t rwlock;
        sem_t sem;
    } u;
    struct lock_ops *ops;
};

struct lock_ops {
    bool (*trylock)(struct lock_object *, const char *);
    void (*lock)(struct lock_object *, const char *);
    void (*unlock)(struct lock_object *, const char *);
};

static bool
cl_trylock(struct lock_object *lockobj, const char *diag)
{
    return (condlock_trylock(&lockobj->u.cl));
}

static void
cl_lock(struct lock_object *lockobj, const char *diag)
{
    condlock_lock(&lockobj->u.cl);
}

static void
cl_unlock(struct lock_object *lockobj, const char *diag)
{
    condlock_unlock(&lockobj->u.cl);
}

static void
cl_init(struct lock_object *lockobj)
{
    static struct lock_ops cl_ops = {
        cl_trylock,
        cl_lock,
        cl_unlock
    };

    condlock_init(&lockobj->u.cl);
    lockobj->ops = &cl_ops;
}

static bool
tl_trylock(struct lock_object *lockobj, const char *diag)
{
    return (ticketlock_trylock(&lockobj->u.tl));
}

static void
tl_lock(struct lock_object *lockobj, const char *diag)
{
    ticketlock_lock(&lockobj->u.tl);
}

static void
tl_unlock(struct lock_object *lockobj, const char *diag)
{
    ticketlock_unlock(&lockobj->u.tl);
}

static void
tl_init(struct lock_object *lockobj)
{
    static struct lock_ops tl_ops = {
        tl_trylock,
        tl_lock,
        tl_unlock
    };

    ticketlock_init(&lockobj->u.tl);
    lockobj->ops = &tl_ops;
}

static bool
fl_trylock(struct lock_object *lockobj, const char *diag)
{
    return (fasterticketlock_trylock(&lockobj->u.fl));
}

static void
fl_lock(struct lock_object *lockobj, const char *diag)
{
    fasterticketlock_lock(&lockobj->u.fl);
}

static void
fl_unlock(struct lock_object *lockobj, const char *diag)
{
    fasterticketlock_unlock(&lockobj->u.fl);
}

static void
fl_init(struct lock_object *lockobj, int cond_number)
{
    static struct lock_ops fl_ops = {
        fl_trylock,
        fl_lock,
        fl_unlock
    };

    fasterticketlock_init(&lockobj->u.fl, cond_number);
    lockobj->ops = &fl_ops;
}

static bool
ql_trylock(struct lock_object *lockobj, const char *diag)
{
    return (queuelock_trylock(&lockobj->u.ql));
}

static void
ql_lock(struct lock_object *lockobj, const char *diag)
{
    queuelock_lock(&lockobj->u.ql);
}

static void
ql_unlock(struct lock_object *lockobj, const char *diag)
{
    queuelock_unlock(&lockobj->u.ql);
}

static void
ql_init(struct lock_object *lockobj)
{
    static struct lock_ops ql_ops = {
        ql_trylock,
        ql_lock,
        ql_unlock
    };

    queuelock_init(&lockobj->u.ql);
    lockobj->ops = &ql_ops;
}

static bool
mutex_trylock(struct lock_object *lockobj, const char *diag)
{
    int rv = pthread_mutex_trylock(&lockobj->u.mutex);

    switch (rv) {
    case 0:
        return (true);
    case EBUSY:
        return (false);
    default:
        fprintf(stderr, "pthread_mutex_trylock() in %s(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
mutex_lock(struct lock_object *lockobj, const char *diag)
{
    int rv = pthread_mutex_lock(&lockobj->u.mutex);

    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_lock() in %s(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
mutex_unlock(struct lock_object *lockobj, const char *diag)
{
    int rv = pthread_mutex_unlock(&lockobj->u.mutex);

    if (rv != 0) {
        fprintf(stderr, "pthread_mutex_unlock() in %s(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
mutex_init(struct lock_object *lockobj, const char *diag)
{
    static struct lock_ops mutex_ops = {
        mutex_trylock,
        mutex_lock,
        mutex_unlock
    };
    int rv = pthread_mutex_init(&lockobj->u.mutex, NULL);

    if (rv != 0) {
        fprintf(stderr, "%s: pthread_mutex_init(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
    lockobj->ops = &mutex_ops;
}

static bool
rwlock_trylock(struct lock_object *lockobj, const char *diag)
{
    int rv = pthread_rwlock_trywrlock(&lockobj->u.rwlock);

    switch (rv) {
    case 0:
        return (true);
    case EBUSY:
        return (false);
    default:
        fprintf(stderr, "%s: pthread_rwlock_trywrlock(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
rwlock_lock(struct lock_object *lockobj, const char *diag)
{
    int rv = pthread_rwlock_wrlock(&lockobj->u.rwlock);

    if (rv != 0) {
        fprintf(stderr, "%s: pthread_rwlock_wrlock(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
rwlock_unlock(struct lock_object *lockobj, const char *diag)
{
    int rv = pthread_rwlock_unlock(&lockobj->u.rwlock);

    if (rv != 0) {
        fprintf(stderr, "pthread_rwlock_unlock() in %s(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
}

static void
rwlock_init(struct lock_object *lockobj, const char *diag)
{
    static struct lock_ops rwlock_ops = {
        rwlock_trylock,
        rwlock_lock,
        rwlock_unlock
    };
    int rv = pthread_rwlock_init(&lockobj->u.rwlock, NULL);

    if (rv != 0) {
        fprintf(stderr, "pthread_rwlock_init() in %s(): %s\n",
            diag, strerror(rv));
        exit(EXIT_FAILURE);
    }
    lockobj->ops = &rwlock_ops;
}

static bool
sem_trylock(struct lock_object *lockobj, const char *diag)
{
    if (sem_trywait(&lockobj->u.sem) == 0)
        return (true);
    if (errno == EAGAIN)
        return (false);
    fprintf(stderr, "sem_trywait() in %s(): %s\n", diag, strerror(errno));
    exit(EXIT_FAILURE);
    /*NOTREACHED*/
    return (false);
}

static void
sem_lock(struct lock_object *lockobj, const char *diag)
{
    if (sem_wait(&lockobj->u.sem) == -1) {
        fprintf(stderr, "sem_wait() in %s(): %s\n",
            diag, strerror(errno));
        exit(EXIT_FAILURE);
    }
}

static void
sem_unlock(struct lock_object *lockobj, const char *diag)
{
    if (sem_post(&lockobj->u.sem) == -1) {
        fprintf(stderr, "sem_post() in %s(): %s\n",
            diag, strerror(errno));
        exit(EXIT_FAILURE);
    }
}

static void
semaphore_init(struct lock_object *lockobj, const char *diag)
{
    static struct lock_ops sem_ops = {
        sem_trylock,
        sem_lock,
        sem_unlock
    };

    if (sem_init(&lockobj->u.sem, 0, 1) == -1) {
        fprintf(stderr, "sem_init() in %s(): %s\n",
            diag, strerror(errno));
        exit(EXIT_FAILURE);
    }
    lockobj->ops = &sem_ops;
}
/* resource shared between compute_thread() and io(), which runs on the main thread */
static struct {
    unsigned long long counter;
    struct timespec last;
    struct lock_object lock;
} shared_resource;

static void
nanosec_sleep(unsigned long long nanosec)
{
    struct timespec t;

    t.tv_sec = nanosec / 1000000000ULL;
    t.tv_nsec = nanosec % 1000000000ULL;
    while (nanosleep(&t, &t) == -1)
        ;
}

struct statistics crit_sec_length;

static void
sigint_handler(int sig)
{
    int n;
    double min, max, average, standard_deviation;

    /* XXX this is NOT async-signal-safe.  hope this works */
    statistics_result(&crit_sec_length,
        &n, &min, &max, &average, &standard_deviation);
    printf("\n");
    printf("count: %d\n", n);
    printf("average length of critical section (second): %g\n", average);
    printf("standard deviation: %g\n", standard_deviation);
    printf("minimum: %g\n", min);
    printf("maximum: %g\n", max);
    exit(0);
}
static void *
compute_thread(void *p)
{
    unsigned long long i, m;
    timerval_t t0, t1;

    for (;;) {
        if (!shared_resource.lock.ops->trylock(
            &shared_resource.lock, __func__)) {
            /* pass CPU to the io() thread for a while */
            nanosec_sleep(YIELD_PERIOD);
            shared_resource.lock.ops->lock(
                &shared_resource.lock, __func__);
        }
        gettimerval(&t0);
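        /*
         * the critical section: a busy loop of "loop" additions that
         * the compiler cannot optimize away, since "zero" is a
         * runtime value.
         */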
        m = 0;
        for (i = 0; i < loop; i++)
            m += zero;
        shared_resource.counter += m + 1;
        if (clock_gettime(CLOCK_REALTIME, &shared_resource.last)
            == -1) {
            perror("clock_gettime in compute_thread()");
            exit(EXIT_FAILURE);
        }
        gettimerval(&t1);
        shared_resource.lock.ops->unlock(
            &shared_resource.lock, __func__);
        statistics_add(&crit_sec_length, timerval_sub(&t1, &t0));
        if (sched_yield() == -1) {
            perror("sched_yield");
            exit(EXIT_FAILURE);
        }
    }
    /*NOTREACHED*/
    return (0); /* to suppress a compiler warning */
}
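/*
 * runs on the main thread: every SLEEP_PERIOD, read the shared counter
 * and print "<time of last update> <age of that update> <counter>",
 * relative to start_time.  With a fair lock the first column advances by
 * about 2 seconds per line.
 */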
static void
io(void)
{
    struct timespec t1, t2;
    unsigned long long n;

    nanosec_sleep(SLEEP_PERIOD);
    shared_resource.lock.ops->lock(&shared_resource.lock, __func__);
    t1 = shared_resource.last;
    n = shared_resource.counter;
    shared_resource.lock.ops->unlock(&shared_resource.lock, __func__);
    if (clock_gettime(CLOCK_REALTIME, &t2) == -1) {
        perror("clock_gettime in io()");
        exit(EXIT_FAILURE);
    }
    timespec_sub(&t1, &start_time);
    timespec_sub(&t2, &start_time);
    timespec_sub(&t2, &t1);
    printf("%llu.%09llu %llu.%09llu %llu\n",
        (unsigned long long)t1.tv_sec, (unsigned long long)t1.tv_nsec,
        (unsigned long long)t2.tv_sec, (unsigned long long)t2.tv_nsec, n);
    fflush(stdout);
}
static void
do_main(void)
{
    int rv;
    pthread_t thread_id;

    statistics_init(&crit_sec_length);
    signal(SIGINT, sigint_handler);
    rv = pthread_create(&thread_id, NULL, compute_thread, NULL);
    if (rv != 0) {
        fprintf(stderr, "pthread_create(): %s\n", strerror(rv));
        exit(EXIT_FAILURE);
    }
    for (;;) {
        io();
    }
}
static void
priority_set_idle(void)
{
#ifdef SCHED_IDLE /* Since Linux 2.6.23 */
    struct sched_param param;
    int rv;

    param.sched_priority = 0;
    rv = pthread_setschedparam(pthread_self(), SCHED_IDLE, &param);
    if (rv == 0) {
        fprintf(stderr, "scheduling policy=SCHED_IDLE\n");
    } else {
        fprintf(stderr, "scheduling policy=SCHED_IDLE: %s\n",
            strerror(rv));
    }
#else
    fprintf(stderr, "scheduling policy SCHED_IDLE is not supported\n");
#endif
}

static const char *
sched_policy_name(int policy)
{
    return ((policy == SCHED_FIFO) ? "SCHED_FIFO" :
        (policy == SCHED_RR) ? "SCHED_RR" :
        (policy == SCHED_OTHER) ? "SCHED_OTHER" :
#ifdef SCHED_BATCH
        (policy == SCHED_BATCH) ? "SCHED_BATCH" :
#endif
#ifdef SCHED_IDLE
        (policy == SCHED_IDLE) ? "SCHED_IDLE" :
#endif
        "SCHED_<unknown>");
}

static void
priority_set_minimum(void)
{
    pthread_t self = pthread_self();
    int policy, min_prio, old_prio;
    struct sched_param param;
    int rv;

    rv = pthread_getschedparam(self, &policy, &param);
    if (rv != 0) {
        fprintf(stderr, "pthread_getschedparam(): %s\n",
            strerror(rv));
        return;
    }
    min_prio = sched_get_priority_min(policy);
    if (min_prio == -1) {
        fprintf(stderr, "sched_get_priority_min(%s): %s\n",
            sched_policy_name(policy), strerror(errno));
        return;
    }
    if (param.sched_priority == min_prio) {
        fprintf(stderr, "scheduling priority of %s is already %d\n",
            sched_policy_name(policy), min_prio);
        return;
    }
    old_prio = param.sched_priority;
    param.sched_priority = min_prio;
    rv = pthread_setschedparam(self, policy, &param);
    if (rv != 0) {
        fprintf(stderr, "pthread_setschedparam(): "
            "policy=%s(%d), priority: %d -> %d: %s\n",
            sched_policy_name(policy), policy,
            old_prio, min_prio, strerror(rv));
        return;
    }
    fprintf(stderr, "scheduling policy=%s, priority: %d -> %d\n",
        sched_policy_name(policy), old_prio, min_prio);
}
int
main(int argc, char **argv)
{
    int c;
    enum { cl_mode, tl_mode, fl_mode, ql_mode,
           mutex_mode, rwlock_mode, sem_mode } mode = mutex_mode;

    while ((c = getopt(argc, argv, "IMctfqmrs")) != -1) {
        switch (c) {
        case 'I':
            priority_set_idle();
            break;
        case 'M':
            priority_set_minimum();
            break;
        case 'c':
            mode = cl_mode;
            break;
        case 't':
            mode = tl_mode;
            break;
        case 'f':
            mode = fl_mode;
            break;
        case 'q':
            mode = ql_mode;
            break;
        case 'm':
            mode = mutex_mode;
            break;
        case 'r':
            mode = rwlock_mode;
            break;
        case 's':
            mode = sem_mode;
            break;
        default:
            fprintf(stderr,
                "Usage: lock_fairness_test [-IMctfqmrs] [<number>]\n");
            exit(EXIT_FAILURE);
        }
    }
    argc -= optind;
    argv += optind;

    switch (mode) {
    case cl_mode:
        fprintf(stderr, "running in condlock mode\n");
        cl_init(&shared_resource.lock);
        break;
    case tl_mode:
        fprintf(stderr, "running in ticketlock mode\n");
        tl_init(&shared_resource.lock);
        break;
    case fl_mode:
        fprintf(stderr, "running in fasterticketlock mode\n");
        fl_init(&shared_resource.lock, 2 /* 2 threads */);
        break;
    case ql_mode:
        fprintf(stderr, "running in queuelock mode\n");
        ql_init(&shared_resource.lock);
        break;
    case mutex_mode:
        fprintf(stderr, "running in mutex mode\n");
        mutex_init(&shared_resource.lock, "shared_resource");
        break;
    case rwlock_mode:
        fprintf(stderr, "running in rwlock mode\n");
        rwlock_init(&shared_resource.lock, "shared_resource");
        break;
    case sem_mode:
        fprintf(stderr, "running in semaphore mode\n");
        semaphore_init(&shared_resource.lock, "shared_resource");
        break;
    default:
        assert(0);
    }
    if (argc > 0)
        loop = strtoull(argv[0], NULL, 10);
    fprintf(stderr, "running %llu loops\n", loop);
    zero = loop - loop;
    timerval_calibrate();
    if (clock_gettime(CLOCK_REALTIME, &start_time) == -1) {
        perror("clock_gettime in main()");
        exit(EXIT_FAILURE);
    }
    do_main();
    return EXIT_SUCCESS;
}