Skip to content

Instantly share code, notes, and snippets.

@mrnugget
Last active August 29, 2015 14:26
Show Gist options
  • Select an option

  • Save mrnugget/067e100d8104b4443b3f to your computer and use it in GitHub Desktop.

Select an option

Save mrnugget/067e100d8104b4443b3f to your computer and use it in GitHub Desktop.
Small "web server" in C to reproduce the problems I encountered with github.com/mrnugget/helles under OS X.
#ifdef __linux__
#define _XOPEN_SOURCE 700
#endif
#include <arpa/inet.h>
#include <errno.h>
#include <netdb.h>
#include <netinet/in.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/select.h>
#include <sys/socket.h>
#include <sys/wait.h>
#include <unistd.h>
#define PORT "3333"
#define LISTEN_BACKLOG 50
#define BUFFER_SIZE 4096
/* Canned HTTP response sent for every request.
 * NOTE: Content-Length (32) must match the byte length of the body string
 * ("It's a UNIX system! I know this!" is exactly 32 bytes). */
char *dummy_response = "HTTP/1.1 200 OK\r\n"
"Content-Length: 32\r\n"
"Connection: close\r\n\r\n"
"It's a UNIX system! I know this!";
/*
 * Return a pointer to the address field inside a sockaddr,
 * dispatching on the address family (IPv4 vs. IPv6).
 */
void *get_in_addr(struct sockaddr *sa)
{
    if (sa->sa_family != AF_INET) {
        /* Anything that isn't IPv4 is treated as IPv6, as the caller
         * only passes addresses produced by getaddrinfo/accept. */
        return &(((struct sockaddr_in6 *)sa)->sin6_addr);
    }
    return &(((struct sockaddr_in *)sa)->sin_addr);
}
/*
 * Create a TCP listening socket bound to the given port.
 * Walks the getaddrinfo result list and binds the first usable address.
 * Returns the listening fd on success; on failure returns -1 (or the
 * non-zero getaddrinfo status code).
 */
int listen_on(char *port)
{
    int sockfd = -1, status;
    const int yes = 1;
    char s[BUFFER_SIZE];
    struct addrinfo hints, *res, *rptr;

    memset(&hints, 0, sizeof(hints));
    hints.ai_family = PF_INET6;
    hints.ai_socktype = SOCK_STREAM;
    hints.ai_flags = AI_PASSIVE;

    /* BUG FIX: the original wrote (status = getaddrinfo(...) != 0), so the
     * comparison bound tighter than the assignment and status held 0/1
     * instead of the real error code, making gai_strerror() report the
     * wrong error. The closing parenthesis now surrounds the assignment. */
    if ((status = getaddrinfo(NULL, port, &hints, &res)) != 0) {
        fprintf(stderr, "getaddrinfo: %s\n", gai_strerror(status));
        return status;
    }

    for (rptr = res; rptr != NULL; rptr = rptr->ai_next) {
        sockfd = socket(rptr->ai_family, rptr->ai_socktype, rptr->ai_protocol);
        if (sockfd < 0) {
            perror("listen_on: socket");
            continue;
        }
        if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &yes,
                       sizeof(yes)) < 0) {
            perror("listen_on: setsockopt");
            close(sockfd); /* BUG FIX: was leaked on this path */
            continue;
        }
        if (bind(sockfd, rptr->ai_addr, rptr->ai_addrlen) < 0) {
            close(sockfd);
            perror("listen_on: bind");
            continue;
        }
        /* Log the address we actually bound to. */
        inet_ntop(rptr->ai_family, get_in_addr((struct sockaddr *)rptr->ai_addr),
                  s, sizeof s);
        printf("s=%s\n", s);
        break;
    }

    /* BUG FIX: the original tested res == NULL, which is non-NULL whenever
     * getaddrinfo succeeded, so an all-failures loop fell through to
     * listen() on a closed or uninitialized fd. rptr == NULL is the correct
     * "no address worked" condition. */
    if (rptr == NULL) {
        freeaddrinfo(res);
        return -1;
    }
    freeaddrinfo(res);

    if (listen(sockfd, LISTEN_BACKLOG) < 0) {
        perror("listen_on: listen");
        close(sockfd); /* BUG FIX: don't leak the bound fd on failure */
        return -1;
    }
    return sockfd;
}
/*
 * Block until one client connects on the listening socket.
 * Returns the connected client's fd, or -1 if accept() fails.
 */
int accept_conn(int sockfd)
{
    struct sockaddr_storage peer;
    socklen_t peer_len = sizeof(peer);

    int fd = accept(sockfd, (struct sockaddr *)&peer, &peer_len);
    if (fd < 0) {
        perror("accept_conn: accept");
        return -1;
    }
    return fd;
}
/*
 * Read one request from the client and answer with the canned response.
 * Returns 0 on success (or orderly peer close), -1 on a recv/send error.
 */
int handle_connection(int fd)
{
    char buffer[BUFFER_SIZE];
    ssize_t nread; /* BUG FIX: recv() returns ssize_t, not int */
    size_t response_len;

    /* Drain the request: a read shorter than the buffer means the client
     * has sent everything it is going to send. */
    do {
        nread = recv(fd, buffer, BUFFER_SIZE, 0);
        if (nread < 0) {
            perror("recv failed");
            return -1;
        }
        if (nread == 0) {
            /* BUG FIX: the peer closed the connection. The original fell
             * through and called send() anyway, which can raise SIGPIPE
             * and kill the whole server. Treat it as a clean close. */
            return 0;
        }
    } while (nread == BUFFER_SIZE);

    response_len = strlen(dummy_response);
    if (send(fd, dummy_response, response_len, 0) != (ssize_t)response_len) {
        perror("send failed");
        return -1;
    }
    return 0;
}
/*
 * Entry point: set up the listening socket, then serve forever —
 * accept a connection, handle it, close it, repeat.
 */
int main(int argc, char *argv[])
{
    int listen_fd = listen_on(PORT);
    if (listen_fd < 0) {
        fprintf(stderr, "listen failed");
        exit(1);
    }

    for (;;) {
        int conn = accept_conn(listen_fd);
        if (conn < 0) {
            return 1;
        }
        if (handle_connection(conn) < 0) {
            fprintf(stderr, "handle_connection failed\n");
        }
        close(conn);
    }
    return 0;
}
$ ./wrk -d 1s -c 1 -t 1 http://localhost:3333
Running 1s test @ http://localhost:3333
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 28.81us 13.33us 783.00us 97.94%
Req/Sec 11.71k 3.97k 13.75k 90.91%
12785 requests in 1.10s, 1.10MB read
Requests/sec: 11623.53
Transfer/sec: 1.00MB
$ ./wrk -d 1s -c 1 -t 1 http://localhost:3333
Running 1s test @ http://localhost:3333
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 28.18us 4.84us 118.00us 92.01%
Req/Sec 11.82k 2.08k 13.23k 66.67%
3581 requests in 1.04s, 314.74KB read
Requests/sec: 3454.52
Transfer/sec: 303.62KB
$ ./wrk -d 1s -c 1 -t 1 http://localhost:3333
Running 1s test @ http://localhost:3333
[... blocking for around 30 seconds ...]
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 28.30us 5.25us 176.00us 95.09%
Req/Sec 11.81k 3.92k 13.32k 90.91%
12932 requests in 1.10s, 1.11MB read
Requests/sec: 11706.82
Transfer/sec: 1.00MB
$ ./wrk -d 1s -c 1 -t 1 http://localhost:3333
Running 1s test @ http://localhost:3333
[... blocking for around 30 seconds ...]
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 57.05us 152.27us 3.70ms 98.18%
Req/Sec 8.73k 2.37k 12.90k 70.00%
8662 requests in 1.10s, 761.31KB read
Requests/sec: 7869.00
Transfer/sec: 691.61KB
$
~/wrk master ✗ % ./wrk -d 1s -t 1 -c 1 http://localhost:3333/small.txt
Running 1s test @ http://localhost:3333/small.txt
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 20.33us 63.82us 1.78ms 98.09%
Req/Sec 30.08k 1.28k 32.73k 72.73%
32878 requests in 1.10s, 2.82MB read
Requests/sec: 29862.40
Transfer/sec: 2.56MB
~/wrk master ✗ % ./wrk -d 1s -t 1 -c 1 http://localhost:3333/small.txt
Running 1s test @ http://localhost:3333/small.txt
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 17.74us 54.94us 1.75ms 98.53%
Req/Sec 31.71k 1.52k 33.62k 81.82%
34691 requests in 1.10s, 2.98MB read
Requests/sec: 31542.26
Transfer/sec: 2.71MB
~/wrk master ✗ % ./wrk -d 1s -t 1 -c 1 http://localhost:3333/small.txt
Running 1s test @ http://localhost:3333/small.txt
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 21.13us 89.96us 2.76ms 98.45%
Req/Sec 30.89k 1.85k 32.74k 81.82%
33760 requests in 1.10s, 2.90MB read
Requests/sec: 30700.96
Transfer/sec: 2.64MB
~/wrk master ✗ % ./wrk -d 1s -t 1 -c 1 http://localhost:3333/small.txt
Running 1s test @ http://localhost:3333/small.txt
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 66.73us 346.60us 5.78ms 97.01%
Req/Sec 26.93k 5.68k 32.65k 63.64%
29427 requests in 1.10s, 2.53MB read
Requests/sec: 26771.46
Transfer/sec: 2.30MB
~/wrk master ✗ % ./wrk -d 1s -t 1 -c 1 http://localhost:3333/small.txt
Running 1s test @ http://localhost:3333/small.txt
1 threads and 1 connections
Thread Stats Avg Stdev Max +/- Stdev
Latency 17.83us 45.11us 1.16ms 98.06%
Req/Sec 31.77k 1.45k 33.27k 72.73%
34732 requests in 1.10s, 2.98MB read
Requests/sec: 31596.69
Transfer/sec: 2.71MB
@mrnugget
Copy link
Copy Markdown
Author

mrnugget commented Aug 9, 2015

The problem was the system running out of ephemeral ports. The number of available ephemeral ports on OS X is around 16k, which matches the benchmarking results in hindsight. After around 16k requests (other ports may be used by other processes on the system) the client has to wait for the system to "drain" the old ports (which are in TIME_WAIT state, which OS X somehow doesn't show in netstat).

So, to "fix" this issue for benchmarking purposes, use this:

sudo sysctl -w net.inet.ip.portrange.first=32768
sudo sysctl -w net.inet.tcp.msl=100

This increases the number of available ephemeral ports and decreases the number of milliseconds to wait for sockets in TIME_WAIT state.

Linux is better configured by default, which is why the problems didn't arise there. As to why Linux is so much faster? That is only with one core. As soon as I booted the VirtualBox instance with 2 cores the req/s dropped from around 30k/s to 8k/s. My guess is that the overhead of scheduling is too high at this point.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment