Today I just wanted to know which of these options is the fastest:
- Go HTTP standalone
- Nginx proxy to Go HTTP
- Nginx fastcgi to Go TCP FastCGI
- Nginx fastcgi to Go Unix Socket FastCGI
The hardware and software used:
- Samsung laptop NP550P5C-AD1BR
- Intel Core i7 3630QM @ 2.4GHz (quad core, 8 threads)
- CPU caches: L1 256KiB, L2 1MiB, L3 6MiB
- RAM 8GiB DDR3 1600MHz
- Ubuntu 13.10 amd64 Saucy Salamander (updated)
- Nginx 1.4.4 (1.4.4-1~saucy0 amd64)
- Go 1.2 (linux/amd64)
- ApacheBench 2.3 Revision 1430300
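If you are reproducing this on a similar setup, the versions are easy to double check from the shell:

$ nginx -v      # prints the nginx version
$ go version    # prints the Go toolchain version
$ ab -V         # prints the ApacheBench version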
The kernel was tuned with these sysctl settings:
fs.file-max 9999999
fs.nr_open 9999999
net.core.netdev_max_backlog 4096
net.core.rmem_max 16777216
net.core.somaxconn 65535
net.core.wmem_max 16777216
net.ipv4.ip_forward 0
net.ipv4.ip_local_port_range 1025 65535
net.ipv4.tcp_fin_timeout 30
net.ipv4.tcp_keepalive_time 30
net.ipv4.tcp_max_syn_backlog 20480
net.ipv4.tcp_max_tw_buckets 400000
net.ipv4.tcp_no_metrics_save 1
net.ipv4.tcp_syn_retries 2
net.ipv4.tcp_synack_retries 2
net.ipv4.tcp_tw_recycle 1
net.ipv4.tcp_tw_reuse 1
vm.min_free_kbytes 65536
vm.overcommit_memory 1
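These can be applied at runtime with sysctl; a quick sketch, assuming the values are also kept in /etc/sysctl.conf in key = value form so they survive a reboot:

$ sudo sysctl -w net.core.somaxconn=65535   # set a single key immediately
$ sudo sysctl -p                            # reload everything from /etc/sysctl.conf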
The main nginx.conf:
user www-data;
worker_processes 16;
worker_rlimit_nofile 200000;
pid /var/run/nginx.pid;

events {
    worker_connections 10000;
    use epoll;
    multi_accept on;
}

http {
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    keepalive_requests 10000;
    types_hash_max_size 2048;

    open_file_cache max=200000 inactive=20s;
    open_file_cache_valid 30s;
    open_file_cache_min_uses 2;
    open_file_cache_errors on;

    server_tokens off;
    dav_methods off;

    include /etc/nginx/mime.types;
    default_type application/octet-stream;

    gzip on;
    gzip_disable "msie6";
    gzip_min_length 512;
    gzip_vary on;
    gzip_proxied any;
    gzip_types text/plain text/css application/json application/x-javascript text/xml application/xml application/xml+rss text/javascript;

    include /etc/nginx/conf.d/*.conf;
    include /etc/nginx/sites-enabled/*.conf;
}
The three virtual hosts, one per setup:
server {
    listen 80;
    server_name go.http;
    access_log off;
    error_log /dev/null crit;

    location / {
        proxy_pass http://127.0.0.1:8080;
    }
}

server {
    listen 80;
    server_name go.fcgi.tcp;
    access_log off;
    error_log /dev/null crit;

    location / {
        include fastcgi_params;
        fastcgi_pass 127.0.0.1:9001;
    }
}

server {
    listen 80;
    server_name go.fcgi.unix;
    access_log off;
    error_log /dev/null crit;

    location / {
        include fastcgi_params;
        fastcgi_pass unix:/tmp/go.sock;
    }
}
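Once the vhosts are in /etc/nginx/sites-enabled/ (the include above expects a .conf suffix), the configuration can be checked and reloaded; the service name matches the Ubuntu package:

$ sudo nginx -t              # validate the configuration
$ sudo service nginx reload  # apply it without dropping connections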
And the Go server, which serves the same handler over plain HTTP, FastCGI over TCP, and FastCGI over a Unix socket:

package main

import (
	"fmt"
	"log"
	"net"
	"net/http"
	"net/http/fcgi"
	"os"
)

// Server is the trivial handler shared by all three front ends.
type Server struct{}

func (s Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	fmt.Fprintln(w, "Hello World")
}

func main() {
	server := Server{}

	// Plain HTTP on :8080 (hit directly and through the nginx proxy).
	go func() {
		http.Handle("/", server)
		if err := http.ListenAndServe(":8080", nil); err != nil {
			log.Fatal(err)
		}
	}()

	// FastCGI over TCP on :9001.
	go func() {
		tcp, err := net.Listen("tcp", ":9001")
		if err != nil {
			log.Fatal(err)
		}
		if err := fcgi.Serve(tcp, server); err != nil {
			log.Fatal(err)
		}
	}()

	// FastCGI over a Unix socket; remove any stale socket file first.
	os.Remove("/tmp/go.sock")
	unix, err := net.Listen("unix", "/tmp/go.sock")
	if err != nil {
		log.Fatal(err)
	}
	if err := fcgi.Serve(unix, server); err != nil {
		log.Fatal(err)
	}
}
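Assuming the code above is saved as server.go (the filename is my guess), the ./server binary used below is built with:

$ go build -o server server.go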
The procedure:
- Configure the kernel with sysctl
- Configure nginx
- Configure the nginx virtual hosts (and make their names resolve locally; see the sketch after this list)
- Start the server as the nginx user (sudo -u www-data ./server)
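For ab to reach the named vhosts, go.http, go.fcgi.tcp and go.fcgi.unix have to resolve to the local machine; an /etc/hosts entry like the following (my assumption about the original setup) is enough:

$ echo "127.0.0.1 go.http go.fcgi.tcp go.fcgi.unix" | sudo tee -a /etc/hosts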
First run, with GOMAXPROCS=1:
$ export GOMAXPROCS=1
$ sudo -u www-data ./server &
$ sudo ab -k -c 4000 -n 100000 http://127.0.0.1:8080/
$ sudo ab -k -c 4000 -n 100000 http://go.http/
$ sudo ab -k -c 4000 -n 100000 http://go.fcgi.tcp/
$ sudo ab -k -c 4000 -n 100000 http://go.fcgi.unix/
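One caveat: with the default sudoers env_reset, an exported GOMAXPROCS may not survive sudo; a sketch that passes it to the server explicitly:

$ sudo -u www-data env GOMAXPROCS=1 ./server &   # make sure the variable actually reaches the process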
Metric | Go | Nginx+Go HTTP | Nginx+Go FCGI TCP | Nginx+Go FCGI Unix |
---|---|---|---|---|
time taken (s) | 2.15 | 5.729 | 10.794 | 9.137 |
requests/second | 46,517.04 | 17,454.89 | 9,264.48 | 10,944.05 |
transfer rate (bytes/s) | 7,117,106.64 | 2,932,422.33 | 1,528,639.76 | 1,805,768.68 |
min conn time (ms) | 0 | 0 | 0 | 0 |
avg conn time (ms) | 1 | 1 | 8 | 32 |
max conn time (ms) | 72 | 85 | 74 | 88 |
min proc time (ms) | 0 | 100 | 102 | 91 |
avg proc time (ms) | 81 | 223 | 418 | 329 |
max proc time (ms) | 55 | 333 | 674 | 426 |
min total time (ms) | 0 | 100 | 102 | 91 |
avg total time (ms) | 82 | 224 | 426 | 361 |
max total time (ms) | 127 | 418 | 748 | 514 |
Second run, with GOMAXPROCS=8:
$ export GOMAXPROCS=8
$ sudo -u www-data ./server &
$ sudo ab -k -c 4000 -n 100000 http://127.0.0.1:8080/
$ sudo ab -k -c 4000 -n 100000 http://go.http/
$ sudo ab -k -c 4000 -n 100000 http://go.fcgi.tcp/
$ sudo ab -k -c 4000 -n 100000 http://go.fcgi.unix/
Metric | Go | Nginx+Go HTTP | Nginx+Go FCGI TCP | Nginx+Go FCGI Unix |
---|---|---|---|---|
time taken (s) | 2.144 | 5.438 | 10.776 | 9.360 |
requests/second | 46,641.16 | 18,387.54 | 9,279.75 | 10,683.57 |
transfer rate (bytes/s) | 7,136,097.51 | 3,089,106.94 | 1,531,159.37 | 1,762,788.68 |
min conn time (ms) | 0 | 0 | 0 | 0 |
avg conn time (ms) | 1 | 1 | 7 | 32 |
max conn time (ms) | 65 | 77 | 71 | 97 |
min proc time (ms) | 0 | 100 | 47 | 95 |
avg proc time (ms) | 81 | 212 | 419 | 337 |
max proc time (ms) | 124 | 278 | 766 | 441 |
min total time (ms) | 0 | 100 | 47 | 95 |
avg total time (ms) | 82 | 213 | 426 | 369 |
max total time (ms) | 189 | 355 | 837 | 538 |
My conclusions:
- If you don't need any nginx-specific features, I suggest sticking with standalone Go. The nginx overhead looks huge, and it will not always be worth the extra features.
- If you do need nginx for virtual hosting or some other fancy feature, go with the HTTP proxy configuration.
I don't know why the FastCGI setups did so much worse than the HTTP one. Any ideas?