-
-
Save fxg42/55e968649261a37ac6ac to your computer and use it in GitHub Desktop.
<?php
// Respond with the JSON document {"hello":"world!"}.
echo json_encode(array('hello' => 'world!'));
?>
from flask import Flask, jsonify

app = Flask(__name__)


@app.route("/")
def hello():
    """Serve GET / with the JSON body {"hello": "world!"}."""
    return jsonify(hello="world!")


if __name__ == "__main__":
    # Development server only; not for production use.
    app.run()
# Rails controller: GET #index renders the JSON document {"hello":"world!"}.
class HelloController < ApplicationController
  def index
    render json: { "hello" => "world!" }
  end
end
// Grails controller: index() renders {"hello":"world!"} via the JSON converter.
class HelloController {
    def index() {
        render([hello: "world!"] as grails.converters.JSON)
    }
}
// Express router serving {"hello":"world!"} for GET /.
var express = require('express');

var router = express.Router();

// GET / -> JSON body {"hello":"world!"}
router.get('/', function (req, res) {
  res.json({ hello: 'world!' });
});

module.exports = router;
# Phoenix controller: GET / responds with the JSON document {"hello":"world!"}.
defmodule HelloPhoenix.PageController do
  use Phoenix.Controller
  # Poison does the encoding here, aliased as JSON for brevity.
  alias Poison, as: JSON
  # NOTE(review): `plug :action` was required by pre-1.0 Phoenix — confirm
  # against the Phoenix version in use.
  plug :action
  # NOTE(review): json/2 normally encodes its argument itself; passing a
  # pre-encoded string here may double-encode — verify the response body.
  def index(conn, _params) do
    json conn, JSON.encode!(%{hello: "world!"})
  end
end
Did you run 10k requests or 100k requests? I'm running out of ephemeral ports when running 100k (ab starts waiting for long periods) while trying to benchmark a Go solution:
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// main registers the index handler and blocks serving HTTP on :7070.
func main() {
	http.HandleFunc("/", indexHandler)
	// ListenAndServe only returns on failure; the original discarded the
	// error, so startup problems (e.g. port already in use) were silent.
	log.Fatal(http.ListenAndServe(":7070", nil))
}
// indexHandler writes the hello-world JSON payload with a permissive CORS
// header. On encoding failure it responds 500 and writes nothing else.
func indexHandler(res http.ResponseWriter, req *http.Request) {
	res.Header().Set("Access-Control-Allow-Origin", "*")
	res.Header().Set("Content-type", "application/json")
	jsonMsg, err := getResponse()
	if err != nil {
		http.Error(res, "Error", http.StatusInternalServerError)
		// Bug fix: the original fell through and appended the body (and a
		// superfluous write) after the 500 had already been sent.
		return
	}
	// fmt.Fprint, not Fprintf: the payload must not be interpreted as a
	// format string (a '%' in the data would corrupt the output).
	fmt.Fprint(res, jsonMsg)
}
// getResponse renders the HelloMsg payload as a JSON string, propagating
// any marshalling error.
func getResponse() (string, error) {
	payload := HelloMsg{"World"}
	encoded, err := json.Marshal(payload)
	if err != nil {
		return "", err
	}
	return string(encoded), nil
}
// HelloMsg is the response payload; json.Marshal renders it as
// {"Hello":"<value>"}. The field must stay exported for encoding/json.
type HelloMsg struct {
Hello string
}

Then testing `go run server.go` vs `go build server.go; ./server` and running the benchmarks.
I'd be curious to see how golang fares. On my current machine, with 20k requests, I get ~1600 requests per second on the python example vs ~10000 / ~11000 for go run / compiled.
I ran 100K queries first and then did a benchmark for 10k. The idea was to give any VM optimization a chance to run. In Grails' case, the first 10k queries were dealt with at a rate of 400 req/s. After 100k queries, the throughput jumped to about 1400 req/s -- possibly due to VM optimizations.
Just a note: rails s in the development environment runs a single-threaded server that has debugging tools enabled. It's not close to production performance or behavior.
Running `ab -n 10000 http://127.0.0.1/` after 100,000 warm-up queries yields:

- `apachectl start` (PHP): ~1700 req/s
- `grails run-war`: ~1400 req/s
- `pm2 start app.js -i max`: ~950 req/s
- `elixir -pa _build/prod/consolidated -S mix phoenix.start`: ~850 req/s
- `python hello.py`: ~650 req/s
- `rails s`: ~160 req/s

Ran on a MacBook Air, 1.7 GHz Intel Core i7 (dual core), 8 GB 1600 MHz DDR3.