# Author: Aram Grigorian <[email protected]>
# https://github.com/aramg
# https://github.com/opendns
#
# By default, nginx will close upstream connections after every request.
# The upstream-keepalive module tries to remedy this by keeping a certain minimum number of
# persistent connections open to upstreams at all times. These connections are re-used for
# all requests, regardless of the downstream connection source. There are options available
# for load balancing clients to the same upstreams more consistently.
# This is all designed around the reverse proxy case, which is nginx's main purpose.
#
# This patch enables 1-1 connection mapping & caching between clients and upstreams.
# Note that requests will no longer be evenly distributed across all upstreams; the
# primary use case is making nginx a forward proxy - along the lines of:
#
# http {
#     ..
#     server {
#         listen 80;
#         ..
#         location / {
#             proxy_pass http://$http_host$request_uri;
#         }
#     }
# }
#
# original blog post:
# https://engineering.opendns.com/2015/11/03/lets-talk-about-proxies-pt-2-nginx-as-a-forward-http-proxy/
#
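# A slightly fuller sketch of the forward-proxy config above (hypothetical
# values; proxy_pass with variables needs a resolver so nginx can look up
# arbitrary hosts):
#
# http {
#     resolver 8.8.8.8;    # any DNS server reachable from the proxy
#     server {
#         listen 80;
#         location / {
#             proxy_pass http://$http_host$request_uri;
#         }
#     }
# }
#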
diff --git a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.c b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.c
index 04a365a..ef558e6 100644
--- a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.c
+++ b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.c
@@ -1014,6 +1014,21 @@ ngx_close_connection(ngx_connection_t *c)
     ngx_uint_t    log_error, level;
     ngx_socket_t  fd;
 
+    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
+                   "close connection: c %p", c);
+
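+    /* a closing client connection takes its cached upstream connection
+     * (if any) down with it */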
+    if (c->saved_peer_conn) {
+        ngx_peer_saved_connection_t  *sc = c->saved_peer_conn;
+
+        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, c->log, 0,
+                       "close conn: c->sc->c: %p", sc->connection);
+
+        if (sc->connection) {
+            ngx_close_saved_connection(sc->connection);
+            sc->connection = NULL;
+        }
+    }
+
     if (c->fd == (ngx_socket_t) -1) {
         ngx_log_error(NGX_LOG_ALERT, c->log, 0, "connection already closed");
         return;
 
@@ -1091,6 +1106,29 @@ ngx_close_connection(ngx_connection_t *c)
     }
 }
 
+void
+ngx_close_saved_connection(ngx_connection_t *c)
+{
+
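+    /* consolidated teardown for upstream connections: the same SSL
+     * shutdown + pool destroy sequence that ngx_http_upstream_next() and
+     * ngx_http_upstream_finalize_request() previously inlined */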
+#if (NGX_HTTP_SSL)
+
+    if (c->ssl) {
+        c->ssl->no_wait_shutdown = 1;
+        c->ssl->no_send_shutdown = 1;
+
+        if (ngx_ssl_shutdown(c) == NGX_AGAIN) {
+            c->ssl->handler = ngx_close_saved_connection;
+            return;
+        }
+    }
+
+#endif
+
+    if (c->pool) {
+        ngx_destroy_pool(c->pool);
+    }
+    ngx_close_connection(c);
+}
 
 void
 ngx_reusable_connection(ngx_connection_t *c, ngx_uint_t reusable)
diff --git a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.h b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.h
index a49aa95..b265bc0 100644
--- a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.h
+++ b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/core/ngx_connection.h
@@ -116,6 +116,13 @@ typedef enum {
 } ngx_connection_tcp_nopush_e;
 
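+/* one cached upstream connection per downstream client; sockaddr/socklen
+ * record the peer it was opened to */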
+typedef struct {
+    ngx_connection_t  *connection;
+    socklen_t          socklen;
+    u_char             sockaddr[NGX_SOCKADDRLEN];
+} ngx_peer_saved_connection_t;
+
+
 #define NGX_LOWLEVEL_BUFFERED  0x0f
 #define NGX_SSL_BUFFERED       0x01
 #define NGX_SPDY_BUFFERED      0x02
 
@@ -147,6 +154,9 @@ struct ngx_connection_s {
     ngx_str_t           proxy_protocol_addr;
 
+    /* cached upstream connection */
+    ngx_peer_saved_connection_t  *saved_peer_conn;
+
 #if (NGX_SSL)
     ngx_ssl_connection_t  *ssl;
 #endif
 
@@ -214,6 +224,7 @@ ngx_int_t ngx_set_inherited_sockets(ngx_cycle_t *cycle);
 ngx_int_t ngx_open_listening_sockets(ngx_cycle_t *cycle);
 void ngx_configure_listening_sockets(ngx_cycle_t *cycle);
 void ngx_close_listening_sockets(ngx_cycle_t *cycle);
+void ngx_close_saved_connection(ngx_connection_t *c);
 void ngx_close_connection(ngx_connection_t *c);
 ngx_int_t ngx_connection_local_sockaddr(ngx_connection_t *c, ngx_str_t *s,
     ngx_uint_t port);
diff --git a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream.c b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream.c
index f2a530e..15a023f 100644
--- a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream.c
+++ b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream.c
@@ -1299,6 +1299,249 @@ ngx_http_upstream_check_broken_connection(ngx_http_request_t *r,
     }
 }
 
+/* some of these are the same as in the upstream_keepalive module, but
+ * I am not modifying the other file, to keep the patch small */
+static void
+ngx_http_upstream_keepalive_dummy_handler(ngx_event_t *ev)
+{
+    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ev->log, 0,
+                   "keepalive dummy handler");
+}
+
+static void
+ngx_http_upstream_keepalive_close_handler(ngx_event_t *ev)
+{
+    ngx_peer_saved_connection_t  *sc;
+
+    int                n;
+    char               buf[1];
+    ngx_connection_t  *c;
+
+    ngx_log_debug0(NGX_LOG_DEBUG_HTTP, ev->log, 0,
+                   "keepalive close handler");
+
+    c = ev->data;
+
+    if (c->close) {
+        goto close;
+    }
+
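+    /* peek one byte without consuming it: -1/EAGAIN means the idle
+     * connection is still quiet and stays cached; EOF, data, or any other
+     * error means the upstream closed or broke it, so drop it */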
+    n = recv(c->fd, buf, 1, MSG_PEEK);
+
+    if (n == -1 && ngx_socket_errno == NGX_EAGAIN) {
+        ev->ready = 0;
+
+        if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+            goto close;
+        }
+
+        return;
+    }
+
+close:
+
+    sc = c->data;
+    ngx_close_saved_connection(c);
+    sc->connection = NULL;
+}
+
+ngx_int_t
+ngx_http_upstream_keepalive_check(ngx_http_request_t *r,
+    ngx_http_upstream_t *u, ngx_http_upstream_rr_peers_t *peers,
+    ngx_http_upstream_rr_peer_data_t *rrp)
+{
+    ngx_uint_t                    i;
+    ngx_peer_saved_connection_t  *sc;
+    ngx_connection_t             *scc;
+    ngx_http_upstream_rr_peer_t  *peer;
+
+    /* check whether the client connection has a cached upstream connection,
+     * and whether it matches the upstream of the current request.
+     */
+
+    sc = r->connection->saved_peer_conn;
+
+    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                   "upstream keepalive check: sc:%p ->c:%p", sc, sc->connection);
+
+    if (!sc->connection) {
+        return NGX_ERROR;
+    }
+
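+    /* walk the resolved peer list under the write lock, looking for a live
+     * peer whose address matches the cached connection */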
+    ngx_http_upstream_rr_peers_wlock(peers);
+
+    for (i = 0; i < peers->number; i++) {
+
+#if (NGX_DEBUG)
+        u_char     text[NGX_SOCKADDR_STRLEN];
+        ngx_str_t  addr;
+        addr.data = text;
+        addr.len = ngx_sock_ntop(peers->peer[i].sockaddr,
+                                 peers->peer[i].socklen, text, NGX_SOCKADDR_STRLEN, 0);
+        ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                       "peers->peer[%d]=\"%V\"", i, &addr);
+#endif
+        peer = &peers->peer[i];
+        if (peer->down) {
+            continue;
+        }
+
+        if (peer->max_fails && peer->fails >= peer->max_fails) {
+            continue;
+        }
+
+        // assert(peer->sockaddr);
+
+        if (ngx_memn2cmp((u_char *) &sc->sockaddr,
+                         (u_char *) peer->sockaddr,
+                         sc->socklen, peer->socklen) == 0)
+        {
+            scc = sc->connection;
+            ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                           "saved connection matched");
+
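+            /* revive the parked connection and hand it to this request's
+             * peer as a cached connection */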
+            scc->idle = 0;
+            scc->sent = 0;
+            scc->log = u->peer.log;
+            scc->read->log = u->peer.log;
+            scc->write->log = u->peer.log;
+            scc->pool->log = u->peer.log;
+
+            u->peer.cached = 1;
+            u->peer.connection = scc;
+            u->peer.sockaddr = peer->sockaddr;
+            u->peer.socklen = peer->socklen;
+            u->peer.name = &peer->name;
+
+            rrp->current = peer;
+            ngx_http_upstream_rr_peers_unlock(peers);
+            return NGX_OK;
+        }
+    }
+
+    ngx_http_upstream_rr_peers_unlock(peers);
+    return NGX_ERROR;
+}
+
+static void
+ngx_http_upstream_keepalive_save(ngx_http_request_t *r, ngx_http_upstream_t *u)
+{
+    ngx_uint_t              invalid = 0;
+    ngx_peer_connection_t  *pc = &u->peer;
+    ngx_connection_t       *c = pc->connection;
+
+    ngx_peer_saved_connection_t  *sc = NULL;
+
+    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                   "upstream keepalive save: r->c->sc:%p", r->connection->saved_peer_conn);
+
+    if (c == NULL
+        || c->read->eof
+        || c->read->error
+        || c->read->timedout
+        || c->write->error
+        || c->write->timedout)
+    {
+        invalid = 1;
+    }
+    else if (r->internal) {
+        invalid = 1;
+    }
+    else if (!u->keepalive) {
+        invalid = 1;
+    }
+    else if (ngx_handle_read_event(c->read, 0) != NGX_OK) {
+        invalid = 1;
+    }
+
+    /* keeping this a bit verbose to keep the logic more obvious */
+    if (!r->connection->saved_peer_conn) {
+        if (invalid == 1) {
+            // ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+            //                "state is invalid, not saving");
+            return;
+        }
+        sc = ngx_pcalloc(r->connection->pool, sizeof(ngx_peer_saved_connection_t));
+        if (!sc) {
+            // ngx_log_stderr(0, "ngx_pcalloc failed!");
+            return;
+        }
+        r->connection->saved_peer_conn = sc;
+    }
+    else {
+        sc = r->connection->saved_peer_conn;
+        if (!sc->connection) {
+            if (invalid == 1) {
+                // ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                //                "no saved conn, skipping this one as well");
+                return;
+            }
+            // ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+            //                "no saved conn, but we can cache this one");
+        }
+        else {
+            // ngx_log_debug2(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+            //                "have a cached connection, is it the same? "
+            //                " new conn=%p saved conn=%p", c, sc->connection);
+            if (c == sc->connection
+                && ngx_memn2cmp((u_char *) &sc->sockaddr,
+                                (u_char *) pc->sockaddr, sc->socklen, pc->socklen) == 0)
+            {
+                if (invalid == 1) {
+                    // ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                    //                "yes - but no longer valid, clear cache");
+                    ngx_close_saved_connection(sc->connection);
+                    sc->connection = NULL;
+                    pc->connection = NULL;
+                    return;
+                }
+                // ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                //                "yes and still valid");
+            } else {
+                if (invalid == 1) {
+                    // ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                    //                "not the same, but this one is invalid, so do nothing");
+                    return;
+                }
+                // ngx_log_debug0(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                //                "not the same, close the saved conn and cache this one");
+                ngx_close_saved_connection(sc->connection);
+                sc->connection = NULL;
+            }
+        }
+    }
+
+    ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
+                   "upstream keepalive: saving connection %p", c);
+
+    if (c->read->timer_set) {
+        ngx_del_timer(c->read);
+    }
+    if (c->write->timer_set) {
+        ngx_del_timer(c->write);
+    }
+
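+    /* park the connection: writes are ignored while idle, and the close
+     * handler watches for the upstream closing or sending stray data */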
+    c->write->handler = ngx_http_upstream_keepalive_dummy_handler;
+    c->read->handler = ngx_http_upstream_keepalive_close_handler;
+
+    c->data = sc;
+    c->idle = 1;
+    c->log = ngx_cycle->log;
+    c->read->log = ngx_cycle->log;
+    c->write->log = ngx_cycle->log;
+    c->pool->log = ngx_cycle->log;
+
+    ngx_memcpy(&sc->sockaddr, pc->sockaddr, pc->socklen);
+    sc->socklen = pc->socklen;
+    sc->connection = c;
+    pc->connection = NULL;
+
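+    /* a read event may already be pending; run the close handler now so a
+     * dead connection is not left in the cache */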
+    if (c->read->ready) {
+        ngx_http_upstream_keepalive_close_handler(c->read);
+    }
+
+    return;
+}
 
 static void
 ngx_http_upstream_connect(ngx_http_request_t *r, ngx_http_upstream_t *u)
@@ -1325,8 +1568,25 @@ ngx_http_upstream_connect(ngx_http_request_t *r, ngx_http_upstream_t *u)
     u->state->connect_time = (ngx_msec_t) -1;
     u->state->header_time = (ngx_msec_t) -1;
 
+    /* for now, only do connection mapping for resolved upstreams,
+     * which default to using the base round robin balancer.
+     * see ngx_http_upstream_init_request(..)
+     * and ngx_http_upstream_resolve_handler(..)
+     */
+    if (u->resolved && r->connection->saved_peer_conn) {
+        ngx_http_upstream_rr_peer_data_t  *rrp;
+        ngx_http_upstream_rr_peers_t      *peers;
+        rrp = ngx_http_upstream_get_round_robin_peers(&u->peer, u->peer.data, &peers);
+        if (rrp && peers) {
+            rc = ngx_http_upstream_keepalive_check(r, u, peers, rrp);
+            if (rc == NGX_OK) {
+                goto peer_picked;
+            }
+        }
+    }
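+
+    /* no usable cached connection; fall through to a fresh connect */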
     rc = ngx_event_connect_peer(&u->peer);
+peer_picked:
 
     ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                    "http upstream connect: %i", rc);
 
@@ -3800,6 +4060,10 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u,
         }
 
         u->peer.free(&u->peer, u->peer.data, state);
+        /* clear the cache if needed */
+        if (u->resolved && u->peer.connection) {
+            ngx_http_upstream_keepalive_save(r, u);
+        }
         u->peer.sockaddr = NULL;
     }
 
@@ -3889,21 +4153,7 @@ ngx_http_upstream_next(ngx_http_request_t *r, ngx_http_upstream_t *u,
         ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
                        "close http upstream connection: %d",
                        u->peer.connection->fd);
-#if (NGX_HTTP_SSL)
-
-        if (u->peer.connection->ssl) {
-            u->peer.connection->ssl->no_wait_shutdown = 1;
-            u->peer.connection->ssl->no_send_shutdown = 1;
-
-            (void) ngx_ssl_shutdown(u->peer.connection);
-        }
-#endif
-
-        if (u->peer.connection->pool) {
-            ngx_destroy_pool(u->peer.connection->pool);
-        }
-
-        ngx_close_connection(u->peer.connection);
+        ngx_close_saved_connection(u->peer.connection);
 
         u->peer.connection = NULL;
     }
 
@@ -3958,41 +4208,19 @@ ngx_http_upstream_finalize_request(ngx_http_request_t *r,
 
     if (u->peer.free && u->peer.sockaddr) {
         u->peer.free(&u->peer, u->peer.data, 0);
-        u->peer.sockaddr = NULL;
     }
 
+    /* try to cache the connection first */
+    if (u->resolved && u->peer.connection) {
+        ngx_http_upstream_keepalive_save(r, u);
+    }
+
     if (u->peer.connection) {
-
-#if (NGX_HTTP_SSL)
-
-        /* TODO: do not shutdown persistent connection */
-
-        if (u->peer.connection->ssl) {
-
-            /*
-             * We send the "close notify" shutdown alert to the upstream only
-             * and do not wait its "close notify" shutdown alert.
-             * It is acceptable according to the TLS standard.
-             */
-
-            u->peer.connection->ssl->no_wait_shutdown = 1;
-
-            (void) ngx_ssl_shutdown(u->peer.connection);
-        }
-#endif
-
-        ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
-                       "close http upstream connection: %d",
-                       u->peer.connection->fd);
-
-        if (u->peer.connection->pool) {
-            ngx_destroy_pool(u->peer.connection->pool);
-        }
-
-        ngx_close_connection(u->peer.connection);
+        ngx_close_saved_connection(u->peer.connection);
     }
 
     u->peer.connection = NULL;
+    u->peer.sockaddr = NULL;
 
     if (u->pipe && u->pipe->temp_file) {
         ngx_log_debug1(NGX_LOG_DEBUG_HTTP, r->connection->log, 0,
diff --git a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.c b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.c
index d6ae33b..e1c02a2 100644
--- a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.c
+++ b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.c
@@ -507,6 +507,18 @@ failed:
 
     return NGX_BUSY;
 }
 
+ngx_http_upstream_rr_peer_data_t *
+ngx_http_upstream_get_round_robin_peers(ngx_peer_connection_t *pc,
+    void *data, ngx_http_upstream_rr_peers_t **peers)
+{
+    ngx_http_upstream_rr_peer_data_t  *rrp = data;
+
+    ngx_log_debug2(NGX_LOG_DEBUG_HTTP, pc->log, 0,
+                   "get rr peers, data: %p, peers (out): %p", data, peers);
+
+    *peers = rrp->peers;
+    return rrp;
+}
 
 static ngx_http_upstream_rr_peer_t *
 ngx_http_upstream_get_peer(ngx_http_upstream_rr_peer_data_t *rrp)
diff --git a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.h b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.h
index f2c573f..f02e387 100644
--- a/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.h
+++ b/ngx_openresty-1.9.3.1/bundle/nginx-1.9.3/src/http/ngx_http_upstream_round_robin.h
@@ -137,6 +137,10 @@ ngx_int_t ngx_http_upstream_get_round_robin_peer(ngx_peer_connection_t *pc,
 void ngx_http_upstream_free_round_robin_peer(ngx_peer_connection_t *pc,
     void *data, ngx_uint_t state);
 
+ngx_http_upstream_rr_peer_data_t *ngx_http_upstream_get_round_robin_peers(
+    ngx_peer_connection_t *pc, void *data,
+    ngx_http_upstream_rr_peers_t **peers);
+
 #if (NGX_HTTP_SSL)
 ngx_int_t
 ngx_http_upstream_set_round_robin_peer_session(ngx_peer_connection_t *pc,
Please note: I don't get notifications for comments here, sorry for not replying earlier.
If it's a really pressing matter, you can open an issue in one of my repos to get my attention.
Hi Aram:
Nice job on the nginx forward-proxy HTTP keepalive one-to-one patch.
I have a question:
How is an inbound connection's keep-alive controlled? Does keep-alive need to be set in the configuration, or is it driven by when the outbound connection is closed?
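For what it's worth, the patch does not change how downstream (client) keep-alive works; that is still governed by nginx's standard client-side directives. A minimal sketch, with illustrative values:

http {
    keepalive_timeout  65;     # how long an idle client connection stays open
    keepalive_requests 100;    # max requests served over one client connection
    ..
}

The cached upstream connection is torn down whenever its client connection closes (see the ngx_close_connection hook above), so the outbound side follows the inbound side, not the other way around.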
Great job! A question about the nginx forward proxy blog post: when proxying an HTTPS connection (https://upstream -> https://proxy -> client), what's the recommended way to deal with an SSL certificate mismatch (assuming the proxy's domain differs from the upstream's)?