Skip to content

Instantly share code, notes, and snippets.

@bmatheny
Created October 9, 2014 01:34
Show Gist options
  • Save bmatheny/e60631ea850e61cdfb81 to your computer and use it in GitHub Desktop.
diff --git a/include/net/tcp.h b/include/net/tcp.h
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -68,9 +68,9 @@
#define MAX_TCP_HEADER (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40
-/*
+/*
* Never offer a window over 32767 without using window scaling. Some
- * poor stacks do signed 16bit maths!
+ * poor stacks do signed 16bit maths!
*/
#define MAX_TCP_WINDOW 32767U
@@ -183,7 +183,7 @@
/*
* TCP option
*/
-
+
#define TCPOPT_NOP 1 /* Padding */
#define TCPOPT_EOL 0 /* End of options */
#define TCPOPT_MSS 2 /* Segment size negotiating */
@@ -285,6 +285,7 @@
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
+extern int sysctl_tcp_syn_acceptq_pct;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_dma_copybreak;
extern int sysctl_tcp_nometrics_save;
@@ -495,18 +496,19 @@
struct request_sock *req,
struct tcp_fastopen_cookie *foc);
extern int tcp_disconnect(struct sock *sk, int flags);
+extern int tcp_check_syn_acceptq(struct sock *sk);
void tcp_connect_init(struct sock *sk);
void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
extern __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS];
-extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
+extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
struct ip_options *opt);
#ifdef CONFIG_SYN_COOKIES
-extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
+extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
__u16 *mss);
#else
static inline __u32 cookie_v4_init_sequence(struct sock *sk,
@@ -1081,16 +1083,16 @@
space - (space>>sysctl_tcp_adv_win_scale);
}
-/* Note: caller must be prepared to deal with negative returns */
+/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
return tcp_win_from_space(sk->sk_rcvbuf -
atomic_read(&sk->sk_rmem_alloc));
-}
+}
static inline int tcp_full_space(const struct sock *sk)
{
- return tcp_win_from_space(sk->sk_rcvbuf);
+ return tcp_win_from_space(sk->sk_rcvbuf);
}
static inline void tcp_openreq_init(struct request_sock *req,
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -41,6 +41,10 @@
static int tcp_syn_retries_max = MAX_TCP_SYNCNT;
static int ip_ping_group_range_min[] = { 0, 0 };
static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX };
+
+static int tcp_syn_acceptq_min;
+static int tcp_syn_acceptq_max = 100;
+
static int tcp_icwnd_min = 2;
static int tcp_icwnd_max = 1000;
@@ -599,6 +603,15 @@
.proc_handler = proc_dointvec
},
{
+ .procname = "tcp_syn_acceptq_pct",
+ .data = &sysctl_tcp_syn_acceptq_pct,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &tcp_syn_acceptq_min,
+ .extra2 = &tcp_syn_acceptq_max,
+ },
+ {
.procname = "tcp_low_latency",
.data = &sysctl_tcp_low_latency,
.maxlen = sizeof(int),
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1479,6 +1479,21 @@
return 0;
}
+int sysctl_tcp_syn_acceptq_pct __read_mostly;
+
+/* Nonzero when the listen socket's accept queue has grown past
+ * tcp_syn_acceptq_pct percent of its limit; always zero when the
+ * sysctl is disabled (<= 0). */
+int tcp_check_syn_acceptq(struct sock *sk)
+{
+	int pct = sysctl_tcp_syn_acceptq_pct;
+
+	if (pct <= 0)
+		return 0;
+	return sk->sk_ack_backlog > (pct * sk->sk_max_ack_backlog) / 100;
+}
+EXPORT_SYMBOL(tcp_check_syn_acceptq);
+
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
{
struct tcp_options_received tmp_opt;
@@ -1500,6 +1515,15 @@
if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
goto drop;
+ /*
+ * Before checking anything else, if we have filled up some
+ * (configurable) percentage of the acceptq, send a TCP reset.
+ */
+ if (tcp_check_syn_acceptq(sk)) {
+ tcp_v4_send_reset(NULL, skb);
+ goto drop;
+ }
+
/* TW buckets are converted to open requests without
* limitations, they conserve resources and peer is
* evidently real one.
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -962,6 +962,15 @@
if (!ipv6_unicast_destination(skb))
goto drop;
+ /*
+ * Before checking anything else, if we have filled up some (configurable)
+ * percentage of the acceptq, send a TCP reset.
+ */
+ if (tcp_check_syn_acceptq(sk)) {
+ tcp_v6_send_reset(NULL, skb);
+ goto drop;
+ }
+
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
if (!want_cookie)
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment