# Kernel sysctl configuration file for Red Hat Linux
#
# For binary values, 0 is disabled, 1 is enabled. See sysctl(8) and
# sysctl.conf(5) for more details.

#
# Network settings
#

# This sets the max OS receive buffer size for all types of connections
net.core.rmem_max = 33554432

# This sets the max OS send buffer size for all types of connections.
net.core.wmem_max = 33554432

# Increase number of incoming connections backlog
net.core.netdev_max_backlog = 3000

# TCP Autotuning setting.
# The first value tells the kernel the minimum receive buffer for each TCP
# connection, and this buffer is always allocated to a TCP socket, even under
# high pressure on the system.
# The second value tells the kernel the default receive buffer allocated for
# each TCP socket. This value overrides the /proc/sys/net/core/rmem_default
# value used by other protocols.
# The third and last value specifies the maximum receive buffer that can be
# allocated for a TCP socket.
net.ipv4.tcp_rmem = 4096 16384 33554432

# TCP Autotuning setting.
# This variable takes 3 different values which hold information on how much
# TCP send-buffer memory space each TCP socket may use. Every TCP socket has
# this much buffer space to use before the buffer is filled up. Each of the
# three values is used under different conditions.
# The first value tells the minimum TCP send buffer space available for a
# single TCP socket.
# The second value tells the default buffer space allowed for a single TCP
# socket to use.
# The third value tells the kernel the maximum TCP send buffer space.
net.ipv4.tcp_wmem = 4096 16384 33554432

# TCP Autotuning setting.
# The tcp_mem variable defines how the TCP stack should behave when it comes
# to memory usage.
# The first value tells the kernel the low threshold. Below this point, the
# TCP stack does not bother at all about putting any pressure on the memory
# usage by different TCP sockets.
# The second value tells the kernel at which point to start pressuring memory
# usage down.
# The final value tells the kernel how many memory pages it may use maximally.
# If this value is reached, TCP streams and packets start getting dropped
# until we reach a lower memory usage again. This value includes all TCP
# sockets currently in use.
# NOTE(review): the third value 26777216 looks like a typo for 16777216 —
# confirm the intended maximum page count before deploying.
net.ipv4.tcp_mem = 786432 1048576 26777216

# Increase the tcp-time-wait buckets pool size to prevent simple DOS attacks
net.ipv4.tcp_max_tw_buckets = 360000
net.ipv4.tcp_tw_reuse = 1

# Controls IP packet forwarding
net.ipv4.ip_forward = 0

# Controls source route verification
net.ipv4.conf.default.rp_filter = 1

# Do not accept source routing
net.ipv4.conf.default.accept_source_route = 0

# Controls the use of TCP syncookies
net.ipv4.tcp_syncookies = 1

# Allowed local port range
net.ipv4.ip_local_port_range = 1024 65535

# NOTE(review): net.ipv4.tcp_timewait_len is not a standard Linux sysctl
# (TCP_TIMEWAIT_LEN is a compile-time kernel constant), so `sysctl -p` will
# likely report an unknown key here — confirm the target kernel supports it.
net.ipv4.tcp_timewait_len = 20
net.ipv4.tcp_fin_timeout = 20

#
# Kernel settings
#

# Controls the System Request debugging functionality of the kernel
kernel.sysrq = 0

# Controls whether core dumps will append the PID to the core filename.
# Useful for debugging multi-threaded applications.
kernel.core_uses_pid = 1

#
# Message queue settings
#

# Controls the default maximum size of a message queue
kernel.msgmnb = 65536

# Controls the maximum size of a message, in bytes
kernel.msgmax = 65536

#
# Shared memory settings
#

# Controls the maximum shared segment size, in bytes
kernel.shmmax = 68719476736

# Controls the maximum number of shared memory segments, in pages
kernel.shmall = 4294967296

#
# Filesystem settings
#

# Increase size of file handles and inode cache
fs.file-max = 400000