Created
October 13, 2025 13:24
-
-
Save coffnix/787509703e5951bfbbba3c0b658fcf52 to your computer and use it in GitHub Desktop.
file /etc/sysctl.d/50-vipnix.conf
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
####
## VIPNIX TUNING - System and Network Optimization for High Performance
##
## This file adjusts Linux kernel parameters to maximize performance on 10 Gbps networks,
## reduce packet retransmissions, and improve efficiency on high-load servers.
## Focused on local networks with low latency (1-5 ms), such as connections between Linux servers and macOS clients.
## Values are optimized to minimize packet loss, prioritize low latency, and maximize throughput.

# Increases the maximum number of events that inotify can monitor simultaneously.
# Useful for applications that monitor many files (e.g., file servers, backup tools).
# Value: 524288 allows monitoring up to half a million files, suitable for modern servers.
fs.inotify.max_user_watches=524288

# Sets the maximum limit of pending connections in the socket listening queue.
# Increasing from 128 (default) to 2048 supports more simultaneous connections, ideal for web or proxy servers.
net.core.somaxconn=2048

# Increases the maximum limit of open file descriptors in the system.
# A value of 100000 supports applications with many connections (e.g., database servers, proxies).
fs.file-max=100000

# Reduces the kernel's tendency to move idle processes to swap (default = 60).
# A value of 10 prioritizes keeping data in RAM, improving performance on servers with large memory.
vm.swappiness=10

# Expands the range of ephemeral ports available for outgoing connections.
# Range 1024-65535 maximizes the number of simultaneous connections, essential for network servers.
net.ipv4.ip_local_port_range=1024 65535

# Configures socket buffers to support high-speed (10 Gbps) networks.
# rmem_max/wmem_max define the maximum receive/send buffer size (64 MB).
# rmem_default/wmem_default (208 KB) are optimized for typical connections, reducing latency.
net.core.rmem_max=67108864
net.core.wmem_max=67108864
net.core.rmem_default=212992
net.core.wmem_default=212992

# Enables TCP window scaling, allowing larger windows for high-speed networks.
# Essential to maximize throughput on 10 Gbps low-latency links.
net.ipv4.tcp_window_scaling=1

# Sets the default queueing discipline to 'fq' (Fair Queueing) for efficient packet management.
# Combined with the CUBIC congestion algorithm, it improves stability on local networks.
net.core.default_qdisc=fq
net.ipv4.tcp_congestion_control=cubic

# Enables Selective Acknowledgement (SACK) to confirm out-of-order packets.
# Reduces unnecessary retransmissions, improving efficiency on lossy networks.
net.ipv4.tcp_sack=1

# Sets the maximum size for TCP socket options (40 KB).
# Supports advanced options (e.g., timestamps, SACK) without overloading the kernel.
net.core.optmem_max=40960

# Configures TCP receive buffers (min, default, max).
# Minimum of 8192 bytes supports initial connections; maximum of 16 MB scales for 10 Gbps.
net.ipv4.tcp_rmem=8192 87380 16777216

# Configures TCP send buffers (min, default, max).
# Minimum of 8192 bytes avoids startup bottlenecks; maximum of 16 MB supports high load.
net.ipv4.tcp_wmem=8192 65536 16777216

# Increases the network interface packet backlog (5000) to handle traffic spikes.
# Reduces packet loss on 10 Gbps networks without overloading the kernel.
net.core.netdev_max_backlog=5000

# Sets the maximum number of pending SYN connections (8192).
# Supports high loads of simultaneous connections, as on web or proxy servers.
net.ipv4.tcp_max_syn_backlog=8192

# Increases the maximum number of TIME_WAIT sockets (2 million).
# Prevents socket exhaustion on servers with many short-lived connections.
net.ipv4.tcp_max_tw_buckets=2000000

# Allows reuse of TIME_WAIT sockets for new connections, reducing delays.
net.ipv4.tcp_tw_reuse=1

# Reduces how long FIN_WAIT sockets remain open (10 seconds).
# Frees resources quickly on high-turnover connection servers.
net.ipv4.tcp_fin_timeout=10

# Enables TCP slow start after idle periods to reduce retransmissions on unstable networks.
# Helps stabilize connections on high-speed local networks.
# NOTE(review): 1 is the kernel default; persistent bulk-transfer workloads sometimes
# set this to 0 to avoid window collapse after idle -- confirm which behavior is intended.
net.ipv4.tcp_slow_start_after_idle=1

# Increases minimum buffers for UDP (16 KB) to support high-load UDP applications.
# Useful for streaming or other UDP services on 10 Gbps networks.
net.ipv4.udp_rmem_min=16384
net.ipv4.udp_wmem_min=16384

# Disables logging of packets with invalid addresses (martians) to reduce unnecessary logs.
# Improves security and prevents overload on network servers.
net.ipv4.conf.all.log_martians=0

# Disables acceptance of source routes to avoid vulnerabilities on local networks.
# Unnecessary in modern setups and increases security.
net.ipv4.conf.all.accept_source_route=0

# Disables sending of ICMP redirects to prevent routing attacks.
# Safe for local networks where redirects aren't used.
net.ipv4.conf.all.send_redirects=0

# Disables acceptance of ICMP redirects for greater security.
# Prevents malicious route manipulation on local networks.
net.ipv4.conf.all.accept_redirects=0

# Increases the maximum number of orphaned TCP sockets (262144).
# Supports servers with many connections without active clients (e.g., proxies).
net.ipv4.tcp_max_orphans=262144

# Enables MTU probing to dynamically adjust packet size.
# Avoids fragmentation on variable-MTU networks, reducing retransmissions.
net.ipv4.tcp_mtu_probing=1

# Enables TCP Fast Open (TFO) for faster connections.
# Value 3 enables TFO for both incoming and outgoing connections, ideal for web servers.
net.ipv4.tcp_fastopen=3

# Enables Forward RTO-Recovery (F-RTO) to improve loss recovery.
# Value 2 optimizes retransmissions on low-latency networks.
net.ipv4.tcp_frto=2

# Increases the number of packets processed per network interrupt (300).
# Improves packet handling on 10 Gbps networks, reducing losses.
net.core.netdev_budget=300

# Enables Early Retransmit to reduce retransmission latency.
# Value 3 optimizes lost packet recovery on local networks.
# NOTE(review): this sysctl was removed in kernel 4.19 (superseded by RACK loss
# detection); on newer kernels this line produces an "unknown key" warning -- confirm
# the target kernel version before relying on it.
net.ipv4.tcp_early_retrans=3

# Prioritizes low latency over maximum throughput.
# Ideal for local networks with RTT of 1-5 ms, reducing retransmissions.
# NOTE(review): tcp_low_latency has been a no-op since kernel 4.14; kept only for
# compatibility with older kernels -- verify whether it is still needed.
net.ipv4.tcp_low_latency=1

# Enables Explicit Congestion Notification (ECN) to prevent packet loss.
# Marks congestion before dropping packets, improving efficiency.
net.ipv4.tcp_ecn=1

# Increases the kernel's tendency to reclaim dentry/inode (VFS) cache memory.
# Values above the default of 100 free metadata caches more aggressively, leaving
# more RAM for application data at the cost of more filesystem-metadata cache misses.
vm.vfs_cache_pressure=200
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment