# Generated by nginxconfig.io
# See nginxconfig.txt for the configuration share link
# Note in comments by Harisfazillah Jamel and Google Gemini
# 15 Jun 2025
user www-data;
pid /run/nginx.pid;
worker_processes auto;
worker_rlimit_nofile 65535;
# Load modules
include /etc/nginx/modules-enabled/*.conf;
events {
multi_accept on;
worker_connections 65535;
}
http {
charset utf-8;
sendfile on;
tcp_nopush on;
tcp_nodelay on;
server_tokens off;
types_hash_max_size 2048;
types_hash_bucket_size 64;
client_max_body_size 16M;
# MIME
include mime.types;
default_type application/octet-stream;
# Log Format
log_format cloudflare '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $http_cf_ray $http_cf_connecting_ip $http_x_forwarded_for $http_x_forwarded_proto $http_true_client_ip $http_cf_ipcountry $http_cf_visitor $http_cdn_loop';
# Logging
access_log off;
error_log /var/log/nginx/error.log warn;
# Limits
limit_req_log_level warn;
limit_req_zone $binary_remote_addr zone=login:10m rate=10r/m;
# SSL
ssl_session_timeout 1d;
ssl_session_cache shared:SSL:10m;
ssl_session_tickets off;
# Mozilla Modern configuration
ssl_protocols TLSv1.3;
# OCSP Stapling
ssl_stapling on;
ssl_stapling_verify on;
resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001] 8.8.8.8 8.8.4.4 [2001:4860:4860::8888] [2001:4860:4860::8844] 208.67.222.222 208.67.220.220 [2620:119:35::35] [2620:119:53::53] 9.9.9.9 149.112.112.112 [2620:fe::fe] [2620:fe::9] 64.6.64.6 64.6.65.6 [2620:74:1b::1:1] [2620:74:1c::2:2] valid=60s;
resolver_timeout 2s;
# Connection header for WebSocket reverse proxy
map $http_upgrade $connection_upgrade {
default upgrade;
"" close;
}
map $remote_addr $proxy_forwarded_elem {
# IPv4 addresses can be sent as-is
~^[0-9.]+$ "for=$remote_addr";
# IPv6 addresses need to be bracketed and quoted
~^[0-9A-Fa-f:.]+$ "for=\"[$remote_addr]\"";
# Unix domain socket names cannot be represented in RFC 7239 syntax
default "for=unknown";
}
map $http_forwarded $proxy_add_forwarded {
# If the incoming Forwarded header is syntactically valid, append to it
"~^(,[ \\t]*)*([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?(;([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?)*([ \\t]*,([ \\t]*([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?(;([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?)*)?)*$" "$http_forwarded, $proxy_forwarded_elem";
# Otherwise, replace it
default "$proxy_forwarded_elem";
}
# --- NGINX CACHE CONFIGURATION START ---
# Define a cache zone named 'my_cache'
# Keys will be stored in memory (10m)
# Total cache size will be 1g
# Inactive items will be removed after 60m
# Cache directory: /var/cache/nginx
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m max_size=1g inactive=60m use_temp_path=off;
# --- NGINX CACHE CONFIGURATION END ---
# Load configs
include /etc/nginx/conf.d/*.conf;
# linuxmalaysia.com
server {
listen 443 ssl reuseport;
listen [::]:443 ssl reuseport;
http2 on;
server_name linuxmalaysia.com;
root /var/www/linuxmalaysia.com/public;
# SSL
ssl_certificate /etc/nginx/ssl/linuxmalaysia.com.crt;
ssl_certificate_key /etc/nginx/ssl/linuxmalaysia.com.key;
# security headers
add_header X-XSS-Protection "1; mode=block" always;
add_header X-Content-Type-Options "nosniff" always;
add_header Referrer-Policy "no-referrer-when-downgrade" always;
add_header Content-Security-Policy "default-src 'self' http: https: ws: wss: data: blob: 'unsafe-inline'; frame-ancestors 'self';" always;
add_header Permissions-Policy "interest-cohort=()" always;
add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;
# . files
location ~ /\.(?!well-known) {
deny all;
}
# security.txt
location /security.txt {
return 301 /.well-known/security.txt;
}
location = /.well-known/security.txt {
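# NOTE: the '~' in the alias below looks like an unfilled path placeholder; nginx will
# treat it literally, so point it at the actual directory or file holding security.txt.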
alias ~/security.txt;
}
# restrict methods
if ($request_method !~ ^(GET|POST|PUT|HEAD|CONNECT)$) {
return '405';
}
# logging
access_log /var/log/nginx/access.log combined buffer=512k flush=1m;
error_log /var/log/nginx/error.log warn;
# reverse proxy
location / {
proxy_pass http://127.0.0.1:3000;
proxy_set_header Host $host;
proxy_http_version 1.1;
proxy_cache_bypass $http_upgrade;
# --- NGINX CACHE APPLICATION START ---
proxy_cache my_cache; # Use the defined cache zone
proxy_cache_valid 200 302 10m; # Cache successful responses for 10 minutes
proxy_cache_valid 404 1m; # Cache 404 responses for 1 minute
proxy_cache_key "$scheme$proxy_host$request_uri"; # Define the cache key
proxy_cache_min_uses 1; # Cache after 1 request
proxy_cache_revalidate on; # Revalidate stale content
proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; # Serve stale content on errors
# Add header to see if content is served from cache (optional, for debugging)
add_header X-Cache-Status $upstream_cache_status;
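# NOTE: because add_header appears in this location, the server-level security headers
# above are not inherited here; repeat them (or move them into a shared include) if they
# should also be sent on proxied responses.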
# --- NGINX CACHE APPLICATION END ---
# Proxy SSL
proxy_ssl_server_name on;
# Proxy headers
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $connection_upgrade;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header Forwarded $proxy_add_forwarded;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $host;
proxy_set_header X-Forwarded-Port $server_port;
# Proxy timeouts
proxy_connect_timeout 60s;
proxy_send_timeout 60s;
proxy_read_timeout 60s;
}
# favicon.ico
location = /favicon.ico {
log_not_found off;
}
# robots.txt
location = /robots.txt {
log_not_found off;
}
# # assets, media
# location ~* \.(?:css(\.map)?|js(\.map)?|jpe?g|png|gif|ico|cur|heic|webp|tiff?|mp3|m4a|aac|ogg|midi?|wav|mp4|mov|webm|mpe?g|avi|ogv|flv|wmv)$ {
# expires 7d;
# }
#
# # svg, fonts
# location ~* \.(?:svgz?|ttf|ttc|otf|eot|woff2?)$ {
# add_header Access-Control-Allow-Origin "*";
# expires 7d;
# }
# gzip
gzip on;
gzip_vary on;
gzip_proxied any;
gzip_comp_level 6;
gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml;
}
# HTTP redirect
server {
listen 80 reuseport;
listen [::]:80 reuseport;
server_name linuxmalaysia.com;
# logging
access_log /var/log/nginx/access.log combined buffer=512k flush=1m;
error_log /var/log/nginx/error.log warn;
return 301 https://linuxmalaysia.com$request_uri;
}
}
@linuxmalaysia (Author):

User ulimit (for Nginx user):

Even with fs.file-max set, the user running Nginx (often nginx or www-data) needs a high nofile limit in /etc/security/limits.conf or a file in /etc/security/limits.d/.

www-data soft nofile 655360
www-data hard nofile 655360
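
On systems where Nginx is managed by systemd, /etc/security/limits.conf is not consulted for the service itself (pam_limits only applies to login sessions), so the limit has to be set on the unit instead. A minimal sketch, assuming the stock nginx.service unit (the drop-in path is what sudo systemctl edit nginx creates):

# /etc/systemd/system/nginx.service.d/override.conf
[Service]
LimitNOFILE=655360

Follow with sudo systemctl daemon-reload && sudo systemctl restart nginx. Independently of this, worker_rlimit_nofile in nginx.conf raises the workers' limit directly, because the master process runs as root.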

@linuxmalaysia (Author):


Let's break down the events block in your Nginx configuration. This block is crucial because it defines how Nginx handles connections and influences its scalability and performance, particularly concerning concurrent users.

events { ... }

The events block is a top-level configuration block in Nginx (alongside http, mail, stream, etc.). It contains directives that set global parameters for connection processing.
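
For reference, the events block from the configuration above is:

events {
    multi_accept on;
    worker_connections 65535;
}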

1. worker_connections 65535;

  • Explanation: This directive specifies the maximum number of simultaneous connections that a single Nginx worker process can open.
    • Since Nginx typically runs multiple worker processes (controlled by worker_processes in the main configuration block, often set to auto or the number of CPU cores), the total maximum number of connections Nginx can handle concurrently is worker_connections * worker_processes.
    • For example, if you have 4 worker processes and worker_connections 65535;, Nginx can theoretically handle $4 \times 65535 = 262140$ simultaneous connections.
  • Performance Impact (Scalability & Resource Usage):
    • High Concurrency: A high worker_connections value allows Nginx to handle a very large number of concurrent clients, which is essential for high-traffic websites.
    • Memory: Each connection consumes a small amount of memory. Setting this too high without sufficient RAM could lead to memory exhaustion.
    • CPU: While Nginx is very efficient, handling a vast number of connections will consume CPU cycles for processing requests.
    • File Descriptors: Each connection is a file descriptor. This setting must be lower than or equal to the operating system's per-process file descriptor limit.
  • Suggestions:
    • 65535 is a common, high value, indicating a server designed for high concurrency.
    • Practical Limit: While Nginx might be configured for 65,535 connections per worker, the actual practical limit is often determined by the server's CPU, memory, and network bandwidth.
    • Monitoring: Monitor your server's resource usage (CPU, RAM, open file descriptors) under load to ensure it can comfortably handle this many connections; a couple of quick checks are sketched below. If you consistently hit the limit, consider scaling up your hardware or optimizing other parts of your Nginx/application stack.
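
To make the monitoring suggestion concrete, two quick checks on a live Linux host (run as root; the 'nginx: worker' process title is Nginx's default and is assumed here):

# Open file descriptors held by each worker process
for pid in $(pgrep -f 'nginx: worker'); do echo "$pid: $(ls /proc/$pid/fd | wc -l) fds"; done
# System-wide socket summary (established, time-wait, etc.)
ss -s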

2. worker_rlimit_nofile 655360;

  • Explanation: This directive sets the maximum number of open file descriptors (the RLIMIT_NOFILE limit) for Nginx's worker processes.
    • A file descriptor is used for every connection, every file being served, every log file, and so on.
    • The value 655360 (655,360) is roughly ten times the worker_connections value (note that the snippet above currently sets worker_rlimit_nofile 65535).
  • Performance Impact (Stability & Reliability):
    • Prevents "Too Many Open Files" Errors: If Nginx runs out of file descriptors, it cannot accept new connections or open files, leading to service disruption and "Too many open files" errors in your logs.
    • Must be High Enough: This value must be at least worker_connections. It's often recommended to set it higher (e.g., 1.5x to 2x worker_connections for general use, or even 10x as you have for very busy servers) to account for other file descriptors used by Nginx (e.g., log files, cache files, internal pipes, upstream connections).
    • Operating System Limit: This Nginx setting cannot exceed the operating system's system-wide file descriptor limit (fs.file-max) or the per-user/per-process ulimit -n setting. You often need to adjust these OS limits first.
  • Suggestions:
    • 655360 is a very high and appropriate value for a server aiming for high concurrency.
    • Verify OS Limits: Before setting worker_rlimit_nofile this high, you must ensure your Linux system's ulimit -n for the Nginx user and fs.file-max kernel parameter are set to at least this value (or higher).
      • To check ulimit -n for the Nginx user (e.g., www-data; the -s override is needed because its login shell is usually nologin):
        sudo su -s /bin/bash - www-data -c 'ulimit -n'
      • To check system-wide fs.file-max:
        cat /proc/sys/fs/file-max
      • To temporarily set ulimit -n for the current shell (e.g., for testing): ulimit -n 655360
      • To make it permanent, you'd typically edit /etc/security/limits.conf (for nofile for the www-data user) and /etc/sysctl.conf (for fs.file-max), as sketched below.
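
A minimal sketch of the permanent settings (the file names under /etc/sysctl.d/ and /etc/security/limits.d/ are arbitrary choices, not fixed names):

# /etc/sysctl.d/99-nginx.conf -- system-wide ceiling, must be at least the per-process limit
fs.file-max = 655360

# /etc/security/limits.d/nginx.conf -- per-user limit for the Nginx user
www-data soft nofile 655360
www-data hard nofile 655360

Apply the sysctl change with sudo sysctl --system; the limits.d entries take effect at the next login session and, as noted earlier, do not apply to a systemd-managed service (use LimitNOFILE there).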

3. use epoll; (not set explicitly in the snippet above)

  • Explanation: This directive specifies the connection processing method Nginx will use.
    • epoll is the most efficient and scalable I/O event notification mechanism on Linux systems. It allows Nginx to efficiently monitor a large number of file descriptors for readiness events (e.g., a new connection, data arriving on an existing connection).
    • Other methods exist (kqueue for FreeBSD/macOS, select, poll), but epoll is the default and preferred method on Linux due to its superior performance for high concurrency.
  • Performance Impact (CPU & Scalability):
    • Highly Efficient: epoll drastically reduces the CPU overhead associated with monitoring thousands of connections compared to older methods like select or poll. It scales very well with the number of connections.
  • Suggestions:
    • Explicitly include it: While Nginx often defaults to the best method for the detected OS, it's good practice to explicitly include use epoll; in your events block on Linux, as it clearly states your intent and ensures Nginx uses the most efficient method; a sketch follows below.
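
For example, the events block above could be written as follows (a sketch; behaviour on Linux is unchanged, since epoll is already the default there):

events {
    multi_accept on;
    use epoll;                 # explicit; Nginx would normally pick epoll on Linux anyway
    worker_connections 65535;
}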

Overall Impact on Nginx Performance (CPU and Memory)

  • CPU: worker_connections and the chosen use method directly impact CPU. A higher number of connections means more work for the CPU, but epoll minimizes the overhead of managing those connections.
  • Memory: worker_connections directly influences memory consumption as each connection requires a small amount of memory. worker_rlimit_nofile ensures Nginx has enough capacity to use file descriptors but doesn't directly consume memory itself (it's a limit, not an allocation).

Your events block configuration is very aggressive and suitable for a high-performance web server aiming to handle significant traffic, which aligns with serving linuxmalaysia.com effectively. The high worker_connections and worker_rlimit_nofile values are excellent for scalability, provided your underlying operating system is also configured to support them.
