-
-
Save linuxmalaysia/873c396d43b9933bd0e01495bf4540fe to your computer and use it in GitHub Desktop.
# Generated by nginxconfig.io
# See nginxconfig.txt for the configuration share link
# Notes in comments by Harisfazillah Jamel and Google Gemini
# 15 Jun 2025

user www-data;
pid /run/nginx.pid;
worker_processes auto;

# Per-worker cap on open file descriptors. Keep >= worker_connections and
# within the OS limits (fs.file-max, and "nofile" for www-data in
# /etc/security/limits.conf). Currently equal to worker_connections; raise
# it (e.g. 2x) if workers also need descriptors for upstream connections,
# logs and cache files.
worker_rlimit_nofile 65535;

# Load modules
include /etc/nginx/modules-enabled/*.conf;

events {
    multi_accept on;
    worker_connections 65535;
}

http {
    charset utf-8;
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    server_tokens off;
    types_hash_max_size 2048;
    types_hash_bucket_size 64;
    client_max_body_size 16M;

    # MIME
    include mime.types;
    default_type application/octet-stream;

    # Log format carrying the Cloudflare-specific request headers
    log_format cloudflare '$remote_addr - $remote_user [$time_local] "$request" $status $body_bytes_sent "$http_referer" "$http_user_agent" $http_cf_ray $http_cf_connecting_ip $http_x_forwarded_for $http_x_forwarded_proto $http_true_client_ip $http_cf_ipcountry $http_cf_visitor $http_cdn_loop';

    # Logging: globally off; each server block enables its own access log.
    access_log off;
    error_log /var/log/nginx/error.log warn;

    # Limits
    # NOTE(review): the "login" zone is declared but never applied; add
    # "limit_req zone=login;" in a location block (e.g. a login endpoint)
    # to activate it.
    limit_req_log_level warn;
    limit_req_zone $binary_remote_addr zone=login:10m rate=10r/m;

    # SSL
    ssl_session_timeout 1d;
    ssl_session_cache shared:SSL:10m;
    ssl_session_tickets off;

    # Mozilla Modern configuration (TLS 1.3 only; pre-TLS1.3 clients cannot connect)
    ssl_protocols TLSv1.3;

    # OCSP Stapling
    # NOTE(review): ssl_stapling_verify normally also requires
    # ssl_trusted_certificate pointing at the issuer chain — confirm the
    # chain is bundled in the certificate file or add that directive.
    ssl_stapling on;
    ssl_stapling_verify on;
    resolver 1.1.1.1 1.0.0.1 [2606:4700:4700::1111] [2606:4700:4700::1001] 8.8.8.8 8.8.4.4 [2001:4860:4860::8888] [2001:4860:4860::8844] 208.67.222.222 208.67.220.220 [2620:119:35::35] [2620:119:53::53] 9.9.9.9 149.112.112.112 [2620:fe::fe] [2620:fe::9] 64.6.64.6 64.6.65.6 [2620:74:1b::1:1] [2620:74:1c::2:2] valid=60s;
    resolver_timeout 2s;

    # Connection header for WebSocket reverse proxy
    map $http_upgrade $connection_upgrade {
        default upgrade;
        ""      close;
    }

    map $remote_addr $proxy_forwarded_elem {
        # IPv4 addresses can be sent as-is
        ~^[0-9.]+$ "for=$remote_addr";

        # IPv6 addresses need to be bracketed and quoted
        ~^[0-9A-Fa-f:.]+$ "for=\"[$remote_addr]\"";

        # Unix domain socket names cannot be represented in RFC 7239 syntax
        default "for=unknown";
    }

    map $http_forwarded $proxy_add_forwarded {
        # If the incoming Forwarded header is syntactically valid (RFC 7239), append to it
        "~^(,[ \\t]*)*([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?(;([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?)*([ \\t]*,([ \\t]*([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?(;([!#$%&'*+.^_`|~0-9A-Za-z-]+=([!#$%&'*+.^_`|~0-9A-Za-z-]+|\"([\\t \\x21\\x23-\\x5B\\x5D-\\x7E\\x80-\\xFF]|\\\\[\\t \\x21-\\x7E\\x80-\\xFF])*\"))?)*)?)*$" "$http_forwarded, $proxy_forwarded_elem";

        # Otherwise, replace it
        default "$proxy_forwarded_elem";
    }

    # --- NGINX CACHE CONFIGURATION START ---
    # 'my_cache' zone: 10 MB of keys in shared memory, up to 1 GB on disk
    # under /var/cache/nginx; entries unused for 60 min are evicted.
    proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m max_size=1g inactive=60m use_temp_path=off;
    # --- NGINX CACHE CONFIGURATION END ---

    # Load configs
    include /etc/nginx/conf.d/*.conf;

    # linuxmalaysia.com
    server {
        listen 443 ssl reuseport;
        listen [::]:443 ssl reuseport;
        http2 on;
        server_name linuxmalaysia.com;
        root /var/www/linuxmalaysia.com/public;

        # SSL
        ssl_certificate /etc/nginx/ssl/linuxmalaysia.com.crt;
        ssl_certificate_key /etc/nginx/ssl/linuxmalaysia.com.key;

        # security headers
        add_header X-XSS-Protection "1; mode=block" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header Referrer-Policy "no-referrer-when-downgrade" always;
        add_header Content-Security-Policy "default-src 'self' http: https: ws: wss: data: blob: 'unsafe-inline'; frame-ancestors 'self';" always;
        add_header Permissions-Policy "interest-cohort=()" always;
        add_header Strict-Transport-Security "max-age=31536000; includeSubDomains; preload" always;

        # . files — block dotfiles (.env, .git, .htaccess, ...) except /.well-known/
        location ~ /\.(?!well-known) {
            deny all;
        }

        # security.txt — canonical location is /.well-known/ per RFC 9116
        location /security.txt {
            return 301 /.well-known/security.txt;
        }
        location = /.well-known/security.txt {
            # FIX: was "alias ~/security.txt;". nginx does not expand "~",
            # so that pointed at a literal "~" path and could never serve
            # the file. Use an absolute path (readable by www-data).
            alias /var/www/linuxmalaysia.com/security.txt;
        }

        # restrict methods
        # NOTE(review): OPTIONS, DELETE and PATCH are rejected too — add them
        # here if the backend API needs them (e.g. CORS preflight).
        if ($request_method !~ ^(GET|POST|PUT|HEAD|CONNECT)$) {
            return 405;
        }

        # logging
        access_log /var/log/nginx/access.log combined buffer=512k flush=1m;
        error_log /var/log/nginx/error.log warn;

        # reverse proxy
        location / {
            proxy_pass http://127.0.0.1:3000;
            proxy_set_header Host $host;
            proxy_http_version 1.1;
            proxy_cache_bypass $http_upgrade;

            # --- NGINX CACHE APPLICATION START ---
            proxy_cache my_cache;                             # Use the defined cache zone
            proxy_cache_valid 200 302 10m;                    # Cache successful responses for 10 minutes
            proxy_cache_valid 404 1m;                         # Cache 404 responses for 1 minute
            proxy_cache_key "$scheme$proxy_host$request_uri"; # Define the cache key
            proxy_cache_min_uses 1;                           # Cache after 1 request
            proxy_cache_revalidate on;                        # Revalidate stale content
            proxy_cache_use_stale error timeout updating http_500 http_502 http_503 http_504; # Serve stale content on errors
            # Add header to see if content is served from cache (optional, for debugging)
            add_header X-Cache-Status $upstream_cache_status;
            # --- NGINX CACHE APPLICATION END ---

            # Proxy SSL (no-op while proxy_pass is plain http; relevant if the
            # backend is ever switched to https)
            proxy_ssl_server_name on;

            # Proxy headers
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection $connection_upgrade;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header Forwarded $proxy_add_forwarded;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            proxy_set_header X-Forwarded-Host $host;
            proxy_set_header X-Forwarded-Port $server_port;

            # Proxy timeouts
            proxy_connect_timeout 60s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;
        }

        # favicon.ico — don't log 404s for browser auto-requests
        location = /favicon.ico {
            log_not_found off;
        }

        # robots.txt — don't log 404s for crawler auto-requests
        location = /robots.txt {
            log_not_found off;
        }

        # # assets, media
        # location ~* \.(?:css(\.map)?|js(\.map)?|jpe?g|png|gif|ico|cur|heic|webp|tiff?|mp3|m4a|aac|ogg|midi?|wav|mp4|mov|webm|mpe?g|avi|ogv|flv|wmv)$ {
        #     expires 7d;
        # }
        #
        # # svg, fonts
        # location ~* \.(?:svgz?|ttf|ttc|otf|eot|woff2?)$ {
        #     add_header Access-Control-Allow-Origin "*";
        #     expires 7d;
        # }

        # gzip
        gzip on;
        gzip_vary on;
        gzip_proxied any;
        gzip_comp_level 6;
        gzip_types text/plain text/css text/xml application/json application/javascript application/rss+xml application/atom+xml image/svg+xml;
    }

    # HTTP redirect
    server {
        listen 80 reuseport;
        listen [::]:80 reuseport;
        server_name linuxmalaysia.com;

        # logging
        access_log /var/log/nginx/access.log combined buffer=512k flush=1m;
        error_log /var/log/nginx/error.log warn;

        return 301 https://linuxmalaysia.com$request_uri;
    }
}
Nginx Proxy SSL Server Name Indication (SNI)
You're asking about the proxy_ssl_server_name
directive. This setting is crucial when Nginx is proxying HTTPS requests to a backend server that hosts multiple SSL certificates on the same IP address.
Here's a breakdown:
What is SNI (Server Name Indication)?
Traditionally, for a server to host multiple secure websites (each with its own SSL certificate) on a single IP address, it would need a separate IP address for each. This is because the SSL handshake happens before the HTTP Host
header (which indicates the desired website) is sent. So, the server wouldn't know which certificate to present.
Server Name Indication (SNI) is an extension to the TLS protocol that solves this problem. With SNI, the client (e.g., a web browser) includes the hostname it's trying to connect to within the initial TLS handshake. This allows the server to present the correct SSL certificate, even if it's hosting many websites on the same IP address.
proxy_ssl_server_name on;
- Explanation: When
proxy_ssl_server_name
is set toon
, Nginx will pass the hostname from the client's original request (theHost
header Nginx received from the client) to your backend server during the SSL/TLS handshake. - How it works:
- A client connects to Nginx over HTTPS (e.g.,
https://linuxmalaysia.com
). - Nginx receives the request, including the
Host
header (linuxmalaysia.com
). - Nginx acts as a client to your backend server (
http://127.0.0.1:3000
). - If your backend also uses HTTPS (e.g.,
https://127.0.0.1:3000
), Nginx will include the original hostname (linuxmalaysia.com
) in its TLS handshake with the backend. - This allows your backend server to select and present the correct SSL certificate for
linuxmalaysia.com
(if it hosts multiple SSL-secured sites) to Nginx.
- A client connects to Nginx over HTTPS (e.g.,
- When it's important: This setting is essential if:
- Your Nginx instance is proxying to an HTTPS backend (meaning
proxy_pass
is tohttps://...
). - And that HTTPS backend hosts multiple domains/certificates on the same IP address. Without
proxy_ssl_server_name on;
, the backend might present the wrong certificate, leading to SSL errors between Nginx and the backend.
- Your Nginx instance is proxying to an HTTPS backend (meaning
Performance Impact
- Negligible on CPU/Memory: This setting has a minimal to negligible impact on Nginx's CPU or memory usage. It's primarily about ensuring the correct SSL handshake happens between Nginx and the backend.
- Crucial for Correctness: Its main "performance" benefit is that it enables your SSL-secured backend to function correctly in a multi-domain setup. Without it, you'd encounter SSL handshake failures or certificate mismatches between Nginx and your backend, leading to client errors and service outages.
Suggestions
- Keep it
on
if your backend uses HTTPS and hosts multiple domains: If yourproxy_pass
URL ishttps://127.0.0.1:3000
(instead ofhttp://127.0.0.1:3000
as in your current config) AND your backend application serves different domains over HTTPS from a single IP, thenproxy_ssl_server_name on;
is absolutely necessary. - It's currently redundant in your provided config: In your current configuration,
proxy_pass
is set tohttp://127.0.0.1:3000;
. This means Nginx is connecting to your backend using plain HTTP, not HTTPS. In this scenario,proxy_ssl_server_name on;
has no effect because there's no SSL handshake happening between Nginx and the backend.
Conclusion: While proxy_ssl_server_name on;
is a best practice for Nginx when proxying to HTTPS backends, it's currently a no-op given your proxy_pass http://127.0.0.1:3000;
directive. If you ever switch your backend to serve over HTTPS, this line will become highly relevant and important.
Nginx Security and .well-known
File Handling
You're configuring some important aspects of how Nginx handles specific files and directories, particularly those starting with a dot (.
) and the security.txt
file. These settings are crucial for both security and compliance with modern web standards.
Let's break down each of these location
blocks:
1. location ~ /\.(?!well-known) { deny all; }
- Explanation: This
location
block uses a regular expression to match requests for files or directories that start with a dot (.
) but are NOT within the.well-known
directory.~
: Indicates a regular expression match (case-sensitive)./\.
: Matches a literal dot at the beginning of a path segment (like.htaccess
,.env
,.git
).(?!well-known)
: This is a negative lookahead assertion. It means "match a dot, but only if it's NOT followed by 'well-known'".deny all;
: If a request matches this pattern, Nginx will immediately deny access, returning a403 Forbidden
error.
- Security Benefit: This is a critical security measure. Many sensitive files used by applications or version control systems (like
.env
for environment variables,.git
for Git repositories,.htaccess
for Apache configurations) start with a dot. Serving these files publicly could expose sensitive information, leading to severe security vulnerabilities. This directive ensures they are never served by Nginx. - Performance Impact: Negligible. It's a quick check and deny, which is very efficient.
- Suggestion: Keep this as is. It's a fundamental security best practice.
2. location /security.txt { return 301 /.well-known/security.txt; }
- Explanation: This
location
block handles requests for/security.txt
. It issues a permanent redirect (301 Moved Permanently
) to/.well-known/security.txt
. - Security Benefit: The
security.txt
file is a proposed standard for website security, allowing security researchers to easily find contact information for reporting vulnerabilities. The official standard dictates it should be placed under the/.well-known/
directory. This redirect ensures that if someone (human or automated scanner) looks for the file at the old or common root location, they are guided to the correct, standardized location. - Performance Impact: Minor. It's a single HTTP redirect. The
301
status code tells browsers and search engines to cache the redirect, so subsequent requests go directly to the new location. - Suggestion: Keep this redirect. It helps with compliance and discoverability for security researchers.
3. location = /.well-known/security.txt { alias ~/security.txt; }
- Explanation: This `location` block specifically handles requests for `/.well-known/security.txt`.
  - `=`: Denotes an exact match. This means this block will only process requests that precisely match `/.well-known/security.txt`.
  - `alias ~/security.txt;`: This is the important — and broken — part.
    - `alias`: Specifies a different base path for the file than the `root` directive of the `server` block.
    - `~/security.txt`: Correction: Nginx does **not** perform shell-style tilde expansion in `alias` (or any path directive). The `~` is treated as a literal relative path component, so this directive points at a file literally named `~/security.txt` under the Nginx prefix directory and will never serve your file (requests will fail with 404/403). Use an absolute path instead, e.g. `alias /var/www/linuxmalaysia.com/security.txt;`, readable by the `www-data` user.
- Security Benefit: Correctly serves the
security.txt
file from a designated location, fulfilling the proposed standard for vulnerability reporting. - Performance Impact: Negligible. Serving a small static file is very efficient for Nginx.
- Important Consideration / Suggestion:
- File Location: You need to ensure that the
security.txt
file actually exists at the path specified byalias ~/security.txt
. Based on your earlierroot /var/www/linuxmalaysia.com/public;
setting, a more common and explicit path for this would bealias /var/www/linuxmalaysia.com/security.txt;
or evenalias /etc/nginx/security.txt;
if you prefer to keep it separate from your website's public directory. - User Permissions: The Nginx user (
www-data
) must have read permissions for thesecurity.txt
file at thealias
path.
- File Location: You need to ensure that the
Overall Summary of these Directives:
These location
blocks demonstrate good practices for managing specific file access and adhering to web standards:
- The
deny all
for dotfiles is a crucial security hardened default. - The
security.txt
redirects and serving ensure you comply with a valuable security and transparency standard.
Your configuration for these elements is generally solid. The main point to double-check is the exact path of your security.txt
file to ensure it aligns with the alias
directive's interpretation of ~
.
Is there anything else you'd like to understand about your Nginx setup, or any other parts of the configuration you'd like to review?
Nginx Request Method Restriction
You're using an if
directive to restrict the HTTP methods that Nginx will allow for requests. This is a security measure that can help protect your backend application from unexpected or potentially malicious requests using methods your application isn't designed to handle.
Let's break down this configuration:
-
if ($request_method !~ ^(GET|POST|PUT|HEAD|CONNECT)$) { ... }
if
directive: This is a conditional statement in Nginx. While generally discouraged for complex logic (due to some known issues and performance characteristics with complexif
blocks), for simple checks like this, it's often used.$request_method
: This is a built-in Nginx variable that holds the HTTP method of the current request (e.g.,GET
,POST
,DELETE
,OPTIONS
).!~
: This is the "does not match" operator for regular expressions (case-sensitive).^(GET|POST|PUT|HEAD|CONNECT)$
: This is the regular expression pattern.^
: Matches the beginning of the string.$
: Matches the end of the string.(GET|POST|PUT|HEAD|CONNECT)
: This is a group that matches any of the specified HTTP methods.GET
: Used to retrieve data.POST
: Used to submit data to be processed.PUT
: Used to create or replace a resource.HEAD
: Similar to GET, but requests only the headers, not the body.CONNECT
: Used to establish a tunnel to the server, primarily for proxies.
- In summary: The
if
condition evaluates totrue
if the$request_method
is not one ofGET
,POST
,PUT
,HEAD
, orCONNECT
.
-
return '405';
- Explanation: If the
if
condition is true (i.e., the request method is not one of the allowed ones), Nginx will immediately return an HTTP405 Method Not Allowed
status code to the client. This tells the client that the method used is not supported for the requested resource.
- Explanation: If the
Security Benefit
- Method Filtering: This helps ensure that your backend application only receives requests using the HTTP methods it's designed to process.
- Reduced Attack Surface: By explicitly denying unexpected methods, you can reduce the attack surface. For example, if your application doesn't implement
DELETE
orOPTIONS
(which it likely should for REST APIs, but this is a generic setup), blocking them at the Nginx level can prevent potential vulnerabilities if these methods were somehow mishandled by your backend. - Compliance: Can help in scenarios where strict adherence to allowed methods is required.
Performance Impact
- Minimal CPU/Memory: The impact on Nginx's CPU and memory is very minimal. Regular expression matching for a short string is a highly optimized operation. If a method is denied, Nginx returns the error immediately, consuming very few resources for that request.
- Efficiency: For disallowed methods, Nginx efficiently terminates the request at the edge, preventing it from ever reaching your backend application. This saves CPU and resources on your backend that would otherwise be spent processing and rejecting an invalid request.
Suggestions
- Consider your application's needs: Before implementing method restrictions, ensure that your backend application genuinely does not use any other HTTP methods.
- For a typical RESTful API, you'd usually also allow
DELETE
andPATCH
. OPTIONS
is commonly used for CORS (Cross-Origin Resource Sharing) preflight requests. If you have a frontend application on a different domain interacting with your backend, denyingOPTIONS
will break CORS.
- For a typical RESTful API, you'd usually also allow
- Placement: This
if
block is typically placed within aserver
block or a specificlocation
block. Placing it in thehttp
block (which seems to be the case here, given the context of your previous snippets) applies it globally to allserver
blocks unless overridden. This is fine for a blanket restriction. - Nginx
if
directive caution: While this specific use case is generally considered acceptable forif
, it's worth noting that theif
directive can sometimes lead to unexpected behavior when combined with other directives, especially in more complex scenarios. For method restriction specifically, it's usually safe.
In summary, this if
block is a simple yet effective security measure to restrict HTTP methods, contributing to a more robust and secure Nginx setup. Just ensure the allowed methods align with your application's actual requirements.
Nginx Configuration for favicon.ico
and robots.txt
These two location
blocks are dedicated to handling requests for favicon.ico
and robots.txt
. While seemingly minor, they help optimize your Nginx logs and prevent unnecessary logging noise, which can be beneficial for performance in very high-traffic scenarios.
Let's look at each:
1. location = /favicon.ico { log_not_found off; }
-
Explanation: This block specifically targets requests for
/favicon.ico
.location = /favicon.ico
: The=
sign ensures an exact match for this URI. Requests forfavicon.ico
(which is the small icon displayed in browser tabs) are very common.log_not_found off;
: This is the key part. It tells Nginx not to log 404 "file not found" errors to theerror_log
iffavicon.ico
isn't found.
-
Performance Impact:
- Reduced Log Noise: Favicons are often requested automatically by browsers, and if the file doesn't exist (or isn't found at that exact path), it would generate a
404
error entry in yourerror_log
. Turninglog_not_found off
keeps your error logs cleaner and more focused on actual issues. - Minimal CPU/Disk I/O: For very high-traffic sites, preventing thousands of
404
error log entries can save a tiny amount of CPU and disk I/O, as Nginx doesn't have to write these messages. This is more about efficiency and clean logs than a major performance boost.
- Reduced Log Noise: Favicons are often requested automatically by browsers, and if the file doesn't exist (or isn't found at that exact path), it would generate a
-
Suggestion: Keep this setting. It's a standard and good practice for managing Nginx logs.
2. location = /robots.txt { log_not_found off; }
-
Explanation: This block is identical in purpose to the
favicon.ico
one, but it applies to/robots.txt
.location = /robots.txt
: Ensures an exact match for therobots.txt
file, which search engine crawlers (like Googlebot) request to understand your site's crawling rules.log_not_found off;
: Prevents404
errors forrobots.txt
from being logged if the file isn't found.
-
Performance Impact:
- Reduced Log Noise: Similar to
favicon.ico
, ifrobots.txt
isn't present or found by Nginx, it would generate404
entries. This directive keeps those out of your error logs. - Minimal CPU/Disk I/O: Again, this contributes to marginal savings in CPU and disk I/O by avoiding unnecessary log writes.
- Reduced Log Noise: Similar to
-
Suggestion: Keep this setting. It's another standard and good practice for log management.
Overall Summary
These directives don't directly enhance the speed of content delivery, but they are beneficial for the operational efficiency and clarity of your Nginx server. By explicitly turning off 404
logging for these commonly requested (and sometimes missing) files, you keep your error logs focused on more significant issues, making it easier to monitor and troubleshoot your server. This is especially useful for managing a web server like yours, which handles traffic in Cyberjaya, Selangor, Malaysia, and globally.
That's a very common if
block you've got there! While it serves a good purpose in restricting HTTP methods, the general consensus in the Nginx community is to avoid if
statements when possible due to potential unexpected behavior in certain complex scenarios.
However, for simple method checks like this, it's often deemed acceptable. If you're running a basic web server with a standard application (like a blog or a simple API), your current setup is likely fine and provides good security benefits by explicitly allowing only the methods your application expects.
Here are a few suggestions based on your current configuration and common Nginx practices:
1. Keep it as is (if your application strictly uses these methods)
If your application truly only relies on GET
, POST
, PUT
, HEAD
, and CONNECT
methods, then your current if
statement effectively limits unnecessary requests to your backend, providing a basic security layer.
- Pros: Simple, readable, and immediately understood. It effectively filters unwanted methods at the Nginx edge.
- Cons: Uses an
if
statement, which purists generally avoid, though its impact here is minimal. - Use Case: Ideal for applications that definitely don't use methods like
DELETE
,PATCH
, orOPTIONS
(e.g., a static site served through Nginx, or an API that uses only the listed methods).
2. Refine the Allowed Methods (if your application needs more)
Consider if your application requires other standard HTTP methods. For example, if you're building a RESTful API, you'll very likely need DELETE
and PATCH
. Additionally, OPTIONS
is frequently used for CORS (Cross-Origin Resource Sharing) preflight requests. If you have a frontend JavaScript application hosted on a different domain from your API, it will send OPTIONS
requests, and blocking them will break your frontend's ability to communicate with the API.
Suggestion: Add any methods your application legitimately uses. For a typical API, a more comprehensive list might look like this:
---
# Suggested Method Restriction
if ($request_method !~ ^(GET|POST|PUT|DELETE|PATCH|HEAD|OPTIONS|CONNECT)$) {
return 405;
}
- Pros: Ensures your application functions correctly with all necessary HTTP verbs. Handles common CORS scenarios.
- Cons: Still uses an
if
statement. - Use Case: Recommended for modern web applications, especially those with APIs and separate frontends.
3. Remove the Restriction (if you're unsure or your backend handles it)
If you're unsure which methods your backend uses or if your application server (e.g., Node.js, Python, PHP framework) is already robustly handling method validation, you could simply remove this if
block.
- Pros: Simplifies your Nginx configuration. Defers method validation entirely to your application, which might be more appropriate if the application logic is complex.
- Cons: Less filtering at the Nginx edge; all method types will hit your backend, potentially exposing it to methods it doesn't handle.
- Use Case: When your application framework (like Express.js, Django REST Framework, Laravel) is explicitly designed to handle all HTTP methods and return appropriate
405
responses or specific logic for each, making Nginx's check redundant.
Which suggestion is best for you?
Given your setup with proxy_pass http://127.0.0.1:3000;
, it sounds like you're running a backend application (likely Node.js, since 3000 is a common port).
- If you're running a simple content-serving application or a blog without complex interactive features/APIs, then keeping your current list of methods is probably sufficient.
- However, if your
linuxmalaysia.com
site includes APIs, user interactions, or forms that might update/delete data, or if you have a separate frontend that makes AJAX requests, then I'd strongly suggest you refine the allowed methods to includeOPTIONS
,DELETE
, andPATCH
(if your API uses them). This will prevent unexpected405
errors and ensure full functionality.
Before making any changes, always test thoroughly to ensure your application behaves as expected with the chosen method restrictions!
User ulimit (for Nginx user):
Even with fs.file-max set, the user running Nginx (often nginx or www-data) needs a high nofile limit in /etc/security/limits.conf or a file in /etc/security/limits.d/.
nginx soft nofile 655360
nginx hard nofile 655360
Let's break down the events
block in your Nginx configuration. This block is crucial because it defines how Nginx handles connections and influences its scalability and performance, particularly concerning concurrent users.
events { ... }
The events
block is a top-level configuration block in Nginx (alongside http
, mail
, stream
, etc.). It contains directives that set global parameters for connection processing.
1. worker_connections 65535; (note: the events block above sets 65535; some figures below use the rounded value 65536)
-
Explanation: This directive specifies the maximum number of simultaneous connections that a single Nginx worker process can open.
- Since Nginx typically runs multiple worker processes (controlled by
worker_processes
in the main configuration block, often set toauto
or the number of CPU cores), the total maximum number of connections Nginx can handle concurrently isworker_connections * worker_processes
. - For example, if you have 4 worker processes and
worker_connections 65536;
, Nginx can theoretically handle$4 \times 65536 = 262144$ simultaneous connections.
- Since Nginx typically runs multiple worker processes (controlled by
-
Performance Impact (Scalability & Resource Usage):
-
High Concurrency: A high
worker_connections
value allows Nginx to handle a very large number of concurrent clients, which is essential for high-traffic websites. - Memory: Each connection consumes a small amount of memory. Setting this too high without sufficient RAM could lead to memory exhaustion.
- CPU: While Nginx is very efficient, handling a vast number of connections will consume CPU cycles for processing requests.
- File Descriptors: Each connection is a file descriptor. This setting must be lower than or equal to the operating system's per-process file descriptor limit.
-
High Concurrency: A high
-
Suggestions:
-
65536
is a very common and high value, indicating a server designed for high concurrency. - Practical Limit: While Nginx might be configured for 65,536 connections per worker, the actual practical limit is often determined by the server's CPU, memory, and network bandwidth.
- Monitoring: Monitor your server's resource usage (CPU, RAM, open file descriptors) under load to ensure it can comfortably handle this many connections. If you consistently hit the limit, consider scaling up your hardware or optimizing other parts of your Nginx/application stack.
-
2. worker_rlimit_nofile 655360; (note: the configuration shown above actually sets worker_rlimit_nofile 65535 — equal to worker_connections; raise it to get the headroom discussed in this section)
- Explanation: This directive sets the maximum number of open file descriptors (the
RLIMIT_NOFILE
limit) for Nginx's worker processes.- A file descriptor is used for every connection, every file being served, every log file, and so on.
- The value
655360
(655,360) is ten times yourworker_connections
value.
- Performance Impact (Stability & Reliability):
- Prevents "Too Many Open Files" Errors: If Nginx runs out of file descriptors, it cannot accept new connections or open files, leading to service disruption and "Too many open files" errors in your logs.
- Must be High Enough: This value must be at least
worker_connections
. It's often recommended to set it higher (e.g., 1.5x to 2xworker_connections
for general use, or even 10x as you have for very busy servers) to account for other file descriptors used by Nginx (e.g., log files, cache files, internal pipes, upstream connections). - Operating System Limit: This Nginx setting cannot exceed the operating system's system-wide file descriptor limit (
fs.file-max
) or the per-user/per-processulimit -n
setting. You often need to adjust these OS limits first.
- Suggestions:
655360
is a very high and appropriate value for a server aiming for high concurrency.- Verify OS Limits: Before setting
worker_rlimit_nofile
this high, you must ensure your Linux system'sulimit -n
for the Nginx user andfs.file-max
kernel parameter are set to at least this value (or higher).- To check
ulimit -n
for the Nginx user (e.g.,www-data
):sudo su - www-data -c 'ulimit -n'
- To check system-wide
fs.file-max
:cat /proc/sys/fs/file-max
- To temporarily set
ulimit -n
for the current shell (e.g., for testing):ulimit -n 655360
- To make it permanent, you'd typically edit
/etc/security/limits.conf
(fornofile
for thewww-data
user) and/etc/sysctl.conf
(forfs.file-max
).
- To check
3. use epoll;
(Example, commented out in your snippet)
- Explanation: This directive specifies the connection processing method Nginx will use.
epoll
is the most efficient and scalable I/O event notification mechanism on Linux systems. It allows Nginx to efficiently monitor a large number of file descriptors for readiness events (e.g., a new connection, data arriving on an existing connection).- Other methods exist (
kqueue
for FreeBSD/macOS,select
,poll
), butepoll
is the default and preferred method on Linux due to its superior performance for high concurrency.
- Performance Impact (CPU & Scalability):
- Highly Efficient:
epoll
drastically reduces the CPU overhead associated with monitoring thousands of connections compared to older methods likeselect
orpoll
. It scales very well with the number of connections.
- Highly Efficient:
- Suggestions:
- Explicitly include it: While Nginx often defaults to the best method for the detected OS, it's good practice to explicitly include
use epoll;
in your events
block if you're on Linux, as it clearly states your intent and ensures Nginx uses the most efficient method.
- Explicitly include it: While Nginx often defaults to the best method for the detected OS, it's good practice to explicitly include
Overall Impact on Nginx Performance (CPU and Memory)
- CPU:
worker_connections
and the chosen use
method directly impact CPU. A higher number of connections means more work for the CPU, but epoll
minimizes the overhead of managing those connections. - Memory:
worker_connections
directly influences memory consumption as each connection requires a small amount of memory. worker_rlimit_nofile
ensures Nginx has enough capacity to use file descriptors but doesn't directly consume memory itself (it's a limit, not an allocation).
Your events
block configuration is very aggressive and suitable for a high-performance web server aiming to handle significant traffic, which aligns with serving linuxmalaysia.com
effectively. The high worker_connections
and worker_rlimit_nofile
values are excellent for scalability, provided your underlying operating system is also configured to support them.
HTTP to HTTPS Redirect Configuration
This
server
block is solely dedicated to handling HTTP requests and redirecting them to your secure HTTPS site. This is a crucial security and SEO best practice. Let's break down each part:
listen 80 reuseport;
reuseport
option (available in Nginx 1.9.1+ on Linux) allows multiple worker processes to listen on the same port, which can improve performance by distributing incoming connections more evenly across workers. reuseport
can offer minor performance benefits on high-concurrency systems by reducing lock contention when accepting new connections. listen [::]:80 reuseport;
listen
directive. server_name linuxmalaysia.com;
server
block should handle requests for the domain linuxmalaysia.com
. If you have other domains or subdomains, you'd need separate server_name
entries or additional server
blocks for them. server_name
directives to direct traffic to the correct block. access_log /var/log/nginx/access.log combined buffer=512k flush=1m;
combined
format, buffers logs in memory (up to 512KB), and flushes them to disk every 1 minute. This is the same logging configuration as your main HTTPS block. error_log /var/log/nginx/error.log warn;
warn
. return 301 https://linuxmalaysia.com$request_uri;
return 301
: Nginx will send an HTTP 301 Moved Permanently
status code to the client. This is crucial for SEO, as it tells search engines that the content has permanently moved to the HTTPS version, transferring any SEO "link juice" to the new URL. https://linuxmalaysia.com
: This is the target protocol and domain. $request_uri
: This Nginx variable preserves the original path and query string from the incoming HTTP request. So, if a user types http://linuxmalaysia.com/about?lang=en
, they will be redirected to https://linuxmalaysia.com/about?lang=en
. Overall Performance and Importance
This entire
server
block is extremely important for:301
redirect ensures that search engines correctly update their indexes and your site doesn't lose ranking due to duplicate content (HTTP vs. HTTPS).This is a standard and highly recommended configuration for any website using HTTPS. You've implemented it correctly and efficiently.