Last active
May 5, 2023 15:16
-
-
Save walfie/c83c636aec095ea3a4e9f6d33c5c0e84 to your computer and use it in GitHub Desktop.
nginx S3 proxy cache
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# Cache directory: up to 30G on disk, 500m of key metadata in shared memory,
# evicting objects not accessed for 31 days.
# (Previous comment said "20G" — the directive below is authoritative: 30g.)
proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=s3:500m max_size=30g inactive=31d;
server {
    server_name files.example.com;
    gzip off;

    # External DNS resolver so $s3_host below is re-resolved at runtime
    # instead of being frozen at config-load time.
    resolver 8.8.8.8 valid=300s;
    resolver_timeout 5s;

    # TLS is enabled via the "ssl" parameter on listen.
    # NOTE: the deprecated "ssl on;" directive was removed — it is rejected
    # by nginx >= 1.25 and redundant alongside "listen ... ssl".
    listen 443 ssl;
    ssl_certificate /etc/letsencrypt/live/example.com/fullchain.pem;
    ssl_certificate_key /etc/letsencrypt/live/example.com/privkey.pem;
    ssl_protocols TLSv1.2;
    ssl_ciphers EECDH+AESGCM:EECDH+AES:!3DES;
    ssl_ecdh_curve prime256v1;
    ssl_prefer_server_ciphers on;
    ssl_session_cache shared:SSL:10m;
    add_header Strict-Transport-Security "max-age=31536000; includeSubDomains";
    keepalive_timeout 3600;

    location / {
        proxy_http_version 1.1;
        # Make sure we're proxying along the correct headers
        proxy_set_header Host 's3.amazonaws.com';
        # Make sure we're using Keep-Alives with S3
        proxy_set_header Connection '';
        proxy_hide_header x-amz-id-2;
        proxy_hide_header x-amz-request-id;
        proxy_hide_header Set-Cookie;
        proxy_ignore_headers "Set-Cookie";

        # Configure our cache
        proxy_cache s3;
        # Ignore query string in cache key
        proxy_cache_key $scheme$proxy_host$uri;
        # Cache all 200 OK's for 31 days
        proxy_cache_valid 200 31d;
        # Use stale cache file in all errors from upstream if we can
        proxy_cache_use_stale error timeout invalid_header updating http_500 http_502 http_503 http_504;
        # Lock the cache so that only one request can populate it at a time
        proxy_cache_lock on;
        # Reuse our SSL session for our upstream connection
        proxy_ssl_session_reuse on;

        # Set back a nice HTTP Header to indicate what the cache status was
        add_header X-Cache-Status $upstream_cache_status always;
        add_header 'Access-Control-Allow-Credentials' 'true';
        add_header 'Access-Control-Allow-Methods' 'GET,POST,OPTIONS,PUT,DELETE,PATCH';
        add_header 'Access-Control-Allow-Origin' 'https://example.com';

        if ($request_method = 'OPTIONS') {
            # add_header sets are REPLACED, not merged, when a nested level
            # declares its own add_header — so the CORS headers above must be
            # repeated here or preflight responses would ship without them.
            add_header 'Access-Control-Allow-Credentials' 'true';
            add_header 'Access-Control-Allow-Methods' 'GET,POST,OPTIONS,PUT,DELETE,PATCH';
            add_header 'Access-Control-Allow-Origin' 'https://example.com';
            add_header 'Access-Control-Max-Age' 1728000;
            add_header 'Content-Type' 'text/plain charset=UTF-8';
            add_header 'Content-Length' 0;
            return 204;
        }

        # Set this to a variable instead of using an `upstream`
        # to coerce nginx into resolving as DNS instead of caching
        # it once on process boot and never updating.
        set $s3_host 's3.amazonaws.com';
        set $s3_bucket 'files.example.com';
        proxy_pass https://$s3_host/$s3_bucket$request_uri;
    }
}
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment