# set http proxy
export http_proxy=http://PROXYHOST:PROXYPORT
# set http proxy with user and password
export http_proxy=http://USERNAME:PASSWORD@PROXYHOST:PROXYPORT
# set http proxy with user and password (special characters in the password must be percent-encoded, e.g. @ becomes %40)
export http_proxy=http://USERNAME:P%40SSW0RD@PROXYHOST:PROXYPORT
# =============================================================================
# Install AzCopy on Windows (PowerShell)
# https://docs.microsoft.com/en-us/azure/storage/common/storage-use-azcopy-v10
# https://github.com/Azure/azure-storage-azcopy
# -----------------------------------------------------------------------------
# Developer.......: Andre Essing (https://www.andre-essing.de/)
#                   (https://github.com/aessing)
#                   (https://twitter.com/aessing)
#                   (https://www.linkedin.com/in/aessing/)
# -----------------------------------------------------------------------------
# non root user example for alpine
#
# usage:
# $ docker build --build-arg "USER=someuser" --tag test .
# $ docker run --rm test
FROM alpine
ARG USER=default
ENV HOME /home/$USER
# create the unprivileged user and switch to it
RUN adduser -D $USER
USER $USER
WORKDIR $HOME
# `docker run --rm test` prints the active (non-root) user
CMD ["whoami"]
# Create a self-signed certificate for the given DNS name in the local machine
# store, then export it as a .cer file and as a .pfx file protected with the
# DNS name as its password.
Param ($DnsName)
$Cert = New-SelfSignedCertificate -CertStoreLocation Cert:\LocalMachine\My -DnsName $DnsName -Verbose
$Password = ConvertTo-SecureString -String $DnsName -Force -AsPlainText -Verbose
Export-Certificate -Cert $Cert -FilePath .\$DnsName.cer -Verbose
Export-PfxCertificate -Cert $Cert -FilePath .\$DnsName.pfx -Password $Password -Verbose
$CertThumbprint = $Cert.Thumbprint
# This config came around after a friend had problems with a Steam cache on his
# Cox internet connection. Cox would intercept any requests to Steam content
# servers and return a 302 to Cox's servers. The cache would return the 302
# to the Steam client, and the Steam client would go directly to Cox, bypassing
# the cache.
# This config makes nginx follow the 302 itself, and caches the result of the
# redirect as if it was the response to the original request. So subsequent
# requests to the URL that returned a 302 will get the file instead of a 302.
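#
# A minimal sketch of that redirect-following idea, not the original full
# config: the cache zone name "steam", the cache path, the resolver address
# and the cache times below are placeholder assumptions.

proxy_cache_path /tmp/nginx-cache levels=1:2 keys_zone=steam:10m max_size=50g inactive=30d;

server {
    listen 80;
    resolver 8.8.8.8;                                # proxy_pass below uses variables

    location / {
        proxy_cache steam;
        proxy_cache_key $uri;                        # cache under the original request URI
        proxy_cache_valid 200 30d;
        proxy_pass http://$host;                     # pass the request through to the requested host
        proxy_intercept_errors on;                   # hand 3xx responses to error_page...
        error_page 301 302 307 = @handle_redirect;   # ...so nginx follows them itself
    }

    location @handle_redirect {
        set $saved_redirect_location '$upstream_http_location';
        proxy_pass $saved_redirect_location;         # fetch from the redirect target
        proxy_cache steam;
        proxy_cache_key $uri;                        # ...but store it under the original URI
    }
}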
The content from this markdown file has moved to a new, happier home where it can serve more people. Please check it out: https://docs.microsoft.com/azure/azure-cache-for-redis/cache-best-practices.
The content from this article has been moved to a happier home on our official documentation site - please update your bookmarks.
{
  "id": 1,
  "title": "MongoDB",
  "originalTitle": "MongoDB",
  "tags": [],
  "style": "dark",
  "timezone": "browser",
  "editable": true,
  "hideControls": false,
  "sharedCrosshair": false,
# remove untracked and ignored files from the working tree and from all submodules
git clean -xfd
git submodule foreach --recursive git clean -xfd
# discard all local changes in the repository and its submodules
git reset --hard
git submodule foreach --recursive git reset --hard
# re-initialize submodules and check out the committed revisions
git submodule update --init --recursive
Moved to git repository: https://github.com/denji/nginx-tuning
For this configuration you can use any web server you like; I decided to use nginx because it is what I work with most.
Generally, a properly configured nginx can handle up to 400K to 500K requests per second (clustered). The most I have seen myself is 50K to 80K requests per second (non-clustered) at around 30% CPU load; granted, that was on 2 x Intel Xeon
with Hyper-Threading enabled, but it also works without problems on slower machines.
Keep in mind that this config is used in a testing environment, not in production, so you will need to work out how best to implement most of these features for your own servers.
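As a rough illustration only, the kind of settings such a tuned config revolves around might look like the sketch below; the values are generic placeholder assumptions, not the repository's recommendations, so benchmark against your own hardware.

worker_processes auto;            # one worker per CPU core
worker_rlimit_nofile 100000;      # raise the open-file limit for busy workers

events {
    worker_connections 4000;      # concurrent connections per worker
    multi_accept on;              # accept as many new connections as possible at once
    use epoll;                    # efficient connection method on Linux
}

http {
    sendfile on;                  # serve files with sendfile(2)
    tcp_nopush on;                # send headers and the start of a file in one packet
    tcp_nodelay on;
    keepalive_timeout 30;
    keepalive_requests 1000;      # reuse keep-alive connections for many requests
    gzip on;                      # compress responses
    access_log off;               # drop access logging when benchmarking raw throughput
}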