Created October 25, 2012 15:46
Difference between default Redis 2.4 and 2.6 configs
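The odd-looking /dev/fd/63 and /dev/fd/62 paths in the diff header below are almost certainly a side effect of generating the diff with shell process substitution (diff -u <(...) <(...)) rather than against files on disk. Judging by the changed defaults, the left-hand (-) side is the 2.4 config and the right-hand (+) side is the 2.6 config.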
--- /dev/fd/63 2012-10-25 16:46:38.000000000 +0100
+++ /dev/fd/62 2012-10-25 16:46:38.000000000 +0100
@@ -1,6 +1,6 @@
# Redis configuration file example
-# Note on units: when memory size is needed, it is possible to specifiy
+# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
@@ -45,7 +45,7 @@
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
-loglevel verbose
+loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
@@ -82,17 +82,47 @@
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
save 900 1
save 300 10
save 60 10000
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in an hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# distater will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usually even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# the dataset will likely be bigger if you have compressible values or keys.
rdbcompression yes
+# Since verison 5 of RDB a CRC64 checksum is placed at the end of the file.
+# This makes the format more resistant to corruption but there is a performance
+# hit to pay (around 10%) when saving and loading RDB files, so you can disable it
+# for maximum performances.
+#
+# RDB files created with checksum disabled have a checksum of zero that will
+# tell the loading code to skip the check.
+rdbchecksum yes
+
# The filename where to dump the DB
dbfilename dump.rdb
@@ -126,7 +156,7 @@
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-# still reply to client requests, possibly with out of data data, or the
+# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale data is set to 'no' the slave will reply with
@@ -135,6 +165,22 @@
#
slave-serve-stale-data yes
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extend you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
@@ -182,7 +228,7 @@
# Command renaming.
#
-# It is possilbe to change the name of dangerous commands in a shared
+# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# of hard to guess so that it will be still available for internal-use
# tools but not available for general clients.
@@ -191,20 +237,23 @@
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
-# It is also possilbe to completely kill a command renaming it into
+# It is also possible to completely kill a command renaming it into
# an empty string:
#
# rename-command CONFIG ""
################################### LIMITS ####################################
-# Set the max number of connected clients at the same time. By default there
-# is no limit, and it's up to the number of file descriptors the Redis process
-# is able to open. The special value '0' means no limits.
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able ot configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
-# maxclients 128
+# maxclients 10000
# Don't use more memory than the specified amount of bytes.
# When the memory limit is reached Redis will try to remove keys
@@ -237,7 +286,7 @@
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
-# allkeys->random -> remove a random key, any key
+# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
@@ -264,21 +313,23 @@
############################## APPEND ONLY MODE ###############################
-# By default Redis asynchronously dumps the dataset on disk. If you can live
-# with the idea that the latest records will be lost if something like a crash
-# happens this is the preferred way to run Redis. If instead you care a lot
-# about your data and don't want to that a single record can get lost you should
-# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.aof. This file will
-# be read on startup in order to rebuild the full dataset in memory.
-#
-# Note that you can have both the async dumps and the append only file if you
-# like (you have to comment the "save" statements above to disable the dumps).
-# Still if append only mode is enabled Redis will load the data from the
-# log file at startup ignoring the dump.rdb file.
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
#
-# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
-# log file in background when it gets too big.
+# Please check http://redis.io/topics/persistence for more information.
appendonly no
@@ -293,16 +344,19 @@
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log . Slow, Safest.
-# everysec: fsync only if one second passed since the last fsync. Compromise.
+# everysec: fsync only one time every second. Compromise.
#
# The default is "everysec" that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will will let the operating system flush the output buffer when
+# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
# If unsure, use "everysec".
# appendfsync always
@@ -321,7 +375,7 @@
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving the durability of Redis is
-# the same as "appendfsync none", that in pratical terms means that it is
+# the same as "appendfsync none", that in practical terms means that it is
# possible to lost up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
@@ -343,12 +397,30 @@
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
-# Specify a precentage of zero in order to disable the automatic AOF
+# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb
+################################ LUA SCRIPTING ###############################
+
+# Max execution time of a Lua script in milliseconds.
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceed the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that did not yet called write commands. The second
+# is the only way to shut down the server in the case a write commands was
+# already issue by the script but the user don't want to wait for the natural
+# termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
################################## SLOW LOG ###################################
# The Redis Slow Log is a system to log queries that exceeded a specified
@@ -373,89 +445,13 @@
# You can reclaim memory used by the slow log with SLOWLOG RESET.
slowlog-max-len 128
-################################ VIRTUAL MEMORY ###############################
-
-### WARNING! Virtual Memory is deprecated in Redis 2.4
-### The use of Virtual Memory is strongly discouraged.
-
-# Virtual Memory allows Redis to work with datasets bigger than the actual
-# amount of RAM needed to hold the whole dataset in memory.
-# In order to do so very used keys are taken in memory while the other keys
-# are swapped into a swap file, similarly to what operating systems do
-# with memory pages.
-#
-# To enable VM just set 'vm-enabled' to yes, and set the following three
-# VM parameters accordingly to your needs.
-
-vm-enabled no
-# vm-enabled yes
-
-# This is the path of the Redis swap file. As you can guess, swap files
-# can't be shared by different Redis instances, so make sure to use a swap
-# file for every redis process you are running. Redis will complain if the
-# swap file is already in use.
-#
-# The best kind of storage for the Redis swap file (that's accessed at random)
-# is a Solid State Disk (SSD).
-#
-# *** WARNING *** if you are using a shared hosting the default of putting
-# the swap file under /tmp is not secure. Create a dir with access granted
-# only to Redis user and configure Redis to create the swap file there.
-vm-swap-file /tmp/redis.swap
-
-# vm-max-memory configures the VM to use at max the specified amount of
-# RAM. Everything that deos not fit will be swapped on disk *if* possible, that
-# is, if there is still enough contiguous space in the swap file.
-#
-# With vm-max-memory 0 the system will swap everything it can. Not a good
-# default, just specify the max amount of RAM you can in bytes, but it's
-# better to leave some margin. For instance specify an amount of RAM
-# that's more or less between 60 and 80% of your free RAM.
-vm-max-memory 0
-
-# Redis swap files is split into pages. An object can be saved using multiple
-# contiguous pages, but pages can't be shared between different objects.
-# So if your page is too big, small objects swapped out on disk will waste
-# a lot of space. If you page is too small, there is less space in the swap
-# file (assuming you configured the same number of total swap file pages).
-#
-# If you use a lot of small objects, use a page size of 64 or 32 bytes.
-# If you use a lot of big objects, use a bigger page size.
-# If unsure, use the default :)
-vm-page-size 32
-
-# Number of total memory pages in the swap file.
-# Given that the page table (a bitmap of free/used pages) is taken in memory,
-# every 8 pages on disk will consume 1 byte of RAM.
-#
-# The total swap size is vm-page-size * vm-pages
-#
-# With the default of 32-bytes memory pages and 134217728 pages Redis will
-# use a 4 GB swap file, that will use 16 MB of RAM for the page table.
-#
-# It's better to use the smallest acceptable value for your application,
-# but the default is large in order to work in most conditions.
-vm-pages 134217728
-
-# Max number of VM I/O threads running at the same time.
-# This threads are used to read/write data from/to swap file, since they
-# also encode and decode objects from disk to memory or the reverse, a bigger
-# number of threads can help with big objects even if they can't help with
-# I/O itself as the physical device may not be able to couple with many
-# reads/writes operations at the same time.
-#
-# The special value of 0 turn off threaded I/O and enables the blocking
-# Virtual Memory implementation.
-vm-max-threads 4
-
############################### ADVANCED CONFIG ###############################
-# Hashes are encoded in a special way (much more memory efficient) when they
-# have at max a given numer of elements, and the biggest element does not
-# exceed a given threshold. You can configure this limits with the following
-# configuration directives.
-hash-max-zipmap-entries 512
-hash-max-zipmap-value 64
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
@@ -478,9 +474,9 @@
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values). The hash table implementation redis uses (see dict.c)
+# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into an hash table
-# that is rhashing, the more rehashing "steps" are performed, so if the
+# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
@@ -496,10 +492,47 @@
# want to free memory asap when possible.
activerehashing yes
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave -> slave clients and MONITOR clients
+# pubsub -> clients subcribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled just setting it to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
-# have a standard template that goes to all redis server but also need
+# have a standard template that goes to all Redis server but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#
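For quick reference, these are the directives that appear for the first time in the 2.6 default config, with the defaults shown in the diff above (the whole VIRTUAL MEMORY section and the vm-* directives are gone, and hash-max-zipmap-entries/value are renamed to hash-max-ziplist-entries/value):

stop-writes-on-bgsave-error yes
rdbchecksum yes
slave-read-only yes
lua-time-limit 5000
client-output-buffer-limit normal 0 0 0
client-output-buffer-limit slave 256mb 64mb 60
client-output-buffer-limit pubsub 32mb 8mb 60

The commented maxclients example also changes from 128 to 10000, reflecting the new built-in default of 10000 clients.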
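As an illustrative sketch (not part of the gist itself), the save "" syntax documented above can be combined with the append-only options to run 2.6 with AOF persistence only; the values below are examples, not the shipped defaults:

# AOF-only persistence (illustrative)
save ""                          # drop all RDB save points
appendonly yes                   # enable the append only file
appendfsync everysec             # default policy: lose at most about one second of writes
auto-aof-rewrite-percentage 100
auto-aof-rewrite-min-size 64mb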
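The slave-read-only comments suggest shadowing administrative commands on replicas that other clients can reach; another sketch, reusing the rename-command examples from the diff (which commands to rename, and to what, is your call):

# Read-only slave with administrative commands disabled (illustrative)
slave-read-only yes
rename-command CONFIG ""         # kill the command outright, or rename it to something hard to guess
rename-command DEBUG ""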
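The worked example in the client-output-buffer-limit comments (disconnect a normal client once it hits a 32 megabyte hard limit, or after staying above 16 megabytes for 10 seconds) corresponds to a single directive:

client-output-buffer-limit normal 32mb 16mb 10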
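Finally, the eviction policies listed in the maxmemory hunk are selected with the maxmemory-policy directive, which exists in both 2.4 and 2.6 but is not visible in this excerpt; an illustrative pairing with a memory cap:

# Cap memory and evict least-recently-used keys across the whole keyspace (illustrative values)
maxmemory 100mb
maxmemory-policy allkeys-lru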