Skip to content

Instantly share code, notes, and snippets.

@seancribbs
Created September 6, 2013 21:17
Show Gist options
  • Save seancribbs/6470138 to your computer and use it in GitHub Desktop.
Save seancribbs/6470138 to your computer and use it in GitHub Desktop.
## Default ring creation size. Make sure it is a power of 2,
## e.g. 16, 32, 64, 128, 256, 512 etc
## ring_size = 64
## enable active anti-entropy subsystem
anti_entropy = on
## location of the console log
log.console.file = ./log/console.log
## location of the error log
log.error.file = ./log/error.log
## turn on syslog
log.syslog = off
## listener.http.<name> is an IP address and TCP port that the Riak
## HTTP interface will bind.
listener.http.internal = 127.0.0.1:8098
## listener.protobuf.<name> is an IP address and TCP port that the Riak
## Protocol Buffers interface will bind.
listener.protobuf.internal = 127.0.0.1:8087
## pb_backlog is the maximum length to which the queue of pending
## connections may grow. If set, it must be an integer >= 0.
## By default the value is 5. If you anticipate a huge number of
## connections being initialised *simultaneously*, set this number
## higher.
## protobuf.backlog = 64
## Default location of ringstate
ring.state_dir = ./data/ring
## listener.https.<name> is an IP address and TCP port that the Riak
## HTTPS interface will bind.
## listener.https.internal = 127.0.0.1:8098
## Default cert location for https can be overridden
## with the ssl config variable, for example:
## ssl.certfile = ./etc/cert.pem
## Default key location for https can be overridden
## with the ssl config variable, for example:
## ssl.keyfile = ./etc/key.pem
## handoff.port is the TCP port that Riak uses for
## intra-cluster data handoff.
handoff.port = 8099
## To encrypt riak_core intra-cluster data handoff traffic,
## uncomment the following line and edit its path to an
## appropriate certfile and keyfile. (This example uses a
## single file with both items concatenated together.)
## handoff.ssl.certfile = /tmp/erlserver.pem
## DTrace support
## Do not enable 'dtrace' unless your Erlang/OTP
## runtime is compiled to support DTrace. DTrace is
## available in R15B01 (supported by the Erlang/OTP
## official source package) and in R14B04 via a custom
## source repository & branch.
dtrace = off
platform_bin_dir = ./bin
platform_data_dir = ./data
platform_etc_dir = ./etc
platform_lib_dir = ./lib
platform_log_dir = ./log
## To enable Search functionality set this 'on'.
search = off
## The root dir to store search merge_index data
merge_index.data_root = ./data/merge_index
## Size, in bytes, of the in-memory buffer. When this
## threshold has been reached the data is transformed
## into a segment file which resides on disk.
merge_index.buffer_rollover_size = 1mb
## Over time the segment files need to be compacted.
## This is the maximum number of segments that will be
## compacted at once. A lower value will lead to
## quicker but more frequent compactions.
merge_index.max_compact_segments = 20
## Whether to write a crash log, and where.
## Commented/omitted/undefined means no crash logger.
log.crash.file = ./log/crash.log
## Maximum size in bytes of events in the crash log - defaults to 65536
log.crash.msg_size = 64kb
## Maximum size of the crash log in bytes, before it's rotated, set
## to 0 to disable rotation - default is 0
log.crash.size = 10mb
## What time to rotate the crash log - default is no time
## rotation. See the lager README for a description of this format:
## https://github.com/basho/lager/blob/master/README.org
log.crash.date = $D0
## Number of rotated crash logs to keep, 0 means keep only the
## current one - default is 0
log.crash.count = 5
## Whether to redirect error_logger messages into lager - defaults to true
log.error.redirect = on
## maximum number of error_logger messages to handle in a second
## lager 2.0.0 shipped with a limit of 50, which is a little low for riak's startup
log.error.messages_per_second = 100
## Storage_backend specifies the Erlang module defining the storage
## mechanism that will be used on this node.
storage_backend = multi
multi_backend.default = bitcask_mult
multi_backend.bitcask_mult.storage_backend = bitcask
multi_backend.bitcask_mult.bitcask.data_root = data/multi/bitcask
multi_backend.leveldb_mult.storage_backend = leveldb
multi_backend.leveldb_mult.leveldb.data_root = data/multi/leveldb
multi_backend.memory_mult.storage_backend = memory
multi_backend.memory_mult.memory_backend.ttl = 1d
## raw_name is the first part of all URLs used by the Riak raw HTTP
## interface. See riak_web.erl and raw_http_resource.erl for
## details.
## raw_name = riak
## Restrict how fast AAE can build hash trees. Building the tree
## for a given partition requires a full scan over that partition's
## data. Once built, trees stay built until they are expired.
## Config is of the form:
## {num-builds, per-timespan}
## Default is 1 build per hour.
anti_entropy.build_limit.number = 1
anti_entropy.build_limit.per_timespan = 1h
## Determine how often hash trees are expired after being built.
## Periodically expiring a hash tree ensures the on-disk hash tree
## data stays consistent with the actual k/v backend data. It also
## helps Riak identify silent disk failures and bit rot. However,
## expiration is not needed for normal AAE operation and should be
## infrequent for performance reasons. The time is specified in
## milliseconds. The default is 1 week.
anti_entropy.expire = 1w
## Limit how many AAE exchanges/builds can happen concurrently.
anti_entropy.concurrency = 2
## The tick determines how often the AAE manager looks for work
## to do (building/expiring trees, triggering exchanges, etc).
## The default is every 15 seconds. Lowering this value will
## speed up the rate that all replicas are synced across the cluster.
## Increasing the value is not recommended.
anti_entropy.tick = 15s
## The directory where AAE hash trees are stored.
anti_entropy.data_dir = ./data/anti_entropy
## The LevelDB options used by AAE to generate the LevelDB-backed
## on-disk hashtrees.
anti_entropy.write_buffer_size = 4mb
anti_entropy.max_open_files = 20
## mapred_name is URL used to submit map/reduce requests to Riak.
mapred_name = mapred
## mapred_2i_pipe indicates whether secondary-index
## MapReduce inputs are queued in parallel via their own
## pipe ('true'), or serially via a helper process
## ('false' or undefined). Set to 'false' or leave
## undefined during a rolling upgrade from 1.0.
mapred_2i_pipe = on
## Each of the following entries control how many Javascript
## virtual machines are available for executing map, reduce,
## pre- and post-commit hook functions.
javascript_vm.map_js_vm_count = 8
javascript_vm.reduce_js_vm_count = 6
javascript_vm.hook_js_vm_count = 2
## js_max_vm_mem is the maximum amount of memory, in megabytes,
## allocated to the Javascript VMs. If unset, the default is
## 8MB.
javascript_vm.max_vm_mem = 8
## js_thread_stack is the maximum amount of thread stack, in megabytes,
## allocated to the Javascript VMs. If unset, the default is 16MB.
## NOTE: This is not the same as the C thread stack.
javascript_vm.thread_stack = 16
## js_source_dir should point to a directory containing Javascript
## source files which will be loaded by Riak when it initializes
## Javascript VMs.
## javascript_vm.source_dir = /tmp/js_source
## http_url_encoding determines how Riak treats URL encoded
## buckets, keys, and links over the REST API. When set to 'on'
## Riak always decodes encoded values sent as URLs and Headers.
## Otherwise, Riak defaults to compatibility mode where links
## are decoded, but buckets and keys are not. The compatibility
## mode will be removed in a future release.
http_url_encoding = on
## Switch to vnode-based vclocks rather than client ids. This
## significantly reduces the number of vclock entries.
## Only set to 'on' if *all* nodes in the cluster are upgraded to 1.0
vnode_vclocks = on
## This option toggles compatibility of keylisting with 1.0
## and earlier versions. Once a rolling upgrade to a version
## > 1.0 is completed for a cluster, this should be set to
## true for better control of memory usage during key listing
## operations
listkeys_backpressure = on
## This option specifies how many of each type of fsm may exist
## concurrently. This is for overload protection and is a new
## mechanism that obsoletes 1.3's health checks. Note that this number
## represents two potential processes, so +P in vm.args should be at
## least 3X the fsm_limit.
fsm_limit = 50000
## object_format controls which binary representation of a riak_object
## is stored on disk.
## Current options are: v0, v1.
## v0: Original erlang:term_to_binary format. Higher space overhead.
## v1: New format for more compact storage of small values.
object_format = v1
## Set to 'off' to disable the admin panel.
riak_control = off
## Authentication style used for access to the admin
## panel. Valid styles are 'userlist' <TODO>.
riak_control.auth = userlist
## If auth is set to 'userlist' then this is the
## list of usernames and passwords for access to the
## admin panel.
riak_control.user.user.password = pass
## The admin panel is broken up into multiple
## components, each of which is enabled or disabled
## by one of these settings.
riak_control.admin = on
## bitcask data root
bitcask.data_root = ./data/bitcask
## Configure how Bitcask writes data to disk.
## erlang: Erlang's built-in file API
## nif: Direct calls to the POSIX C API
## The NIF mode provides higher throughput for certain
## workloads, but has the potential to negatively impact
## the Erlang VM, leading to higher worst-case latencies
## and possible throughput collapse.
bitcask.io_mode = erlang
## leveldb data_root
leveldb.data_root = ./data/leveldb
## Each database .sst table file can include an optional "bloom filter"
## that is highly effective in shortcutting data queries that are destined
## to not find the requested key. The bloom_filter typically increases the
## size of an .sst table file by about 2%. This option must be set to true
## in the riak.conf to take effect.
leveldb.bloomfilter = on
## Name of the riak node
nodename = [email protected]
## Cookie for distributed node communication. All nodes in the same cluster
## should use the same cookie or they will not be able to communicate.
distributed_cookie = riak
erlang.async_threads = 64
## Increase number of concurrent ports/sockets
erlang.max_ports = 64000
## Set the location of crash dumps
erlang.crash_dump = ./log/erl_crash.dump
## Raise the ETS table limit
erlang.max_ets_tables = 256000
## Raise the default erlang process limit
process_limit = 256000
## For nodes with many busy_dist_port events, Basho recommends
## raising the sender-side network distribution buffer size.
## 32MB may not be sufficient for some workloads and is a suggested
## starting point.
## The Erlang/OTP default is 1024 (1 megabyte).
## See: http://www.erlang.org/doc/man/erl.html#%2bzdbbl
## erlang.zdouble = 32mb
## Erlang VM scheduler tuning.
## Prerequisite: a patched VM from Basho, or a VM compiled separately
## with this patch applied:
## https://gist.github.com/evanmcc/a599f4c6374338ed672e
## erlang.swiffy = 500
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment