Created
November 8, 2019 15:09
-
-
Save widnyana/1be87968fecad60cd784fcee894b3650 to your computer and use it in GitHub Desktop.
Splunk tuned-adm configuration
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
#
# tuned configuration for Splunk servers.
# Consumed by the tuned daemon (tuned.conf profile format); activate with
# `tuned-adm profile <name>`.
#

[main]
summary=Broadly applicable to Splunk server tuning that provides excellent performance across a variety of common server workloads

[cpu]
governor=performance
energy_perf_bias=performance
min_perf_pct=100

[vm]
# Transparent huge memory pages and Splunk performance
# http://docs.splunk.com/Documentation/Splunk/7.1.2/ReleaseNotes/SplunkandTHP
transparent_hugepage=never

[disk]
# The default unit for readahead is KiB. This can be adjusted to sectors
# by specifying the relevant suffix, eg. (readahead => 8192 s). There must
# be at least one space between the number and suffix (if suffix is specified).
# The leading ">" is tuned syntax meaning "no less than this value".
readahead=>4096

[sysctl]
# ktune sysctl settings for rhel6 servers, maximizing i/o throughput
#
# Minimal preemption granularity for CPU-bound tasks:
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
kernel.sched_min_granularity_ns = 10000000

# SCHED_OTHER wake-up granularity.
# (default: 1 msec * (1 + ilog(ncpus)), units: nanoseconds)
#
# This option delays the preemption effects of decoupled workloads
# and reduces their over-scheduling. Synchronous workloads will still
# have immediate wakeup/sleep latencies.
kernel.sched_wakeup_granularity_ns = 15000000

# If a workload mostly uses anonymous memory and it hits this limit, the entire
# working set is buffered for I/O, and any more write buffering would require
# swapping, so it's time to throttle writes until I/O can catch up. Workloads
# that mostly use file mappings may be able to use even higher values.
#
# The generator of dirty data starts writeback at this percentage (system default
# is 20%)
vm.dirty_ratio = 40

# Start background writeback (via writeback threads) at this percentage (system
# default is 10%)
vm.dirty_background_ratio = 10

# PID allocation wrap value. When the kernel's next PID value
# reaches this value, it wraps back to a minimum PID value.
# PIDs of value pid_max or larger are not allocated.
#
# A suggested value for pid_max is 1024 * <# of cpu cores/threads in system>
# e.g., a box with 32 cpus, the default of 32768 is reasonable, for 64 cpus,
# 65536, for 4096 cpus, 4194304 (which is the upper limit possible).
#kernel.pid_max = 65536

# The swappiness parameter controls the tendency of the kernel to move
# processes out of physical memory and onto the swap disk.
# 0 tells the kernel to avoid swapping processes out of physical memory
# for as long as possible
# 100 tells the kernel to aggressively swap processes out of physical memory
# and move them to swap cache
vm.swappiness=10
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment