Graylog Conf file
# If you are running more than one instance of graylog2-server you have to select one of these
# instances as master. The master will perform some periodical tasks that non-masters won't perform.
is_master = true
# The auto-generated node ID will be stored in this file and read after restarts. It is a good idea
# to use an absolute file path here if you are starting graylog2-server from init scripts or similar.
node_id_file = /etc/graylog2-server-node-id
# You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
# Generate one by using for example: pwgen -s 96
password_secret = <SECRET>
# the default root user is named 'admin'
# root_username = <ROOT_USER>
# You MUST specify a hash password for the root user (which you only need to initially set up the
# system and in case you lose connectivity to your authentication backend).
# This password cannot be changed using the API or via the web interface. If you need to change it,
# modify it in this file.
# Create one by using for example: echo -n yourpassword | shasum -a 256
# and put the resulting hash value into the following line
root_password_sha2 = <SHA2>
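# For example, hashing the (hypothetical) password "yourpassword" and keeping only the hash column:
#   echo -n yourpassword | shasum -a 256 | cut -d" " -f1
# The cut call strips the trailing "-" that shasum appends when reading from stdin.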
# Set plugin directory here (relative or absolute)
plugin_dir = plugin
# REST API listen URI. Must be reachable by other graylog2-server nodes if you run a cluster.
rest_listen_uri = http://127.0.0.1:12900/
# REST API transport address. Defaults to the value of rest_listen_uri. Exception: If rest_listen_uri
# is set to a wildcard IP address (0.0.0.0) the first non-loopback IPv4 system address is used.
# This will be promoted in the cluster discovery APIs and other nodes may try to connect on this
# address. (see rest_listen_uri)
rest_transport_uri = http://127.0.0.1:12900/
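# A typical cluster setup (hypothetical addresses) binds to all interfaces but advertises the
# node's real address to the other graylog2-server nodes, for example:
#   rest_listen_uri = http://0.0.0.0:12900/
#   rest_transport_uri = http://192.168.1.10:12900/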
# Enable CORS headers for the REST API. This is necessary for JS clients accessing the server directly.
# If these are disabled, modern browsers will not be able to retrieve resources from the server.
# This is disabled by default. Uncomment the next line to enable it.
#rest_enable_cors = true
# Enable GZIP support for the REST API. This compresses API responses and therefore helps to reduce
# overall round trip times. This is disabled by default. Uncomment the next line to enable it.
#rest_enable_gzip = true
# Embedded elasticsearch configuration file.
# Pay attention to the working directory of the server, maybe use an absolute path here.
#elasticsearch_config_file = /etc/graylog2-elasticsearch.yml
elasticsearch_max_docs_per_index = 20000000
# How many indices do you want to keep?
# elasticsearch_max_number_of_indices * elasticsearch_max_docs_per_index = total number of messages in your setup
elasticsearch_max_number_of_indices = 20
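# With the values configured above that is 20 * 20000000 = 400000000 messages in total
# before the retention strategy below kicks in on the oldest index.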
# Decide what happens with the oldest indices when the maximum number of indices is reached.
# The following strategies are available:
#   - delete # Deletes the index completely (Default)
#   - close  # Closes the index and hides it from the system. Can be re-opened later.
retention_strategy = close
# How many ElasticSearch shards and replicas should be used per index? Note that this only applies to newly created indices.
elasticsearch_shards = 1
elasticsearch_replicas = 0
elasticsearch_index_prefix = graylog2
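# Indices are created as <prefix>_<number>, so with the prefix above you will see
# graylog2_0, graylog2_1, and so on in your ElasticSearch cluster.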
# Do you want to allow searches with leading wildcards? This can be extremely resource hungry and should only
# be enabled with care. See also: http://support.torch.sh/help/kb/graylog2-web-interface/the-search-bar-explained
allow_leading_wildcard_searches = false
# Do you want to allow searches to be highlighted? Depending on the size of your messages this can be memory hungry and
# should only be enabled after making sure your elasticsearch cluster has enough memory.
allow_highlighting = false
# Settings to be passed to elasticsearch's client, overriding those in the provided elasticsearch_config_file.
# This must be the same as for your elasticsearch cluster:
#elasticsearch_cluster_name = graylog2
# you could also leave this out, but it makes it easier to identify the graylog2 client instance
#elasticsearch_node_name = graylog2-server
# we don't want the graylog2 server to store any data, or be a master node
#elasticsearch_node_master = false
#elasticsearch_node_data = false
# use a different port if you run multiple elasticsearch nodes on one machine
#elasticsearch_transport_tcp_port = 9350
# we don't need to run the embedded HTTP server here
#elasticsearch_http_enabled = false
elasticsearch_discovery_zen_ping_multicast_enabled = false
elasticsearch_discovery_zen_ping_unicast_hosts = 127.0.0.1:9300
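# With multicast discovery disabled, every elasticsearch node must be listed explicitly.
# A multi-node cluster would look like this (hypothetical hostnames):
#   elasticsearch_discovery_zen_ping_unicast_hosts = es-node1:9300,es-node2:9300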
# The following settings allow you to change the bind addresses for the elasticsearch client in graylog2.
# These settings are empty by default, letting elasticsearch choose automatically.
# Override them here or in the 'elasticsearch_config_file' if you need to bind to a special address.
# Refer to http://www.elasticsearch.org/guide/en/elasticsearch/reference/0.90/modules-network.html for special values here.
# elasticsearch_network_host =
# elasticsearch_network_bind_host =
# elasticsearch_network_publish_host =
# Analyzer (tokenizer) to use for the message and full_message fields. The "standard" filter usually is a good idea.
# All supported analyzers are: standard, simple, whitespace, stop, keyword, pattern, language, snowball, custom
# ElasticSearch documentation: http://www.elasticsearch.org/guide/reference/index-modules/analysis/
# Note that this setting only takes effect on newly created indices.
elasticsearch_analyzer = standard
# Batch size for the ElasticSearch output. This is the maximum (!) number of messages the ElasticSearch output
# module will get at once and write to ElasticSearch in a batch call. If the configured batch size has not been
# reached within output_flush_interval seconds, everything that is available will be flushed at once. Remember
# that every outputbuffer processor manages its own batch and performs its own batch write calls.
# ("outputbuffer_processors" variable)
output_batch_size = 25
# Flush interval (in seconds) for the ElasticSearch output. This is the maximum amount of time between two
# batches of messages written to ElasticSearch. It is only effective at all if your minimum number of messages
# for this time period is less than output_batch_size * outputbuffer_processors.
output_flush_interval = 1
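# Worked example for the values configured here: 3 outputbuffer processors with a batch size of 25
# can absorb 25 * 3 = 75 messages per batch round, so below roughly 75 messages per second it is
# the 1-second flush interval, not the batch size, that triggers the writes to ElasticSearch.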
# The number of parallel running processors.
# Raise this number if your buffers are filling up.
processbuffer_processors = 5
outputbuffer_processors = 3
# Wait strategy describing how buffer processors wait on a cursor sequence. (default: sleeping)
# Possible types:
#   - yielding
#     Compromise between performance and CPU usage.
#   - sleeping
#     Compromise between performance and CPU usage. Latency spikes can occur after quiet periods.
#   - blocking
#     High throughput, low latency, higher CPU usage.
#   - busy_spinning
#     Avoids syscalls which could introduce latency jitter. Best when threads can be bound to specific CPU cores.
processor_wait_strategy = blocking
# Size of internal ring buffers. Raise this if raising outputbuffer_processors does not help anymore.
# For optimum performance your LogMessage objects in the ring buffer should fit in your CPU L3 cache.
# Start the server with the --statistics flag to see buffer utilization.
# Must be a power of 2. (512, 1024, 2048, ...)
ring_size = 1024
# EXPERIMENTAL: Dead Letters
# Every failed indexing attempt is logged by default and made visible in the web interface. You can enable
# the experimental dead letters feature to write every message that was not successfully indexed into the
# MongoDB "dead_letters" collection to make sure that you never lose a message. The actual writing of dead
# letters should work fine already, but it is not heavily tested yet and will get more features in future
# releases.
dead_letters_enabled = false
# How many seconds to wait between marking the node as DEAD for possible load balancers and starting the actual
# shutdown process. Set to 0 if you have no status-checking load balancers in front.
lb_recognition_period_seconds = 3
# MongoDB Configuration
mongodb_useauth = false
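# Note: with mongodb_useauth = false the user and password below are ignored; set it to true
# if your MongoDB deployment requires authentication.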
mongodb_user = grayloguser
mongodb_password = 123
mongodb_host = 127.0.0.1
#mongodb_replica_set = localhost:27017,localhost:27018,localhost:27019
mongodb_database = graylog2
mongodb_port = 27017
# Raise this according to the maximum connections your MongoDB server can handle if you encounter MongoDB connection problems.
mongodb_max_connections = 100
# Multiplier for the number of threads allowed to block waiting for a MongoDB connection. Default: 5
# If mongodb_max_connections is 100 and mongodb_threads_allowed_to_block_multiplier is 5, then 500 threads can block. More than that and an exception will be thrown.
# http://api.mongodb.org/java/current/com/mongodb/MongoOptions.html#threadsAllowedToBlockForConnectionMultiplier
mongodb_threads_allowed_to_block_multiplier = 5
# Drools Rule File (used to rewrite incoming log messages)
# See: http://support.torch.sh/help/kb/graylog2-server/custom-message-rewritingprocessing
# rules_file = /etc/graylog2.drl
# Specify and uncomment this if you want to include links to the stream in your stream alert mails.
# This should define the fully qualified base URL to your web interface, exactly the same way as it is accessed by your users.
#
# transport_email_web_interface_url = https://graylog2.example.com
# HTTP proxy for outgoing HTTP calls
#