# nginx proxy configuration for elasticsearch

## curator_cron.sh
```bash
#!/bin/bash
########## Run as a cron job: ################
### Run at 02:30 every day:
# 30 2 * * * /home/ubuntu/logstash/curator_cron.sh &> /dev/null
##############################################

# Fast return
## Logstash
/usr/local/bin/curator -l /var/log/curator/curator.log delete --older-than 15
/usr/local/bin/curator -l /var/log/curator/curator.log close --older-than 14
/usr/local/bin/curator -l /var/log/curator/curator.log bloom --older-than 1
/usr/local/bin/curator -l /var/log/curator/curator.log snapshot --delete-older-than 15 --repository Repo-One

# Slow return
## Logstash
/usr/local/bin/curator -l /var/log/curator/curator.log optimize --older-than 1 --max_num_segments 1
/usr/local/bin/curator -l /var/log/curator/curator.log snapshot --older-than 2 --repository Untergeek
```
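The `snapshot` actions assume the repositories `Repo-One` and `Untergeek` are already registered with the cluster. If not, a filesystem repository can be created once up front; the path below is only an example and must be reachable from every node:

```bash
$ curl -XPUT 'http://localhost:9200/_snapshot/Repo-One' -d '{
    "type": "fs",
    "settings": { "location": "/data/backups/repo-one" }
}'
```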
## nginx proxy for feeds.mysite.fr
```nginx
# API usage: feeds.mysite.fr/index/type?q=*

server {
    listen      91.191.152.55:443;
    server_name feeds.mysite.fr;

    access_log  /var/log/nginx/elasticsearch.access.log;
    error_log   /var/log/nginx/elasticsearch.error.log;

    location / {

        # Deny the nodes shutdown API
        if ($request_filename ~ "_shutdown") {
            return 403;
            break;
        }

        # Deny access to the cluster API
        if ($request_filename ~ "_cluster") {
            return 403;
            break;
        }

        # Allow only GET, POST and HEAD requests
        if ($request_method !~ ^(GET|POST|HEAD)$ ) {
            return 405;
            break;
        }

        # Pass requests to Elasticsearch
        proxy_pass      http://localhost:9200;
        proxy_redirect  off;

        proxy_set_header X-Real-IP       $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host            $http_host;

        # For CORS Ajax
        add_header Access-Control-Allow-Origin      *;
        add_header Access-Control-Allow-Credentials true;
        add_header 'Access-Control-Allow-Methods'   'GET, POST, OPTIONS';

        # Route /index/type requests to the _search endpoint
        # (the query string is appended automatically)
        rewrite ^/([^/]+)/([^/]+)/?$ /$1/$2/_search break;
        rewrite_log on;
    }
}
```
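A quick way to see the rewrite in action (the index and type names are made up; note that the `listen` directive binds port 443 without an `ssl` flag, so the proxy speaks plain HTTP there):

```bash
$ curl 'http://feeds.mysite.fr:443/logs/event?q=*'
# nginx rewrites the URI and forwards the request to:
#   http://localhost:9200/logs/event/_search?q=*
```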
## elasticsearch.yml
```yaml
##################### Elasticsearch Configuration Example #####################

# This file contains an overview of various configuration settings,
# targeted at operations staff. Application developers should
# consult the guide at <http://elasticsearch.org/guide>.
#
# The installation procedure is covered at
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html>.
#
# Elasticsearch comes with reasonable defaults for most settings,
# so you can try it out without bothering with configuration.
#
# Most of the time, these defaults are just fine for running a production
# cluster. If you're fine-tuning your cluster, or wondering about the
# effect of a certain configuration option, please _do ask_ on the
# mailing list or IRC channel [http://elasticsearch.org/community].

# Any element in the configuration can be replaced with environment variables
# by placing them in ${...} notation. For example:
#
#node.rack: ${RACK_ENV_VAR}

# For information on supported formats and syntax for the config file, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/setup-configuration.html>

################################### Cluster ###################################

# Cluster name identifies your cluster for auto-discovery. If you're running
# multiple clusters on the same network, make sure you're using unique names.
#
cluster.name: log-cluster

#################################### Node #####################################

# Node names are generated dynamically on startup, so you're relieved
# of configuring them manually. You can tie this node to a specific name:
#
node.name: "elk-node-01"

# Every node can be configured to allow or deny being eligible as the master,
# and to allow or deny storing the data.
#
# Allow this node to be eligible as a master node (enabled by default):
#
#node.master: true
#
# Allow this node to store data (enabled by default):
#
#node.data: true

# You can exploit these settings to design advanced cluster topologies.
#
# 1. You want this node to never become a master node, only to hold data.
#    This will be the "workhorse" of your cluster.
#
#node.master: false
#node.data: true
#
# 2. You want this node to only serve as a master: to not store any data and
#    to have free resources. This will be the "coordinator" of your cluster.
#
#node.master: true
#node.data: false
#
# 3. You want this node to be neither master nor data node, but
#    to act as a "search load balancer" (fetching data from nodes,
#    aggregating results, etc.)
#
#node.master: false
#node.data: false

# Use the Cluster Health API [http://localhost:9200/_cluster/health], the
# Node Info API [http://localhost:9200/_nodes] or GUI tools
# such as <http://www.elasticsearch.org/overview/marvel/>,
# <https://github.com/karmi/elasticsearch-paramedic>,
# <https://github.com/lukas-vlcek/bigdesk> and
# <http://mobz.github.com/elasticsearch-head> to inspect the cluster state.

# A node can have generic attributes associated with it, which can later be used
# for customized shard allocation filtering, or allocation awareness. An attribute
# is a simple key-value pair, similar to node.key: value. Here is an example:
#
#node.rack: rack314

# By default, multiple nodes are allowed to start from the same installation
# location. To disable it, set the following:
#node.max_local_storage_nodes: 1

#################################### Index ####################################

# You can set a number of options (such as shard/replica options, mapping
# or analyzer definitions, translog settings, ...) for indices globally,
# in this file.
#
# Note that it makes more sense to configure index settings specifically for
# a certain index, either when creating it or by using the index templates API.
#
# See <http://elasticsearch.org/guide/en/elasticsearch/reference/current/index-modules.html> and
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/indices-create-index.html>
# for more information.

# Set the number of shards (splits) of an index (5 by default):
#
#index.number_of_shards: 5

# Set the number of replicas (additional copies) of an index (1 by default):
#
#index.number_of_replicas: 1

# Note that for development on a local machine, with small indices, it usually
# makes sense to "disable" the distributed features:
#
#index.number_of_shards: 1
#index.number_of_replicas: 0

# These settings directly affect the performance of index and search operations
# in your cluster. Assuming you have enough machines to hold shards and
# replicas, the rule of thumb is:
#
# 1. Having more *shards* enhances the _indexing_ performance and allows you to
#    _distribute_ a big index across machines.
# 2. Having more *replicas* enhances the _search_ performance and improves the
#    cluster _availability_.
#
# The "number_of_shards" is a one-time setting for an index.
#
# The "number_of_replicas" can be increased or decreased anytime,
# by using the Index Update Settings API.
#
# Elasticsearch takes care of load balancing, relocating, gathering the
# results from nodes, etc. Experiment with different settings to fine-tune
# your setup.

# Use the Index Status API (<http://localhost:9200/A/_status>) to inspect
# the index status.

#################################### Paths ####################################

# Path to directory containing configuration (this file and logging.yml):
#
#path.conf: /path/to/conf

# Path to directory where to store index data allocated for this node.
#
path.data: /data/data
#
# Can optionally include more than one location, causing data to be striped across
# the locations (a la RAID 0) on a file level, favouring locations with most free
# space on creation. For example:
#
#path.data: /path/to/data1,/path/to/data2

# Path to temporary files:
#
#path.work: /path/to/work

# Path to log files:
#
path.logs: /data/logs

# Path to where plugins are installed:
#
path.plugins: /data/plugins

#################################### Plugin ###################################

# If a plugin listed here is not installed for the current node, the node will not start.
#
#plugin.mandatory: mapper-attachments,lang-groovy

################################### Memory ####################################

# Elasticsearch performs poorly when the JVM starts swapping: you should ensure
# that it _never_ swaps.
#
# Set this property to true to lock the memory:
#
#bootstrap.mlockall: true

# Make sure that the ES_MIN_MEM and ES_MAX_MEM environment variables are set
# to the same value, and that the machine has enough memory to allocate
# for Elasticsearch, leaving enough memory for the operating system itself.
#
# You should also make sure that the Elasticsearch process is allowed to lock
# the memory, e.g. by using `ulimit -l unlimited`.

############################## Network And HTTP ###############################

# Elasticsearch, by default, binds itself to the 0.0.0.0 address, and listens
# on port [9200-9300] for HTTP traffic and on port [9300-9400] for node-to-node
# communication. (The range means that if the port is busy, it will automatically
# try the next port.)

# Set the bind address specifically (IPv4 or IPv6):
#
#network.bind_host: 0.0.0.0

# Set the address other nodes will use to communicate with this node. If not
# set, it is automatically derived. It must point to an actual IP address.
#
network.publish_host: 192.168.0.1

# Set both 'bind_host' and 'publish_host':
#
#network.host: 192.168.23.11

# Set a custom port for the node-to-node communication (9300 by default):
#
#transport.tcp.port: 9300

# Enable compression for all communication between nodes (disabled by default):
#
#transport.tcp.compress: true

# Set a custom port to listen for HTTP traffic:
#
#http.port: 9200

# Set a custom allowed content length:
#
#http.max_content_length: 100mb

# Disable HTTP completely:
#
#http.enabled: false

################################### Gateway ###################################

# The gateway allows for persisting the cluster state between full cluster
# restarts. Every change to the state (such as adding an index) will be stored
# in the gateway, and when the cluster starts up for the first time,
# it will read its state from the gateway.

# There are several types of gateway implementations. For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-gateway.html>.

# The default gateway type is the "local" gateway (recommended):
#
#gateway.type: local

# Settings below control how and when to start the initial recovery process on
# a full cluster restart (to reuse as much local data as possible when using a
# shared gateway).

# Allow recovery process after N nodes in a cluster are up:
#
#gateway.recover_after_nodes: 1

# Set the timeout to initiate the recovery process, once the N nodes
# from previous setting are up (accepts time value):
#
#gateway.recover_after_time: 5m

# Set how many nodes are expected in this cluster. Once these N nodes
# are up (and recover_after_nodes is met), begin recovery process immediately
# (without waiting for recover_after_time to expire):
#
#gateway.expected_nodes: 2

############################# Recovery Throttling #############################

# These settings allow you to control the process of shard allocation between
# nodes during initial recovery, replica allocation, rebalancing,
# or when adding and removing nodes.

# Set the number of concurrent recoveries happening on a node:
#
# 1. During the initial recovery
#
#cluster.routing.allocation.node_initial_primaries_recoveries: 4
#
# 2. During adding/removing nodes, rebalancing, etc.
#
#cluster.routing.allocation.node_concurrent_recoveries: 2

# Set to throttle throughput when recovering (e.g. 100mb; by default 20mb):
#
#indices.recovery.max_bytes_per_sec: 20mb

# Set to limit the number of open concurrent streams when
# recovering a shard from a peer:
#
#indices.recovery.concurrent_streams: 5

################################## Discovery ##################################

# Discovery infrastructure ensures nodes can be found within a cluster
# and a master node is elected. Multicast discovery is the default.

# Set to ensure a node sees N other master-eligible nodes to be considered
# operational within the cluster. It's recommended to set it to a higher value
# than 1 when running more than 2 nodes in the cluster.
#
#discovery.zen.minimum_master_nodes: 1

# Set the time to wait for ping responses from other nodes when discovering.
# Set this option to a higher value on a slow or congested network
# to minimize discovery failures:
#
#discovery.zen.ping.timeout: 3s

# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-zen.html>

# Unicast discovery allows you to explicitly control which nodes will be used
# to discover the cluster. It can be used when multicast is not present,
# or to restrict the cluster communication-wise.
#
# 1. Disable multicast discovery (enabled by default):
#
discovery.zen.ping.multicast.enabled: false
#
# 2. Configure an initial list of master nodes in the cluster
#    to perform discovery when new nodes (master or data) are started:
#
discovery.zen.ping.unicast.hosts: ["192.168.0.1", "192.168.0.2"]

# EC2 discovery allows you to use the AWS EC2 API in order to perform discovery.
#
# You have to install the cloud-aws plugin to enable EC2 discovery.
#
# For more information, see
# <http://elasticsearch.org/guide/en/elasticsearch/reference/current/modules-discovery-ec2.html>
#
# See <http://elasticsearch.org/tutorials/elasticsearch-on-ec2/>
# for a step-by-step tutorial.

# GCE discovery allows you to use the Google Compute Engine API in order to
# perform discovery.
#
# You have to install the cloud-gce plugin to enable GCE discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-gce>.

# Azure discovery allows you to use the Azure API in order to perform discovery.
#
# You have to install the cloud-azure plugin to enable Azure discovery.
#
# For more information, see <https://github.com/elasticsearch/elasticsearch-cloud-azure>.

################################## Slow Log ##################################

# Shard-level query and fetch threshold logging.

#index.search.slowlog.threshold.query.warn: 10s
#index.search.slowlog.threshold.query.info: 5s
#index.search.slowlog.threshold.query.debug: 2s
#index.search.slowlog.threshold.query.trace: 500ms

#index.search.slowlog.threshold.fetch.warn: 1s
#index.search.slowlog.threshold.fetch.info: 800ms
#index.search.slowlog.threshold.fetch.debug: 500ms
#index.search.slowlog.threshold.fetch.trace: 200ms

#index.indexing.slowlog.threshold.index.warn: 10s
#index.indexing.slowlog.threshold.index.info: 5s
#index.indexing.slowlog.threshold.index.debug: 2s
#index.indexing.slowlog.threshold.index.trace: 500ms

################################## GC Logging ################################

#monitor.jvm.gc.young.warn: 1000ms
#monitor.jvm.gc.young.info: 700ms
#monitor.jvm.gc.young.debug: 400ms

#monitor.jvm.gc.old.warn: 10s
#monitor.jvm.gc.old.info: 5s
#monitor.jvm.gc.old.debug: 2s

################################## Security ################################

# Uncomment if you want to disable JSONP as a valid return transport on the
# HTTP server. JSONP may pose a security risk, so disabling it unless you
# need it is recommended:
#
#http.jsonp.enable: false
```
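With these settings applied, the Cluster Health and Node Info APIs mentioned in the comments above make for a quick smoke test (using the publish address configured here):

```bash
$ curl 'http://192.168.0.1:9200/_cluster/health?pretty'   # should report "cluster_name" : "log-cluster"
$ curl 'http://192.168.0.1:9200/_nodes?pretty'            # "elk-node-01" should appear in the node list
```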
## nginx proxy with HTTP basic auth
```nginx
server {
    listen      8080;
    server_name elasticsearch;

    client_max_body_size 50M;

    error_log   /var/log/nginx/elasticsearch-errors.log;
    access_log  /var/log/nginx/elasticsearch.log;

    location / {

        # Deny Nodes Shutdown API
        if ($request_filename ~ "_shutdown") {
            return 403;
            break;
        }

        # Deny access to Cluster API
        if ($request_filename ~ "_cluster") {
            return 403;
            break;
        }

        # Pass requests to ElasticSearch
        proxy_pass      http://localhost:9200;
        proxy_redirect  off;

        proxy_set_header X-Real-IP       $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host            $http_host;

        # For CORS Ajax
        proxy_pass_header Access-Control-Allow-Origin;
        proxy_pass_header Access-Control-Allow-Methods;
        proxy_hide_header Access-Control-Allow-Headers;
        add_header Access-Control-Allow-Headers 'X-Requested-With, Content-Type';
        add_header Access-Control-Allow-Credentials true;

        # Authorize access
        auth_basic           "ElasticSearch";
        auth_basic_user_file /usr/local/etc/elasticsearch/passwords;
    }
}
```
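The `auth_basic_user_file` referenced above is not part of this gist; one common way to create it is `htpasswd` from apache2-utils (the username is just an example):

```bash
$ sudo apt-get install apache2-utils
$ sudo htpasswd -c /usr/local/etc/elasticsearch/passwords es_user   # -c creates the file; prompts for a password
```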
## nginx load-balancing proxy
```nginx
upstream elasticsearch {
    server 10.0.0.1:9200;
    server 10.0.0.2:9200;
    server 10.0.0.3:9200;

    keepalive 64;
}

server {
    listen      9200;
    server_name search.proxy;

    client_max_body_size 50m;

    location / {
        proxy_pass      http://elasticsearch;
        proxy_redirect  off;

        # upstream keepalive requires HTTP/1.1 and a cleared Connection header
        proxy_http_version 1.1;
        proxy_set_header Connection "";

        proxy_set_header X-Real-IP       $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header Host            $http_host;

        proxy_pass_header Access-Control-Allow-Origin;
        proxy_pass_header Access-Control-Allow-Methods;
        proxy_hide_header Access-Control-Allow-Headers;
        add_header Access-Control-Allow-Headers 'X-Requested-With, Content-Type';
        add_header Access-Control-Allow-Credentials true;
    }
}
```
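nginx distributes requests across the upstream in round-robin fashion by default. Assuming `search.proxy` resolves to this server, hitting the root endpoint a few times should show the responding node's name change between requests:

```bash
# the "name" field in each response cycles through the three backend nodes
$ for i in 1 2 3; do curl -s 'http://search.proxy:9200/'; echo; done
```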
## ELK stack on Docker
## Install Docker
```bash
# install the backported kernel
$ sudo apt-get update
$ sudo apt-get install linux-image-generic-lts-raring linux-headers-generic-lts-raring

$ sudo apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
$ sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"
$ sudo apt-get update
$ sudo apt-get install lxc-docker

# reboot
$ sudo reboot
```
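A quick sanity check after the reboot (standard Docker CLI commands) confirms the daemon is up before pulling any images:

```bash
$ sudo docker version   # client and daemon versions
$ sudo docker info      # daemon status, storage driver, container counts
```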
## Install elasticsearch
Link: http://dockerfile.github.io/#/elasticsearch
```bash
$ sudo docker pull dockerfile/elasticsearch
$ sudo mkdir -p /es/data /es/config /es/logs /es/plugins
$ sudo vim /es/config/elasticsearch.yml

$ sudo docker run -d -m 1024m --name es -p 192.168.23.11:9200:9200 -p 192.168.23.11:9300:9300 -v /es:/data dockerfile/elasticsearch /elasticsearch/bin/elasticsearch -Des.config=/data/config/elasticsearch.yml -Xms64m -Xmx256m

# OR

$ sudo docker run -d -m 1224m --name es -p 9200:9200 -p 9300:9300 -v /es:/data dockerfile/elasticsearch /elasticsearch/bin/elasticsearch -Des.config=/data/config/elasticsearch.yml -Xms64m -Xmx1200m
```
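If the container started cleanly, Elasticsearch should answer on the published port (the IP is from the first `docker run` variant above):

```bash
$ sudo docker logs es   # watch the node come up
$ curl 'http://192.168.23.11:9200/_cluster/health?pretty'
```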
## Install elasticsearch plugins
```bash
$ sudo docker run -d -v /es:/data dockerfile/elasticsearch /elasticsearch/bin/plugin -install karmi/elasticsearch-paramedic
$ sudo docker run -d -v /es:/data dockerfile/elasticsearch /elasticsearch/bin/plugin -install royrusso/elasticsearch-HQ
$ sudo docker run -d -v /es:/data dockerfile/elasticsearch /elasticsearch/bin/plugin -install mobz/elasticsearch-head
$ sudo docker run -d -v /es:/data dockerfile/elasticsearch /elasticsearch/bin/plugin -install lukas-vlcek/bigdesk/2.4.0
```
Visit these URLs to access the plugins:
```
x.x.x.x:9200/_plugin/head
x.x.x.x:9200/_plugin/HQ
x.x.x.x:9200/_plugin/paramedic
```
NOTE: edit the Elasticsearch config file (`/es/config/elasticsearch.yml`):
```yaml
cluster.name: log-cluster
node.name: "elk-node-01"                    # if you do not want super-hero names
path.data: /data/data
path.logs: /data/logs
path.plugins: /data/plugins
network.publish_host: IP of machine         # otherwise the Docker containers can't communicate with each other
discovery.zen.ping.multicast.enabled: false # disable multicast
discovery.zen.ping.unicast.hosts: ["192.168.0.1", "192.168.0.2"]
```
## Install Kibana
```bash
$ sudo docker pull arcus/kibana
$ sudo docker run -p 80:80 --name kibana -e ES_HOST=192.168.0.1 -e ES_PORT=9200 arcus/kibana
```
## Install Redis and Redis-cli
Link: https://github.com/dockerfile/redis
Link: http://dockerfile.github.io/#/redis
```bash
$ sudo docker pull dockerfile/redis
```
Redis server:
```bash
$ sudo docker run -d --name redis -p 0.0.0.0:6379:6379 dockerfile/redis
```
Redis CLI:
```bash
$ sudo docker run -it dockerfile/redis bash -c 'redis-cli -h 192.168.0.1'
```
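A one-off `ping` through the same image verifies the server container is reachable (the IP is the Docker host's publish address from the examples above):

```bash
$ sudo docker run -it dockerfile/redis bash -c 'redis-cli -h 192.168.0.1 ping'
PONG
```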
## Install logstash
```bash
$ docker build -t logstash .
$ docker run -d --name lshipper logstash agent -f logstash-shipper.conf
$ docker run -d --name lindexer logstash agent -f logstash-indexer.conf
```
logstash-shipper:
```bash
$ sudo docker run -d -v /home/ubuntu/logstash/:/data:ro --name lshipper custom/logstash agent -f /data/logstash-shipper.conf
```
logstash-indexer:
```bash
$ sudo docker run -d -v /home/ubuntu/logstash:/data --name lindexer custom/logstash agent -f /data/logstash-indexer.conf
```
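Neither config file is included in the gist. A minimal sketch of what they might contain, assuming the Redis container above acts as the broker (the file path, the `logstash` list key and the syslog input are all just examples):

```
# logstash-shipper.conf: tail a local log file and push events to Redis
input {
  file { path => "/var/log/syslog" }
}
output {
  redis { host => "192.168.0.1" data_type => "list" key => "logstash" }
}

# logstash-indexer.conf: pop events from Redis and index them into Elasticsearch
input {
  redis { host => "192.168.0.1" data_type => "list" key => "logstash" }
}
output {
  elasticsearch { host => "192.168.0.1" port => 9200 protocol => "http" }
}
```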
## Install collectd
```bash
sudo apt-get install build-essential librrd2-dev libsensors4-dev libsnmp-dev libgcrypt-dev chkconfig
wget http://collectd.org/files/collectd-5.2.0.tar.gz
tar zxvf collectd-5.2.0.tar.gz
cd collectd-5.2.0
./configure --prefix=/usr --sysconfdir=/etc --localstatedir=/var --libdir=/usr/lib --mandir=/usr/share/man --enable-all-plugins
make
make install
```
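Before wiring collectd up to anything, the freshly built binary can syntax-check its own configuration; `-t` only parses the config and exits:

```bash
$ collectd -t   # test /etc/collectd.conf and exit (non-zero status on errors)
```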
### Redis Metrics
Link: https://github.com/powdahound/redis-collectd-plugin

## Elasticsearch Curator
Link: https://github.com/elasticsearch/curator
```bash
# Fast return
## Logstash
/usr/local/bin/curator -l /var/log/curator/curator.log delete --older-than 15
/usr/local/bin/curator -l /var/log/curator/curator.log close --older-than 14
/usr/local/bin/curator -l /var/log/curator/curator.log bloom --older-than 1
/usr/local/bin/curator -l /var/log/curator/curator.log snapshot --delete-older-than 15 --repository Repo-One

# Slow return
## Logstash
/usr/local/bin/curator -l /var/log/curator/curator.log optimize --older-than 1 --max_num_segments 1
/usr/local/bin/curator -l /var/log/curator/curator.log snapshot --older-than 2 --repository Untergeek
```
### Set up curator as a cron job
```
30 2 * * * ~/bin/curator.sh &> /dev/null
```
## Monit configuration
Edit the Monit control file:
```bash
sudo vim /etc/monit/monitrc
```

```
# ###################
# Monit Configuration
# ###################

set daemon 120
    with start delay 240

set alert [email protected]
set mailserver localhost

set httpd port 2812 and
    use address localhost
    allow localhost

check system search.elasticsearch.org
    if loadavg (5min) > 10 then alert
    if memory usage > 80% then alert
    if cpu usage (user) > 90% then alert

check filesystem data with path /var
    if space usage > 80% for 5 times within 15 cycles then alert
    if inode usage > 90% then alert
    if space usage > 99% then stop
    if inode usage > 99% then stop
    group filesystem

check host elasticsearch with address 127.0.0.1
    if failed url http://127.0.0.1:9200/ with timeout 15 seconds then alert
    group elasticsearch

check process elasticsearch1 with pidfile /var/run/elasticsearch/elasticsearch1.pid
    start program = "/usr/bin/sudo -H -u elasticsearch /usr/local/lib/elasticsearch-0.15.2/bin/elasticsearch -p /var/run/elasticsearch/elasticsearch1.pid" with timeout 60 seconds
    stop program  = "/bin/kill $(/bin/cat /var/run/elasticsearch/elasticsearch1.pid)"
    if cpu > 90% for 5 cycles then restart
    if totalmem > 2 GB for 5 cycles then restart
    if loadavg(5min) greater than 10 for 8 cycles then stop
    if 3 restarts within 5 cycles then timeout
    group elasticsearch

check process varnishd with pidfile /var/run/varnish/varnishd.pid
    start program = "/usr/sbin/varnishd -f /etc/varnish/default.vcl -a 0.0.0.0:80 -P /var/run/varnish/varnishd.pid" with timeout 60 seconds
    stop program  = "/bin/kill $(/bin/cat /var/run/varnish/varnishd.pid)"
    if cpu > 90% for 5 cycles then restart
    if totalmem > 500 MB for 5 cycles then restart
    if loadavg(5min) greater than 10 for 8 cycles then stop
    if 3 restarts within 5 cycles then timeout
    group elasticsearch

check process post_receive_server with pidfile /var/applications/hide/tmp/thin.pid
    start program = "/usr/bin/sudo -H -u elasticsearch /usr/bin/env BUNDLE_GEMFILE=/var/applications/hide/Gemfile /usr/bin/bundle exec thin --chdir /var/applications/hide --rackup /var/applications/hide/config.ru --port 5000 --log /var/applications/hide/log/thin.log --pid /var/applications/hide/tmp/thin.pid --environment production --tag hide --daemonize start" with timeout 60 seconds
    stop program  = "/bin/kill $(/bin/cat /var/applications/hide/tmp/thin.pid)"
    if cpu > 90% for 5 cycles then restart
    if totalmem > 2 GB for 5 cycles then restart
    if loadavg(5min) greater than 10 for 8 cycles then stop
    if 3 restarts within 5 cycles then timeout
    group git
```
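After editing monitrc, the monit CLI itself can validate and apply the file:

```bash
$ sudo monit -t        # syntax-check the control file
$ sudo monit reload    # re-read the configuration
$ sudo monit status    # report the state of every check
```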
## nginx proxy routing each user to their own index
```nginx
# Run me with:
#
#     $ nginx -p /path/to/this/file/ -c nginx.conf
#
# All requests are then routed to the authenticated user's index, so
#
#     GET http://user:password@localhost:8080/_search?q=*
#
# is rewritten to:
#
#     GET http://localhost:9200/user/_search?q=*

worker_processes 1;
pid              nginx.pid;

events {
    worker_connections 1024;
}

http {
    server {
        listen      8080;
        server_name search.example.com;

        error_log   elasticsearch-errors.log;
        access_log  elasticsearch.log;

        location / {

            # Deny access to Cluster API
            if ($request_filename ~ "_cluster") {
                return 403;
                break;
            }

            # Pass requests to ElasticSearch
            proxy_pass      http://localhost:9200;
            proxy_redirect  off;

            proxy_set_header X-Real-IP       $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header Host            $http_host;

            # Authorize access
            auth_basic           "ElasticSearch";
            auth_basic_user_file passwords;

            # Route all requests to the authorized user's own index
            rewrite ^(.*)$ /$remote_user$1 break;
            rewrite_log on;

            return 403;
        }
    }
}
```