Skip to content

Instantly share code, notes, and snippets.

@theist
Last active May 17, 2017 11:31
Show Gist options
  • Save theist/8388085 to your computer and use it in GitHub Desktop.
Save theist/8388085 to your computer and use it in GitHub Desktop.
Changed to allow a choice between "local" and EC2 setup. To do a local setup you will need an id_rsa key created and authorized for your user. Your user must also have passwordless sudo (user or usergroup ALL=(ALL:ALL) NOPASSWD: ALL). This is useful for making local tests and setup changes without wasting EC2 resources.
#!/bin/bash
####################################
# BASIC REQUIREMENTS
# http://graphite.wikidot.com/installation
# Forked from: http://geek.michaelgrace.org/2011/09/how-to-install-graphite-on-ubuntu/
# Ubuntu 11.10 Oneiric Ocelot
# Forked from https://gist.github.com/1287170
# Modified to use NGinx + uwsgi instead of Apache, as well as memcached and supervisord, incorporating ideas from
# http://blog.adku.com/2011/10/scalable-realtime-stats-with-graphite.html
# Uses python based statsite / twisted instead of statsd / node.js
# Added NRPE listener for Nagios / OpsView support
# Fixed some bugs in supervisord configuration from post above
# Designed to fire up an entirely new instance on EC2, mounting a separate EBS volume for storage -- modify values below as appropriate
# This script assumes you have the EC2 tools properly configured in your environment
# and that you have a local file with the SSH key to be used on the new machine
# Last tested & updated 11/12/2011
#
# Tested with ubuntu 13.10 also
####################################
#####################################################################
# Customize these variables for your environment #
#####################################################################
# Any first argument switches the script into "local" mode: install onto
# this machine instead of firing up a fresh EC2 instance.
# ${1:-} guards against an unset $1, and -n with quoting avoids the
# word-splitting / empty-operand failures of the bare `[ $1 ]` test.
if [ -n "${1:-}" ]; then # assume local
export IS_LOCAL=true
fi
ami_id='ami-a7f539ce' #32-bit ubuntu 11.10
security_groups='-g sg-cbf8e7a2 -g sg-c7f6e9ae' #graphite-statsd-linux AND linux-ssh
instance_user='ubuntu' #other images use root
zone_id='us-east-1d'
instance_type='c1.medium'
volume_size_gb=40
volume_mount_point='/data'
name='Graphite Performance Monitoring (Ubuntu Oneiric)'
# Local installs authenticate with the user's own key; EC2 uses the .pem
# downloaded from AWS.
if [ -n "${IS_LOCAL:-}" ]; then
key_path=~/.ssh/id_rsa
else
key_path=~/mykey.pem
fi
# Hostname nginx will answer for.
server_name='foo.bar.com'
# Worker count and memcached sizing for this box.
cores=2
memcached_mb=256
# Where carbon-relay forwards datapoints. When every destination is a
# carbon-cache, this list must match the webapp's CARBONLINK_HOSTS
# setting exactly (same instances, same order!).
carbon_relay_destinations='127.0.0.1:2004'
# Comma separated carbon-cache servers, e.g. carbon01, carbon02, carbon03
carbon_cache_servers='127.0.0.1'
# Extra memcached hosts for clustered setups. MUST keep the TRAILING
# comma and never use loopback, e.g. 'carbon01:11211', 'carbon02:11211',
graphite_additional_memcache_hosts=''
# Extra graphite-web cluster servers; same trailing-comma / no-loopback
# rules, e.g. 'carbon01:7001', 'carbon02:7001',
graphite_additional_cluster_servers=''
# Toggle carbon-relay support.
relay_enabled='false'
# Seconds between statsite flushes (default 10; use 1 for realtime).
statsite_flush_interval=10
# Whisper retention schedule:
#   1s  * 1209600 points = 2 weeks  of 1-second granularity
#   10s * 864000  points = 2 months of 10-second granularity
#   60s * 259200  points = 6 months of 1-minute granularity
#   10m * 262974  points = ~3 years of 10-minute granularity
graphite_retentions='1:1209600,10:864000,60:259200,600:262974'
# Seconds graphite caches rendered data in memcached (1 = realtime,
# default is 60).
graphite_memcache_duration=20
#####################################################################
# Fire up the EC2 instance (or prepare the local machine) #
#####################################################################
if [ -n "${IS_LOCAL:-}" ]; then
# Local mode: target the current user/host and just create the data dir.
echo "local inst"
instance_user=$(whoami)
instance_host=localhost
instance_ip='127.0.0.1'
sudo mkdir -p "$volume_mount_point"
else
# $security_groups is intentionally unquoted: it carries multiple
# "-g sg-..." flags that must word-split into separate arguments.
instance_id=$(ec2-run-instances "$ami_id" -k "$EC2_KEYPAIR" -t "$instance_type" -z "$zone_id" $security_groups | awk '/INSTANCE/{print $2}')
ec2-create-tags "$instance_id" --tag Name="$name"
volume_id=$(ec2-create-volume -s "$volume_size_gb" -z "$zone_id" | awk '/VOLUME/{print $2}')
# Poll until the instance is running. The last $instance_description is
# kept around: hostname / IP are parsed out of it below, and they are
# only populated once the instance reaches the "running" state.
while true; do
instance_description=$(ec2-describe-instances "$instance_id")
INST_ISREADY=$(echo "$instance_description" | awk '/^INSTANCE/ {print $6}')
[ "$INST_ISREADY" = "running" ] && break
echo 'Waiting for instance to start...'
sleep 5
done
# Poll until the EBS volume can be attached.
while true; do
VOL_ISREADY=$(ec2-describe-volumes "$volume_id" | awk '/^VOLUME/ {print $5}')
[ "$VOL_ISREADY" = "available" ] && break
echo 'Waiting for volume to become available...'
sleep 5
done
ec2-attach-volume "$volume_id" --instance "$instance_id" --device /dev/sdf
instance_host=$(echo "$instance_description" | awk '/INSTANCE/{print $4}')
instance_ip=$(echo "$instance_description" | awk '/INSTANCE/{print $15}')
echo "Created instance ${instance_id} / ${instance_host} / ${instance_ip}"
# Format the attached EBS volume as XFS and mount it on the new instance.
# Unquoted heredoc delimiter: $volume_mount_point expands locally before
# the commands are shipped to the remote host.
ssh -i "$key_path" -o StrictHostKeyChecking=no "$instance_user@$instance_host" <<EOF
sudo apt-get install -y xfsprogs
grep -q xfs /proc/filesystems || sudo modprobe xfs
sudo mkfs.xfs /dev/xvdf
echo "/dev/xvdf $volume_mount_point xfs noatime 0 0" | sudo tee -a /etc/fstab
sudo mkdir -m 000 $volume_mount_point
sudo mount $volume_mount_point
df -h
EOF
fi
#run requisite installers
#####################################################################
# Install support packages #
#####################################################################
# Remote batch: add the nginx PPA, refresh apt, and install the Python
# build toolchain, supervisor, memcached, pip (via easy_install) and the
# libraries the opsview agent needs. The heredoc contains no $ variables,
# so it is shipped verbatim.
ssh -i "$key_path" "$instance_user@$instance_host" <<EOF
sudo add-apt-repository ppa:nginx/stable
sudo apt-get update -y
#sudo apt-get upgrade
sudo apt-get install python-setuptools python-dev -y
sudo apt-get install supervisor memcached -y
sudo easy_install pip
sudo apt-get install libgetopt-mixed-perl libmcrypt4 -y
sudo apt-get install libssl0.9.8 libxml2-dev -y
sudo apt-get install curl -y
EOF
#####################################################################
# Install opsview agent which listens on 5666
# Install nginx, uwsgi - web serving layer
#####################################################################
# nginx is managed by supervisord later on, so it is removed from the
# normal rc.d boot sequence; uwsgi comes from pip since Oneiric shipped
# no package for it.
ssh -i "$key_path" "$instance_user@$instance_host" <<EOF
curl -o opsview-agent_3.13.1.6691-1lucid1_i386.deb http://downloads.opsera.com/opsview-community/latest/apt/pool/main/o/opsview-base/opsview-agent_3.13.1.6691-1lucid1_i386.deb
sudo dpkg -i opsview-agent_3.13.1.6691-1lucid1_i386.deb
sudo apt-get -f install nginx-full -y
sudo update-rc.d -f nginx remove
sudo pip install uwsgi
EOF
#####################################################################
# Install statsite, graphite-web, carbon, and whisper - stats layer #
#####################################################################
# Download the graphite 0.9.9 tarballs with pip (-d = download only),
# unpack and install each one, and pin the Django/Twisted versions the
# 0.9.9 webapp is known to work with.
# NOTE: the heredoc delimiter is unquoted on purpose so that
# $volume_mount_point in the symlink command expands locally.
ssh -i "$key_path" "$instance_user@$instance_host" <<EOF
sudo mkdir -p /tmp/installation
sudo pip install -d /tmp/installation graphite-web==0.9.9 carbon==0.9.9 whisper==0.9.9
sudo pip install statsite==0.4.0
sudo pip install Django==1.2.7 python-memcached==1.47 Twisted==11.0.0 txAMQP==0.5 zope.interface==3.8.0 django-tagging
cd /tmp/installation
sudo tar xvf whisper-0.9.9.tar.gz
sudo tar xvf carbon-0.9.9.tar.gz
sudo tar xvf graphite-web-0.9.9.tar.gz
cd /tmp/installation/whisper-0.9.9
sudo python setup.py install
cd /tmp/installation/carbon-0.9.9
sudo python setup.py install
cd /tmp/installation/graphite-web-0.9.9
sudo python setup.py install
#since the data_dir is being changed in config, symlink to our storage drive, otherwise web front end will not find data
sudo ln -s $volume_mount_point /opt/graphite/storage/whisper
EOF
####################################
#Write to tmp and copy config files!
####################################
####################################
# /etc/nginx/sites-enabled/graphite
####################################
# Render the nginx vhost locally, copy it over, then move it into place
# with sudo (scp cannot write to root-owned paths directly). All requests
# are proxied to the uwsgi-hosted graphite webapp on 127.0.0.1:3031.
temp_path=/tmp/graphite
echo "server {
listen 80 default;
server_name $server_name;
access_log /var/log/nginx/graphite.access.log main;
# http://flask.pocoo.org/docs/deploying/uwsgi/
# http://readthedocs.org/docs/uwsgi/en/latest/features/magic-variables.html
# http://readthedocs.org/docs/uwsgi/en/latest/examples.html
location / {
include uwsgi_params;
uwsgi_pass 127.0.0.1:3031;
}
}" | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /etc/nginx/sites-enabled/graphite"
#rm $temp_path
####################################
# /etc/nginx/nginx.conf
####################################
# Main nginx config. "daemon off" is required because supervisord manages
# the process. The \$ escapes keep nginx's own $variables out of shell
# expansion; only $cores is substituted here.
temp_path=/tmp/nginx.conf
echo "daemon off;
user www-data;
worker_processes $cores;
error_log /var/log/nginx/error.log;
pid /var/run/nginx.pid;
events {
worker_connections 1024;
}
http {
log_format main '\$remote_addr - \$remote_user [\$time_local] \"\$request\" '
'\$status \$body_bytes_sent \"\$http_referer\" '
'\"\$http_user_agent\" \"\$http_x_forwarded_for\" \$request_time \$upstream_response_time';
add_header P3P \"CP=\\\"CAO PSA OUR\\\"\";
include /etc/nginx/mime.types;
access_log /var/log/nginx/access.log main;
sendfile on;
keepalive_timeout 65;
tcp_nodelay on;
gzip on;
gzip_disable \"MSIE [1-6]\.(?!.*SV1)\";
include /etc/nginx/conf.d/*.conf;
include /etc/nginx/sites-enabled/graphite;
}
" | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /etc/nginx/nginx.conf"
#rm $temp_path
####################################
# /etc/supervisor/supervisord.conf
####################################
# When relaying is disabled the [program:carbon-relay] section is written
# commented out ("; " prefix); when enabled the prefix is empty and the
# section becomes live.
line_prefix='; '
if [ "$relay_enabled" = 'true' ]; then
line_prefix=''
fi
temp_path=/tmp/supervisord.conf
# NOTE(review): the memcached command below passes "logfile /var/log/..."
# as positional arguments; memcached itself takes no logfile argument, so
# this likely needs supervisord's stdout_logfile instead -- verify on the
# target host before relying on it.
echo "; supervisor config file
[unix_http_server]
file=/var/run/supervisor.sock ; (the path to the socket file)
chmod=0700 ; sockef file mode (default 0700)
[supervisord]
logfile=/var/log/supervisor/supervisord.log ; (main log file;default \$CWD/supervisord.log)
pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
childlogdir=/var/log/supervisor ; ('AUTO' child log dir, default \$TEMP)
; the below section must remain in the config file for RPC
; (supervisorctl/web interface) to work, additional interfaces may be
; added by defining them in separate rpcinterface: sections
[rpcinterface:supervisor]
supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface
[supervisorctl]
serverurl=unix:///var/run//supervisor.sock ; use a unix:// URL for a unix socket
; The [include] section can just contain the \"files\" setting. This
; setting can list multiple files (separated by whitespace or
; newlines). It can also contain wildcards. The filenames are
; interpreted as relative to this file. Included files *cannot*
; include files themselves.
[include]
files = /etc/supervisor/conf.d/*.conf
; Spinning up our web servers
[program:nginx]
command=/usr/sbin/nginx
process_name=%(program_name)s
autostart=true
autorestart=true
stopsignal=QUIT
priority=100
[program:uwsgi-graphite]
command=/usr/local/bin/uwsgi --socket 127.0.0.1:3031 --master --processes $cores --limit-as 512 --chdir=/opt/graphite/webapp --env DJANGO_SETTINGS_MODULE=graphite.settings --module='django.core.handlers.wsgi:WSGIHandler()'
process_name=%(program_name)s
autostart=true
autorestart=true
stopsignal=QUIT
priority=100
; Spinning up memcached - to cache statsd image requests
[program:memcached]
command=/usr/bin/memcached -m $memcached_mb logfile /var/log/memcached.log
process_name=%(program_name)s
autostart=true
autorestart=true
user=nobody
priority=100
[program:carbon-cache]
command=python /opt/graphite/bin/carbon-cache.py --debug start
process_name=%(program_name)s
autostart=true
autorestart=true
stopsignal=TERM
priority=300
${line_prefix}[program:carbon-relay]
${line_prefix}command=python /opt/graphite/bin/carbon-relay.py --debug start
${line_prefix}process_name=%(program_name)s
${line_prefix}autostart=true
${line_prefix}autorestart=true
${line_prefix}stopsignal=QUIT
${line_prefix}priority=300
[program:statsite]
command=/usr/local/bin/statsite -c /etc/statsite.conf
process_name=%(program_name)s
autostart=true
autorestart=true
priority=999
" | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /etc/supervisor/supervisord.conf"
#rm $temp_path
####################################
# /etc/memcached.conf
####################################
# Stock Debian memcached config, written verbatim (single quotes: nothing
# is expanded).
# NOTE(review): -m 4096 contradicts the "64 megs" comment above it and
# ignores $memcached_mb (which is only used by supervisord) -- confirm
# the intended cache size.
temp_path=/tmp/memcached.conf
echo '# memcached default config file
# 2003 - Jay Bonci <[email protected]>
# This configuration file is read by the start-memcached script provided as
# part of the Debian GNU/Linux distribution.
# Run memcached as a daemon. This command is implied, and is not needed for the
# daemon to run. See the README.Debian that comes with this package for more
# information.
-d
# Log memcacheds output to /var/log/memcached
logfile /var/log/memcached.log
# Be verbose
# -v
# Be even more verbose (print client commands as well)
# -vv
# Start with a cap of 64 megs of memory. Its reasonable, and the daemon default
# Note that the daemon will grow to this size, but does not start out holding this much
# memory
-m 4096
# Default connection port is 11211
-p 11211
# Run the daemon as root. The start-memcached will default to running as root if no
# -u command is present in this config file
-u nobody
# Specify which IP address to listen on. The default is to listen on all IP addresses
# This parameter is one of the only security measures that memcached has, so make sure
# its listening on a firewalled interface.
-l 127.0.0.1
# Limit the number of simultaneous incoming connections. The daemon default is 1024
# -c 1024
# Lock down all paged memory. Consult with the README and homepage before you do this
# -k
# Return error when memory is exhausted (rather than removing items)
# -M
# Maximize core file limit
# -r
' | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /etc/memcached.conf"
#rm $temp_path
####################################
# /etc/statsite.conf
####################################
# statsite: UDP collector on 8125, flushing aggregates to carbon's line
# receiver on 2003 every $statsite_flush_interval seconds.
temp_path=/tmp/statsite.conf
echo "# Settings for the 'collector' which is the UDP listener
[collector]
host = 0.0.0.0
port = 8125
# Specify settings for the metrics 'store' which is where graphite is
[store]
host = 127.0.0.1
port = 2003
[flush]
interval = $statsite_flush_interval
" | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /etc/statsite.conf"
#rm $temp_path
####################################
# /opt/graphite/conf/carbon.conf
####################################
# NOTE(review): line_prefix is computed here but never referenced in the
# carbon.conf template below (unlike supervisord.conf) -- the [relay]
# section is always written uncommented; confirm whether that is intended.
line_prefix='# '
if [ "$relay_enabled" = 'true' ]; then
line_prefix=''
fi
temp_path=/tmp/carbon.conf
# Render carbon.conf; the \$ escapes keep the documented GRAPHITE_*
# variables literal while $volume_mount_point and
# $carbon_relay_destinations are substituted.
echo "[cache]
# Configure carbon directories.
#
# OS environment variables can be used to tell carbon where graphite is
# installed, where to read configuration from and where to write data.
#
# GRAPHITE_ROOT - Root directory of the graphite installation.
# Defaults to ../
# GRAPHITE_CONF_DIR - Configuration directory (where this file lives).
# Defaults to \$GRAPHITE_ROOT/conf/
# GRAPHITE_STORAGE_DIR - Storage directory for whipser/rrd/log/pid files.
# Defaults to \$GRAPHITE_ROOT/storage/
#
# To change other directory paths, add settings to this file. The following
# configuration variables are available with these default values:
#
# STORAGE_DIR = \$GRAPHITE_STORAGE_DIR
# LOCAL_DATA_DIR = STORAGE_DIR/whisper/
# WHITELISTS_DIR = STORAGE_DIR/lists/
# CONF_DIR = STORAGE_DIR/conf/
# LOG_DIR = STORAGE_DIR/log/
# PID_DIR = STORAGE_DIR/
#
# For FHS style directory structures, use:
#
# STORAGE_DIR = /var/lib/carbon/
# CONF_DIR = /etc/carbon/
# LOG_DIR = /var/log/carbon/
# PID_DIR = /var/run/
#
LOCAL_DATA_DIR = $volume_mount_point
#See /opt/graphite/webapp/local_settings.py
# Specify the user to drop privileges to
# If this is blank carbon runs as the user that invokes it
# This user must have write access to the local data directory
USER =
# Limit the size of the cache to avoid swapping or becoming CPU bound.
# Sorts and serving cache queries gets more expensive as the cache grows.
# Use the value \"inf\" (infinity) for an unlimited cache size.
MAX_CACHE_SIZE = inf
# Limits the number of whisper update_many() calls per second, which effectively
# means the number of write requests sent to the disk. This is intended to
# prevent over-utilizing the disk and thus starving the rest of the system.
# When the rate of required updates exceeds this, then carbon's caching will
# take effect and increase the overall throughput accordingly.
MAX_UPDATES_PER_SECOND = 1000
# Softly limits the number of whisper files that get created each minute.
# Setting this value low (like at 50) is a good way to ensure your graphite
# system will not be adversely impacted when a bunch of new metrics are
# sent to it. The trade off is that it will take much longer for those metrics'
# database files to all get created and thus longer until the data becomes usable.
# Setting this value high (like \"inf\" for infinity) will cause graphite to create
# the files quickly but at the risk of slowing I/O down considerably for a while.
MAX_CREATES_PER_MINUTE = 50
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2003
# Set this to True to enable the UDP listener. By default this is off
# because it is very common to run multiple carbon daemons and managing
# another (rarely used) port for every carbon instance is not fun.
ENABLE_UDP_LISTENER = False
UDP_RECEIVER_INTERFACE = 0.0.0.0
UDP_RECEIVER_PORT = 2003
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2004
# Per security concerns outlined in Bug #817247 the pickle receiver
# will use a more secure and slightly less efficient unpickler.
# Set this to True to revert to the old-fashioned insecure unpickler.
USE_INSECURE_UNPICKLER = False
CACHE_QUERY_INTERFACE = 0.0.0.0
CACHE_QUERY_PORT = 7002
# Set this to False to drop datapoints received after the cache
# reaches MAX_CACHE_SIZE. If this is True (the default) then sockets
# over which metrics are received will temporarily stop accepting
# data until the cache size falls below 95% MAX_CACHE_SIZE.
USE_FLOW_CONTROL = True
# By default, carbon-cache will log every whisper update. This can be excessive and
# degrade performance if logging on the same volume as the whisper data is stored.
LOG_UPDATES = False
# On some systems it is desirable for whisper to write synchronously.
# Set this option to True if youd like to try this. Basically it will
# shift the onus of buffering writes from the kernel into carbons cache.
WHISPER_AUTOFLUSH = False
# Enable AMQP if you want to receve metrics using an amqp broker
# ENABLE_AMQP = False
# Verbose means a line will be logged for every metric received
# useful for testing
# AMQP_VERBOSE = False
# AMQP_HOST = localhost
# AMQP_PORT = 5672
# AMQP_VHOST = /
# AMQP_USER = guest
# AMQP_PASSWORD = guest
# AMQP_EXCHANGE = graphite
# AMQP_METRIC_NAME_IN_BODY = False
# The manhole interface allows you to SSH into the carbon daemon
# and get a python interpreter. BE CAREFUL WITH THIS! If you do
# something like time.sleep() in the interpreter, the whole process
# will sleep! This is *extremely* helpful in debugging, assuming
# you are familiar with the code. If you are not, please dont
# mess with this, you are asking for trouble :)
#
# ENABLE_MANHOLE = False
# MANHOLE_INTERFACE = 127.0.0.1
# MANHOLE_PORT = 7222
# MANHOLE_USER = admin
# MANHOLE_PUBLIC_KEY = ssh-rsa AAAAB3NzaC1yc2EAAAABiwAaAIEAoxN0sv/e4eZCPpi3N3KYvyzRaBaMeS2RsOQ/cDuKv11dlNzVeiyc3RFmCv5Rjwn/lQ79y0zyHxw67qLyhQ/kDzINc4cY41ivuQXm2tPmgvexdrBv5nsfEpjs3gLZfJnyvlcVyWK/lId8WUvEWSWHTzsbtmXAF2raJMdgLTbQ8wE=
# Patterns for all of the metrics this machine will store. Read more at
# http://en.wikipedia.org/wiki/Advanced_Message_Queuing_Protocol#Bindings
#
# Example: store all sales, linux servers, and utilization metrics
# BIND_PATTERNS = sales.#, servers.linux.#, #.utilization
#
# Example: store everything
# BIND_PATTERNS = #
# To configure special settings for the 'foo' carbon-cache instance, uncomment this:
#[cache:foo]
#LINE_RECEIVER_PORT = 2103
#PICKLE_RECEIVER_PORT = 2104
#CACHE_QUERY_PORT = 7102
# and any other settings you want to customize, defaults are inherited
# from [carbon] section.
[relay]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2013
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2014
# To use consistent hashing instead of the user defined relay-rules.conf,
# change this to:
# RELAY_METHOD = consistent-hashing
RELAY_METHOD = rules
# If you use consistent-hashing you may want to add redundancy
# of your data by replicating every datapoint to more than
# one machine.
REPLICATION_FACTOR = 1
# This is a list of carbon daemons we will send any relayed or
# generated metrics to. The default provided would send to a single
# carbon-cache instance on the default port. However if you
# use multiple carbon-cache instances then it would look like this:
#
# DESTINATIONS = 127.0.0.1:2004:a, 127.0.0.1:2104:b
#
# The general form is IP:PORT:INSTANCE where the :INSTANCE part is
# optional and refers to the 'None' instance if omitted.
#
# Note that if the destinations are all carbon-caches then this should
# exactly match the webapp's CARBONLINK_HOSTS setting in terms of
# instances listed (order matters!).
DESTINATIONS = $carbon_relay_destinations
# This defines the maximum 'message size' between carbon daemons.
# You shouldn't need to tune this unless you really know what you're doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
MAX_QUEUE_SIZE = 10000
# Set this to False to drop datapoints when any send queue (sending datapoints
# to a downstream carbon daemon) hits MAX_QUEUE_SIZE. If this is True (the
# default) then sockets over which metrics are received will temporarily stop accepting
# data until the send queues fall below 80% MAX_QUEUE_SIZE.
USE_FLOW_CONTROL = True
[aggregator]
LINE_RECEIVER_INTERFACE = 0.0.0.0
LINE_RECEIVER_PORT = 2023
PICKLE_RECEIVER_INTERFACE = 0.0.0.0
PICKLE_RECEIVER_PORT = 2024
DESTINATION_HOST = 127.0.0.1
DESTINATION_PORT = 2004
MAX_QUEUE_SIZE = 10000
# This defines the maximum 'message size' between carbon daemons.
# You shouldnt need to tune this unless you really know what youre doing.
MAX_DATAPOINTS_PER_MESSAGE = 500
" | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /opt/graphite/conf/carbon.conf"
#rm $temp_path
####################################
# /opt/graphite/conf/dashboard.conf
####################################
# Dashboard UI defaults, written verbatim (single quotes: nothing is
# expanded).
temp_path=/tmp/dashboard.conf
echo '# This configuration file controls the behavior of the Dashboard UI, available
# at http://my-graphite-server/dashboard/.
#
# This file must contain a [ui] section that defines the following settings:
#
[ui]
default_graph_width = 400
default_graph_height = 250
automatic_variants = true
refresh_interval = 60
#
# These settings apply to the UI as a whole, all other sections in this file
# pertain only to specific metric types.
#
# The dashboard presents only metrics that fall into specified naming schemes
# defined in this file. This creates a simpler, more targetted view of the
# data. The general form for defining a naming scheme is as follows:
#
#[Metric Type]
#scheme = basis.path.<field1>.<field2>.<fieldN>
#field1.label = Foo
#field2.label = Bar
#
#
# Where each <field> will be displayed as a dropdown box
# in the UI and the remaining portion of the namespace
# shown in the Metric Selector panel. The .label options set the labels
# displayed for each dropdown.
#
# For example:
#
#[Sales]
#scheme = sales.<channel>.<type>.<brand>
#channel.label = Channel
#type.label = Product Type
#brand.label = Brand
#
# This defines a "Sales" metric type that uses 3 dropdowns in the Context Selector
# (the upper-left panel) while any deeper metrics (per-product counts or revenue, etc)
# will be available in the Metric Selector (upper-right panel).
' | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /opt/graphite/conf/dashboard.conf"
#rm $temp_path
####################################
# /opt/graphite/conf/relay-rules.conf
####################################
# ONLY USED with clustering / multiple back-ends #
# The file is always rendered locally, but only shipped to the host when
# relaying is turned on.
temp_path=/tmp/relay-rules.conf
echo "[default]
default = true
servers = $carbon_cache_servers
" | tee "$temp_path"
if [ "$relay_enabled" = "true" ]; then
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /opt/graphite/conf/relay-rules.conf"
fi
#rm $temp_path
####################################
# /opt/graphite/conf/storage-schemas.conf
####################################
# Whisper retention policy: the .* pattern applies $graphite_retentions
# to every metric; the commented alternatives document the trade-offs.
temp_path=/tmp/storage-schemas.conf
echo "[stats]
priority = 110
pattern = .*
retentions = $graphite_retentions
#realtime
# 1 seconds * 1209600 datapoints = 2 week of 1 second granularity
# 10 seconds * 864000 datapoints = 2 months of 10-second granularity
# 60 seconds * 259200 datapoints = 6 months of 1-minute granularity
# 10 minutes * 262974 datapoints = ~3 years of 10-minute granularity
#retentions='1:1209600,10:864000,60:259200,600:262974'
#standard
# 10 seconds * 2160 datapoints = 6 hours of 10-second granularity
# 60 seconds * 100080 datapoints = 1 week of 1-minute granularity
# 10 minutes * 262974 datapoints = ~3 years of 10-minute granularity
" | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /opt/graphite/conf/storage-schemas.conf"
#rm $temp_path
####################################
# /opt/graphite/webapp/graphite/local_settings.py
####################################
# graphite-web overrides: storage dir, cluster/memcache hosts (built from
# $instance_ip plus the optional trailing-comma extra-host lists), and
# carbonlink. The \" escapes survive into the generated Python file.
temp_path=/tmp/local_settings.py
echo "# Edit this file to override the default graphite settings, do not edit settings.py!!!
STORAGE_DIR = '$volume_mount_point'
#See /opt/graphite/conf/carbon.conf (purposely without /whisper/)
# Turn on debugging and restart apache if you ever see an \"Internal Server Error\" page
#DEBUG = True
# Set your local timezone (django will *try* to figure this out automatically)
# If your graphs appear to be offset by a couple hours then this probably
# needs to be explicitly set to your local timezone.
#TIME_ZONE = 'America/Los_Angeles'
# Uncomment these to enable more performance-related logging
#LOG_RENDERING_PERFORMANCE = True
#LOG_CACHE_PERFORMANCE = True
# Override this if you need to provide documentation specific to your graphite deployment
#DOCUMENTATION_URL = \"http://wiki.mycompany.com/graphite\"
# Enable email-related features
#SMTP_SERVER = \"mail.mycompany.com\"
#####################################
# LDAP Authentication Configuration #
#####################################
# LDAP / ActiveDirectory authentication setup
#USE_LDAP_AUTH = True
#LDAP_SERVER = \"ldap.mycompany.com\"
#LDAP_PORT = 389
# OR
#LDAP_URI = \"ldaps://ldap.mycompany.com:636\"
#LDAP_SEARCH_BASE = \"OU=users,DC=mycompany,DC=com\"
#LDAP_BASE_USER = \"CN=some_readonly_account,DC=mycompany,DC=com\"
#LDAP_BASE_PASS = \"readonly_account_password\"
#LDAP_USER_QUERY = \"(username=%s)\" #For Active Directory use \"(sAMAccountName=%s)\"
#
# If you want to further customize the ldap connection options you should
# directly use ldap.set_option to set the ldap module's global options.
# For example:
#
#import ldap
#ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_ALLOW)
#ldap.set_option(ldap.OPT_X_TLS_CACERTDIR, \"/etc/ssl/ca\")
#ldap.set_option(ldap.OPT_X_TLS_CERTFILE, \"/etc/ssl/mycert.pem\")
#ldap.set_option(ldap.OPT_X_TLS_KEYFILE, \"/etc/ssl/mykey.pem\")
# See http://www.python-ldap.org/ for further details on these options.
##########################
# Database Configuration #
##########################
# By default sqlite is used. If you cluster multiple webapps you will need
# to setup an external database (like mysql) and configure all the webapps
# to use the same database. Note that this database is only used to store
# django models like saved graphs, dashboards, user preferences, etc. Metric
# data is not stored here.
#
# DON'T FORGET TO RUN 'manage.py syncdb' AFTER SETTING UP A NEW DB!
#
#DATABASE_ENGINE = 'mysql' # or 'postgres'
#DATABASE_NAME = 'graphite'
#DATABASE_USER = 'graphite'
#DATABASE_PASSWORD = 'graphite-is-awesome'
#DATABASE_HOST = 'mysql.mycompany.com'
#DATABASE_PORT = '3306'
#########################
# Cluster Configuration #
#########################
# (To avoid excessive DNS lookups you want to stick to using IP addresses only in this entire section)
#
# This should list the IP address (and optionally port) of each webapp in your cluster.
# Strings are of the form \"ip[:port]\"
# Usually this will be the same as MEMCACHE_HOSTS except for the port numbers.
#
#CLUSTER_SERVERS = []
CLUSTER_SERVERS = [${graphite_additional_cluster_servers}'${instance_ip}:7001']
# This lists all the memcached servers that will be used by this webapp.
# If you have a cluster of webapps you want to make sure all of them
# have the *exact* same value for this setting. That will maximize cache
# efficiency. Setting MEMCACHE_HOSTS to be empty will turn off use of
# memcached entirely.
#
# You should not use the loopback address 127.0.0.1 here because every webapp in
# the cluster should use the exact same value and should list every member in the
# cluster.
#MEMCACHE_HOSTS = ['10.10.10.10:11211', '10.10.10.11:11211', '10.10.10.12:11211']
MEMCACHE_HOSTS = [${graphite_additional_memcache_hosts}'${instance_ip}:11211']
MEMCACHE_DURATION = $graphite_memcache_duration
# If you are running multiple carbon-caches on this machine (typically behind a relay using
# consistent hashing), you'll need to list the ip address, cache query port, and instance name of each carbon-cache
# instance on the local machine (NOT every carbon-cache in the entire cluster). The default cache query port is 7002
# and a common scheme is to use 7102 for instance b, 7202 for instance c, etc.
#
# You *should* use 127.0.0.1 here.
#CARBONLINK_HOSTS = [\"127.0.0.1:7002:a\", \"127.0.0.1:7102:b\", \"127.0.0.1:7202:c\"]
CARBONLINK_HOSTS = [\"127.0.0.1:7002\"]
" | tee "$temp_path"
scp -i "$key_path" "$temp_path" "$instance_user@$instance_host:$temp_path"
ssh -i "$key_path" "$instance_user@$instance_host" "sudo mv $temp_path /opt/graphite/webapp/graphite/local_settings.py"
#rm $temp_path
####################################
# CREATE GRAPHITE-WEB DATABASE
# START supervisord (which starts carbon, graphite, etc)
# ENSURE PROCESSES DIDNT BLOW
####################################
# "yes no" declines django's interactive superuser prompt during syncdb.
# The unquoted heredoc lets $volume_mount_point expand locally before the
# commands are shipped to the remote host.
ssh -i "$key_path" "$instance_user@$instance_host" <<EOF
cd /opt/graphite/webapp/graphite
yes no | sudo python manage.py syncdb
#nginx and wsgi runs under www-data user by default
sudo chown -R www-data:www-data /opt/graphite
sudo chown -R www-data:www-data $volume_mount_point
# reread supervisord configuration, then start carbon, graphite, etc
sudo supervisorctl reread
sudo supervisorctl reload
sleep 2
sudo supervisorctl status
#should show all listening ports - 5666 for nrpe, 11211 for memcached, 80 for nginx
#2003 for line receiver / 2004 for pickle receiver / 7002 for cache query for carbon (python)
#3031 for uwsgi, 8125 for statsite (udp)
sudo netstat -lntup
# ENSURE PROCESSES DIDNT BLOW
#sudo tail -f /var/log/supervisor/*.log
EOF
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment