prometheus setup
================
1. sudo docker network create prom
2. mkdir -p /data/prometheus
3. create the prometheus.yml file in /data/prometheus (contents below)
4. sudo docker run -d --name prometheus --network prom -p 9090:9090 -v /data/prometheus:/prometheus-data/ prom/prometheus --config.file=/prometheus-data/prometheus.yml
5. sudo docker run -d --name=grafana --network prom -p 80:3000 grafana/grafana
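
Optional sanity check -- a rough sketch that assumes the port mappings above and
that both containers share the 'prom' network (so Grafana can reach Prometheus at
http://prometheus:9090 when you add it as a data source):

curl -s http://localhost:9090/-/healthy   # Prometheus liveness endpoint
curl -s http://localhost/api/health       # Grafana health endpoint (container port 3000 mapped to host port 80)
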
---
prometheus.yml:
---
# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
    - targets: ['localhost:9090']
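
  # Illustrative only: additional scrape jobs would be appended here. The
  # 'node-exporter' container name and port 9100 below are assumptions, not part
  # of this setup; uncomment and adjust if you run an exporter on the 'prom' network.
  # - job_name: 'node'
  #   static_configs:
  #   - targets: ['node-exporter:9100']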