Created
July 19, 2019 07:36
-
-
Save cosh/8842709585ca9fa8feb64c483fe358d6 to your computer and use it in GitHub Desktop.
Remote read and write config for Prometheus
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
# my global config
global:
  scrape_interval: 1m  # How frequently to scrape targets. The default is every 1 minute.
  evaluation_interval: 1m  # How frequently to evaluate rules. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
    - static_configs:
        - targets:
          # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.
    static_configs:
      - targets: ['localhost:9090']

# Forward scraped samples to a remote write endpoint (here: an Azure Function).
remote_write:
  - url: "https://<yourWriteFunc>.azurewebsites.net/api/Write"
    remote_timeout: 30s
    # Per-shard queue tuning for the remote-write path.
    queue_config:
      capacity: 100000
      max_shards: 1000
      max_samples_per_send: 1000
      batch_send_deadline: 5s
      # NOTE(review): max_retries was removed from queue_config in newer
      # Prometheus releases — confirm against the Prometheus version in use.
      max_retries: 10
      min_backoff: 30ms
      max_backoff: 100ms

# Read historical samples back from the paired remote read endpoint.
remote_read:
  - url: "https://<yourReadFunc>.azurewebsites.net/api/Read"
    read_recent: true
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Good, I like it!