A complete Jaeger docker-compose deployment with Elasticsearch (OSS) and Apache Kafka. Jaeger Query and Kibana search the traces and logs; Prometheus and Grafana monitor the Jaeger services themselves.

docker-compose.yml:
version: "3"
services:
  # Using Elasticsearch as storage for traces and logs (health-check sketch after the compose file)
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch-oss:6.7.1
    networks:
      - elastic-jaeger
    ports:
      - "9200:9200"
      - "9300:9300"
    restart: on-failure
    environment:
      ES_JAVA_OPTS: "-Xms512m -Xmx512m"
    volumes:
      - esdata:/usr/share/elasticsearch/data
  #
  # SERVICES
  #
  # Using Logstash to import logs (example request after logstash.conf below)
  logstash:
    image: docker.elastic.co/logstash/logstash-oss:6.7.1
    ports:
      - "5000:5000"
    networks:
      - elastic-jaeger
    restart: on-failure
    volumes:
      - ./logstash.conf:/usr/share/logstash/pipeline/logstash.conf:ro
    command: logstash -f /usr/share/logstash/pipeline/logstash.conf
    depends_on:
      - elasticsearch
  # Using Apache Kafka as temporary storage and a stream-processing buffer
  # for span post-processing (a consumer sketch follows the compose file)
  kafka:
    image: confluentinc/cp-kafka:5.2.1
    networks:
      - elastic-jaeger
    ports:
      - "9092:9092"
    environment:
      KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092"
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_BROKER_ID: 1
    restart: on-failure
  # Using Apache ZooKeeper to coordinate the Kafka broker
  zookeeper:
    image: confluentinc/cp-zookeeper:5.2.1
    networks:
      - elastic-jaeger
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: "2181"
  # Using Jaeger Collector to receive spans from Jaeger Agents and forward them to Kafka
  jaeger-collector:
    image: jaegertracing/jaeger-collector:1.11.0
    hostname: jaeger-collector
    ports:
      - "14269:14269"
      - "14268:14268"
      - "14267:14267"
      - "9411:9411"
    networks:
      - elastic-jaeger
    restart: on-failure
    environment:
      LOG_LEVEL: "info"
      SPAN_STORAGE_TYPE: "kafka"
      KAFKA_BROKERS: "kafka:9092"
      METRICS_BACKEND: "prometheus"
    depends_on:
      # the collector writes to Kafka, not to Elasticsearch directly
      - kafka
  # Using Jaeger Ingester to consume spans from Kafka and write them to Elasticsearch
  jaeger-ingester:
    image: jaegertracing/jaeger-ingester:1.11.0
    networks:
      - elastic-jaeger
    ports:
      - "14270:14270"
      - "14271:14271"
    restart: on-failure
    environment:
      LOG_LEVEL: "info"
      INGESTER_PARALLELISM: "1"
      INGESTER_DEADLOCKINTERVAL: "0ms"
      SPAN_STORAGE_TYPE: "elasticsearch"
      ES_SERVER_URLS: "http://elasticsearch:9200"
      KAFKA_BROKERS: "kafka:9092"
      METRICS_BACKEND: "prometheus"
    depends_on:
      - kafka
      - elasticsearch
  # Using Jaeger Agent to receive spans from local clients and forward them to the
  # remote Jaeger Collector (client example after the compose file)
  jaeger-agent:
    image: jaegertracing/jaeger-agent:1.11.0
    hostname: jaeger-agent
    command: ["--collector.host-port=jaeger-collector:14267"]
    ports:
      - "5775:5775/udp"
      - "6831:6831/udp"
      - "6832:6832/udp"
      - "5778:5778"
    networks:
      - elastic-jaeger
    restart: on-failure
    environment:
      SPAN_STORAGE_TYPE: "elasticsearch"
      METRICS_BACKEND: "prometheus"
    depends_on:
      - jaeger-collector
  #
  # USER INTERFACES
  #
  # Using Jaeger Query to search and visualize traces (API example after the compose file)
  jaeger-query:
    image: jaegertracing/jaeger-query:1.11.0
    environment:
      SPAN_STORAGE_TYPE: "elasticsearch"
      no_proxy: "localhost"
      METRICS_BACKEND: "prometheus"
    ports:
      - "16686:16686"
      - "16687:16687"
    networks:
      - elastic-jaeger
    restart: on-failure
    command: [
      "--es.server-urls=http://elasticsearch:9200",
      "--span-storage.type=elasticsearch",
      "--log-level=debug",
      "--query.ui-config=/usr/share/jaeger-query/jaeger-query-config.json"
    ]
    volumes:
      - ./jaeger-query-config.json:/usr/share/jaeger-query/jaeger-query-config.json:ro
    depends_on:
      - jaeger-agent
  # Using Kibana to search and visualize logs
  kibana:
    image: docker.elastic.co/kibana/kibana-oss:6.7.1
    ports:
      - "5601:5601"
    environment:
      SERVER_NAME: "kibana"
      ELASTICSEARCH_HOSTS: "http://elasticsearch:9200"
    networks:
      - elastic-jaeger
    depends_on:
      - elasticsearch
  # Using Kafka REST Proxy to expose the REST API required by Kafka Topics UI
  # (topic-listing example after the compose file)
  kafka-rest:
    image: confluentinc/cp-kafka-rest:5.2.1
    hostname: kafka-rest
    ports:
      # the proxy listens on 8090 (see KAFKA_REST_LISTENERS), not the image default 8082
      - "8090:8090"
    environment:
      KAFKA_REST_ZOOKEEPER_CONNECT: "zookeeper:2181"
      KAFKA_REST_LISTENERS: "http://0.0.0.0:8090/"
      KAFKA_REST_HOST_NAME: "kafka-rest"
    networks:
      - elastic-jaeger
    depends_on:
      - kafka
  # Using Kafka Topics UI to browse Kafka topics
  kafka-ui:
    image: landoop/kafka-topics-ui:0.9.4
    ports:
      - "8000:8000"
    environment:
      KAFKA_REST_PROXY_URL: "http://kafka-rest:8090/"
      PROXY: "true"
    networks:
      - elastic-jaeger
    depends_on:
      - kafka
  # Using the Jaeger HotROD demo application to generate sample traces
  # (scripted request example after the compose file)
  demo:
    image: jaegertracing/example-hotrod:1.11.0
    ports:
      - "8080:8080"
      - "8081:8081"
      - "8082:8082"
      - "8083:8083"
    networks:
      - elastic-jaeger
    restart: on-failure
    environment:
      JAEGER_AGENT_HOST: "jaeger-agent"
      JAEGER_AGENT_PORT: "6831"
    command: ["all"]
    depends_on:
      - jaeger-agent
  #
  # SELF-OBSERVABILITY AND MONITORING
  #
  # Using Prometheus to collect metrics from the Jaeger services
  prometheus:
    image: prom/prometheus:v2.9.2
    ports:
      - "9090:9090"
    networks:
      - elastic-jaeger
    volumes:
      - prometheus_data:/prometheus
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    command:
      - '--config.file=/etc/prometheus/prometheus.yml'
  # Using Grafana to visualize metrics collected by Prometheus
  grafana:
    image: grafana/grafana:6.1.6
    networks:
      - elastic-jaeger
    depends_on:
      - prometheus
    ports:
      - "3000:3000"
    volumes:
      - grafana_data:/var/lib/grafana
volumes:
  esdata:
  grafana_data:
  prometheus_data:

networks:
  elastic-jaeger:
    driver: bridge
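
Bring the stack up with docker-compose up -d. A quick first smoke test is to poll Elasticsearch's cluster-health endpoint before expecting the rest of the pipeline to work; the sketch below uses only the Python standard library and assumes the default port mapping above.

import json
import urllib.request

# Poll Elasticsearch's cluster-health API (port 9200 is mapped to the host above).
with urllib.request.urlopen("http://localhost:9200/_cluster/health") as resp:
    health = json.load(resp)

# "green" or "yellow" is fine here: a single-node cluster always reports
# "yellow" because replica shards can never be assigned.
print(health["status"])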
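
To push a trace of your own through the agent's compact-thrift UDP port (6831), the jaeger-client Python package (now deprecated in favor of OpenTelemetry, but contemporary with Jaeger 1.11) is the natural fit. A minimal sketch, assuming the script runs on the Docker host so localhost:6831 reaches the agent; the service name is made up for the example.

import time
from jaeger_client import Config  # pip install jaeger-client

config = Config(
    config={
        "sampler": {"type": "const", "param": 1},  # sample every span
        "local_agent": {
            "reporting_host": "localhost",  # agent UDP port mapped above
            "reporting_port": 6831,
        },
        "logging": True,
    },
    service_name="smoke-test",  # hypothetical service name for this example
)
tracer = config.initialize_tracer()

with tracer.start_span("hello-jaeger") as span:
    span.set_tag("example", True)
    time.sleep(0.1)

time.sleep(2)   # give the background reporter time to flush over UDP
tracer.close()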
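
The HotROD demo UI at http://localhost:8080 produces a multi-service trace on every click, and the same can be scripted. The /dispatch endpoint and the customer=123 parameter below mirror what the demo UI itself calls; treat both as assumptions about this image version rather than a stable API.

import urllib.request

# Trigger one HotROD dispatch, which fans out into a multi-service trace.
with urllib.request.urlopen("http://localhost:8080/dispatch?customer=123") as resp:
    print(resp.status, resp.read())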
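
Spans travel collector -> Kafka -> ingester, so the topic itself is a useful place to watch them on the wire. The sketch below uses the third-party kafka-python package and assumes Jaeger's default topic name, jaeger-spans (the compose file does not override KAFKA_TOPIC). Because the broker advertises itself as kafka:9092, run this inside the compose network or add a hosts entry mapping kafka to 127.0.0.1.

from kafka import KafkaConsumer  # pip install kafka-python

consumer = KafkaConsumer(
    "jaeger-spans",                  # assumed default topic name
    bootstrap_servers="kafka:9092",
    auto_offset_reset="earliest",
    consumer_timeout_ms=10000,       # stop iterating after 10s of silence
)

for record in consumer:
    # Records are encoded span payloads; print only their metadata and size.
    print(f"partition={record.partition} offset={record.offset} bytes={len(record.value)}")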
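
The REST proxy also gives the host a simple window into the broker; listing topic names is a single GET against its documented /topics endpoint.

import json
import urllib.request

# List topic names through the Kafka REST proxy (host port 8090 above).
with urllib.request.urlopen("http://localhost:8090/topics") as resp:
    print(json.load(resp))  # should include the span topic once the collector has written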
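
Once spans have flowed through Kafka into Elasticsearch, they are searchable in the Jaeger UI at http://localhost:16686, or programmatically through the query service's HTTP API. A sketch assuming the HotROD demo has already produced traces for its frontend service; note that /api/traces is the internal endpoint backing the UI and may change between Jaeger versions.

import json
import urllib.parse
import urllib.request

# Fetch the five most recent traces for a service, as the UI does.
params = urllib.parse.urlencode({"service": "frontend", "limit": 5})
with urllib.request.urlopen(f"http://localhost:16686/api/traces?{params}") as resp:
    traces = json.load(resp)["data"]

for trace in traces:
    print(trace["traceID"], len(trace["spans"]), "spans")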
jaeger-query-config.json (UI configuration mounted into the jaeger-query container):

{
  "dependencies": {
    "dagMaxNumServices": 200,
    "menuEnabled": true
  },
  "archiveEnabled": true,
  "menu": [
    {
      "label": "Custom menu",
      "items": [
        {
          "label": "My custom item 1",
          "url": "https://github.com/jaegertracing/jaeger"
        },
        {
          "label": "My custom item 2",
          "url": "http://jaeger.readthedocs.io/en/latest/"
        }
      ]
    }
  ]
}
logstash.conf (pipeline definition mounted into the logstash container):

input {
  http {
    port  => 5000
    codec => json
  }
}

filter {
  mutate {
    # drop the HTTP request headers that the http input attaches to each event
    remove_field => ["headers"]
  }
}

output {
  elasticsearch {
    hosts => "elasticsearch:9200"
  }
}
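
With this pipeline, anything POSTed as JSON to port 5000 lands in Elasticsearch (the elasticsearch output defaults to daily logstash-* indices). A minimal sketch using the standard library; the event fields are arbitrary examples.

import json
import urllib.request

# Ship one structured log event into the Logstash http input on port 5000.
event = {"level": "info", "message": "hello from the docs", "service": "smoke-test"}
req = urllib.request.Request(
    "http://localhost:5000",
    data=json.dumps(event).encode(),
    headers={"Content-Type": "application/json"},
)
with urllib.request.urlopen(req) as resp:
    print(resp.status)  # 200 means Logstash accepted the event

The event is then searchable in Kibana at http://localhost:5601 once an index pattern for logstash-* has been created.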
prometheus.yml (scrape configuration mounted into the prometheus container):

global:
  scrape_interval: 15s     # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # By default, evaluate rules every 15 seconds.
  # scrape_timeout is set to the global default (10s).

# Scrape every Jaeger service:
scrape_configs:
  - job_name: 'jaeger-agent'
    scrape_interval: 5s
    static_configs:
      - targets: ['jaeger-agent:5778']
  - job_name: 'jaeger-ingester'
    scrape_interval: 5s
    static_configs:
      - targets: ['jaeger-ingester:14271']
  - job_name: 'jaeger-collector'
    scrape_interval: 5s
    static_configs:
      - targets: ['jaeger-collector:14268']
  - job_name: 'jaeger-query'
    scrape_interval: 5s
    static_configs:
      - targets: ['jaeger-query:16686']
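
To confirm Prometheus can actually reach those four targets, query its HTTP API for the built-in up metric: 1 per job means the scrape succeeds, 0 means it fails (the exact metrics ports vary across Jaeger versions, so a down target usually points at the scrape config rather than the service).

import json
import urllib.parse
import urllib.request

# Ask Prometheus which scrape targets are up (1) or down (0).
params = urllib.parse.urlencode({"query": "up"})
with urllib.request.urlopen(f"http://localhost:9090/api/v1/query?{params}") as resp:
    for result in json.load(resp)["data"]["result"]:
        print(result["metric"]["job"], "=>", result["value"][1])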