Program:
- docker desktop + kubernetes
- minikube
- helm
Repo:
git clone https://github.com/xuan-cao-swi/opentelemetry-operator.git # clone from my repo that with autoinstrumentation ruby change, branch: ruby-autoinstrumentation
git clone https://github.com/xuan-cao-swi/opentelemetry-helm-charts.git # same as above, branch: autoinstrumentation-ruby
(Helm chart is used for local helm repo and that's for your local k8s operator)
Create a directory and
- create a rails application using
rails new --minimal app
- create couple of yaml file for deployment and mutation
# scaffold the working directory: a minimal Rails app plus empty manifest/values files
mkdir working_folder
cd working_folder
rails new --minimal rails-app
touch deployment-rails.yaml            # Deployment for the sample Rails app
touch service-rails.yaml               # Service exposing the Rails app on port 3000
touch autoinstrumentation-ruby.yaml    # Instrumentation custom resource for the operator
touch otel-collector-deployment.yaml   # helm values for the collector chart
touch otel-operator.yaml               # helm values for the operator chart
deployment-rails.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: test-rails-k8s
spec:
  replicas: 1
  selector:
    matchLabels:
      app: test-rails-k8s
  template:
    metadata:
      labels:
        app: test-rails-k8s
      annotations:
        # ask the operator's admission webhook to inject the Ruby autoinstrumentation
        instrumentation.opentelemetry.io/inject-ruby: "true"
    spec:
      containers:
        - name: rails-app
          image: test-rails-k8s:latest
          # image is loaded directly into minikube's docker daemon — never pull
          imagePullPolicy: Never
          ports:
            - containerPort: 3000
service-rails.yaml
apiVersion: v1
kind: Service
metadata:
  name: xuan-test-rails-k8s
spec:
  # selector must match the pod labels from deployment-rails.yaml
  # (pods are labeled app: test-rails-k8s, not xuan-test-rails-k8s —
  # the original selector matched no pods, so the Service had no endpoints)
  selector:
    app: test-rails-k8s
  ports:
    - protocol: TCP
      port: 3000
      targetPort: 3000
otel-operator.yaml
# helm values for the operator chart: pin the Ruby autoinstrumentation image
# to the locally built tag (see the `docker build -t autoinstrumentation-ruby:3.3.3` step)
manager:
  autoInstrumentationImage:
    ruby:
      repository: "autoinstrumentation-ruby"
      tag: "3.3.3"
autoinstrumentation-ruby.yaml
apiVersion: opentelemetry.io/v1alpha1
kind: Instrumentation
metadata:
  name: demo-instrumentation
spec:
  exporter:
    # OTLP/HTTP endpoint of the collector installed by the helm release named "deployment"
    endpoint: http://deployment-opentelemetry-collector:4318
  sampler:
    type: parentbased_traceidratio
    argument: "1"  # sample 100% of traces
  ruby:
    env:
      - name: OTEL_DEBUG_LEVEL
        value: "debug"
      - name: OTEL_EXPORTER_OTLP_ENDPOINT
        value: http://deployment-opentelemetry-collector:4318
      - name: OTEL_OPERATOR
        value: "true"
      - name: OTEL_TRACES_EXPORTER
        value: "otlp,console"
otel-collector-deployment.yaml
# Any custom otel-collector values to merge
# See also `helm show values open-telemetry/opentelemetry-collector`
mode: deployment

image:
  repository: otel/opentelemetry-collector-k8s

# We only want one of these collectors - any more and we'd produce duplicate data
replicaCount: 1

presets:
  # enables the k8sclusterreceiver and adds it to the metrics pipelines
  clusterMetrics:
    enabled: true
  # enables the k8sobjectsreceiver to collect events only and adds it to the logs pipelines
  kubernetesEvents:
    enabled: true

config:
  receivers:
    otlp/notls:
      protocols:
        grpc:
          endpoint: 0.0.0.0:4317
        http:
          endpoint: 0.0.0.0:4318
  # explicit empty map — a bare `processors:` key would parse as null
  processors: {}
  exporters:
    debug:
      verbosity: detailed
  service:
    pipelines:
      traces:
        receivers: [otlp/notls]
        processors: []
        exporters: [debug]
      metrics:
        receivers: [otlp/notls]
        processors: []
        exporters: [debug]
      logs:
        receivers: [otlp/notls]
        processors: []
        exporters: [debug]
All the operations below should be done within the minikube cluster
minikube start --driver=docker
eval $(minikube -p minikube docker-env) # link your local docker daemon to minikube (apply this in every terminal you work in)
# build and save the sample app to the minikube docker registry for later pulling
# you may skip saving the docker image to a tar and loading it into minikube if your current terminal is under `eval $(minikube -p minikube docker-env)`
docker build -t test-rails-k8s:latest -f rails-app/Dockerfile rails-app/
docker save -o test-rails-k8s.tar test-rails-k8s:latest
minikube image load test-rails-k8s.tar
# start the rails app in the k8s cluster
kubectl apply -f deployment-rails.yaml
kubectl apply -f service-rails.yaml
# "Make sure you have cert-manager installed" (https://cert-manager.io/docs/installation/)
kubectl apply -f https://github.com/cert-manager/cert-manager/releases/download/v1.16.2/cert-manager.yaml
# build the Ruby autoinstrumentation image; the reason for using tag 3.3.3 rather than latest is that with latest, minikube will try to pull from the docker registry or github packages
cd working_folder/opentelemetry-operator/autoinstrumentation/ruby
docker build -t autoinstrumentation-ruby:3.3.3 . --progress=plain --no-cache
# use the dev-operator-local tag to avoid updating the helm chart each time (make sure `eval $(minikube -p minikube docker-env)` is active)
# `make generate ...` will create the pod, `make undeploy` removes the pod
cd working_folder/opentelemetry-operator/
IMG=opentelemetry-operator:dev-operator-local make generate container deploy
make undeploy
Before building the helm repo, the following steps need to be done so that the helm repo points to the local operator:
change charts/opentelemetry-operator/Chart.yaml sources:
to the absolute path of your operator, e.g.
- file:///Users/Xuan.Cao/Desktop/opentelemetry/opentelemetry-operator/
change charts/opentelemetry-operator/values.yaml to
# point the chart at the locally built operator image (IMG=opentelemetry-operator:dev-operator-local above)
manager:
  image:
    repository: opentelemetry-operator
    tag: "dev-operator-local"
then build the local helm repo:
# package the chart and serve it under the url http://localhost:8080 (it's easier to provide a helm repo url rather than a file path)
# don't kill the process
cd working_folder/opentelemetry-helm-charts/charts
helm package opentelemetry-operator
helm repo index .
python3 -m http.server 8080
# add two helm repos: one is local, the other is upstream
helm repo add opentelemetry-operator-local http://localhost:8080
helm repo add open-telemetry https://open-telemetry.github.io/opentelemetry-helm-charts
# install collector chart
helm install deployment open-telemetry/opentelemetry-collector --values otel-collector-deployment.yaml
# install local operator chart
helm install opentelemetry-operator opentelemetry-operator-local/opentelemetry-operator --set "manager.collectorImage.repository=otel/opentelemetry-collector-k8s" --set admissionWebhooks.certManager.enabled=false --set admissionWebhooks.autoGenerateCert.enabled=true --values otel-operator.yaml
# apply the autoinstrumentation-ruby Instrumentation resource
kubectl apply -f autoinstrumentation-ruby.yaml
# restart your rails app so the webhook injects the instrumentation into the new pod
kubectl rollout restart -f deployment-rails.yaml
# now you can send request and get the trace/span