#!/bin/bash
# Periodic CPU Contention Experiment
# Configuration-only preamble: names the experiment, picks the target node and
# victim service, and sets measurement (perf/Jaeger) and workload (wrk2) knobs.
EXPERIMENT_NAME='No Contention Baseline'
TARGET_NODE='node-3'
VICTIM_SERVICES='search'
NOISY_NEIGHBOR_TYPE='cpu'
# NOTE(review): 'burst' is presumably a helper defined elsewhere (not visible in
# this chunk) — confirm its signature; the arguments look like start/end/step.
CONTENTION_BURSTS=$(burst 0 60 1)
# Experiment configuration
ITERATIONS=3
ITERATION_DELAY=20
# Windowed sampling
ENABLE_WINDOWED_SAMPLING=true
WINDOW_INTERVAL_MS=100
# Perf events collected per sampling window (same list used by the
# 'perf stat' smoke test further below)
PERF_EVENTS='cycles,instructions,LLC-loads,LLC-load-misses,branch-instructions,branch-misses,dTLB-loads,dTLB-load-misses,page-faults'
TIMING_BUFFER_SIZE=16384
# Jaeger tracing
JAEGER_SAMPLE_RATIO=0.01 # 1% sampling, set to 0 to disable
# wrk2 workload
WRK2_TARGET_SERVICE='frontend'
WRK2_TARGET_IP='192.168.166.19' # CHANGE TO YOUR IP
WRK2_TARGET_PORT=5000
WRK2_SCRIPT='../wrk2/scripts/hotel-reservation/mixed-workload_type_1.lua'
WRK2_RATE=200
WRK2_THREADS=3
WRK2_CONNECTIONS=3
# --- GitHub gist metadata (page scrape artifact, kept as comments) ---
# Last active: December 12, 2025 22:28
# Save RoyZhang7/4a0fe47f93c53a81b80ad396cb934f36 to your computer and use it in GitHub Desktop.
# Perf contention paper relevant
# Build the perf C libraries and the cgo-linked search binary.
cd hotelReservation || exit 1
# Build basic perf_api library
gcc -c services/perf/perf_api.c -o services/perf/perf_api.o
ar rcs services/perf/libperf_api.a services/perf/perf_api.o
# Build windowed perf_api library
gcc -c services/perf/perf_api_windowed.c -o services/perf/perf_api_windowed.o
ar rcs services/perf/libperf_api_windowed.a services/perf/perf_api_windowed.o
# Verify libraries were created
ls -lh services/perf/*.a
# Set CGO environment (cgo is required to link the perf C libraries)
export CGO_ENABLED=1
export GOOS=linux
# Build search service
go build -o build/search ./cmd/search/
# Verify binary was created
ls -lh build/search
file build/search
# Compile with verbose output, surfacing only compiler errors
gcc -c -v services/perf/perf_api.c -o services/perf/perf_api.o 2>&1 | grep error
gcc -c -v services/perf/perf_api_windowed.c -o services/perf/perf_api_windowed.o 2>&1 | grep error
# Build with verbose output, keeping a full log
# (fix: the trailing comment had been fused onto the tee argument)
CGO_ENABLED=1 go build -v -x ./cmd/search/ 2>&1 | tee build.log
# Check the pod is using the windowed image
# Verify the search deployment is running the windowed image and producing data.
kubectl get deployment search -o jsonpath='{.spec.template.spec.containers[0].image}'
# Check environment variables
kubectl exec deployment/search -- env | grep -E "ENABLE_WINDOWED|ITERATION_ID|WINDOW_INTERVAL"
# Check if windowed sampling started
kubectl logs deployment/search | grep -i "windowed\|sampling\|iteration"
# After experiment runs for 30s, check for data file
kubectl exec deployment/search -- ls -la /data/
# View the data
kubectl exec deployment/search -- cat /data/run_data_search_iter1.json | jq '.sample_count'
# Look for the initialization message or error
kubectl logs deployment/search | grep -A5 -B5 "Initialized windowed perf\|Failed to"
# Check if perf_window_init returned NULL
# (fix: 'sudo apt update' had been fused onto the end of this grep)
kubectl logs deployment/search 2>&1 | grep -i "failed to initialize perf"
# Install host perf tooling
sudo apt update
# Install perf for the running kernel and verify the PMU supports the event set.
sudo apt install linux-tools-common linux-tools-generic "linux-tools-$(uname -r)"
sudo apt install cpuid
# Report how many hardware counters the CPU exposes
cpuid -1 | grep "number of counters"
# Disable the NMI watchdog so it does not occupy a hardware counter
echo 0 | sudo tee /proc/sys/kernel/nmi_watchdog
# Allow unprivileged perf event access (-1 = no restrictions)
echo -1 | sudo tee /proc/sys/kernel/perf_event_paranoid
# Smoke-test the full event list used by the experiment
# (fix: the following comment had been fused onto the sleep argument)
perf stat -e cycles,instructions,LLC-loads,LLC-load-misses,branch-instructions,branch-misses,dTLB-loads,dTLB-load-misses,page-faults sleep 1
# Check replicas for all deployments
# Inspect replica counts and scale every over-provisioned deployment back to 1.
kubectl get deployments -o custom-columns="NAME:.metadata.name,DESIRED:.spec.replicas,READY:.status.readyReplicas,AVAILABLE:.status.availableReplicas"
# or
kubectl get deployments -o custom-columns="NAME:.metadata.name,REPLICAS:.spec.replicas" | grep -v "nfs\|NAME" | sort
# see which services have more than 1 replica
kubectl get deployments -o custom-columns="NAME:.metadata.name,REPLICAS:.spec.replicas" --no-headers | awk '$2 > 1 {print $1, $2}'
# scale them back to 1 replica
for deploy in $(kubectl get deployments -o custom-columns="NAME:.metadata.name,REPLICAS:.spec.replicas" --no-headers | awk '$2 > 1 {print $1}'); do
  echo "Scaling $deploy to 1 replica"
  kubectl scale deployment "$deploy" --replicas=1
done
# check for stale consul registry
CONSUL_POD=$(kubectl get pods -l io.kompose.service=consul -o jsonpath='{.items[0].metadata.name}')
# (fix: the heading below had been fused onto the end of this command)
kubectl exec "$CONSUL_POD" -- consul catalog services
# Safe resource limit and request update: update ONE service at a time
# (not bulk), and then clean up the Consul registration.
# Define the services to update (exclude infrastructure: consul, jaeger, mongodb, memcached)
SERVICES="frontend geo rate profile recommendation user reservation search"
# SERVICES is intentionally unquoted here: word-splitting yields one name per iteration.
for svc in $SERVICES; do
  echo "=== Updating $svc ==="
  # Update resources
  kubectl set resources deployment "$svc" --limits=cpu=3 --requests=cpu=1
  # Wait for rollout to complete
  kubectl rollout status deployment "$svc" --timeout=120s
  # Wait for Consul registration
  sleep 10
  echo "$svc updated successfully"
  echo ""
done
# Update MongoDB deployments one at a time
for db in mongodb-geo mongodb-rate mongodb-recommendation mongodb-reservation mongodb-profile mongodb-user; do
  echo "Updating $db"
  kubectl set resources deployment "$db" --limits=cpu=2 --requests=cpu=500m
  kubectl rollout status deployment "$db" --timeout=120s
  sleep 5
done
# Check current resource configs for all pods
for pod in $(kubectl get pods -o jsonpath='{.items[*].metadata.name}'); do
  echo "=== $pod ==="
  kubectl get pod "$pod" -o jsonpath='{.spec.containers[*].resources}' | jq .
  echo ""
done
# Re-resolve the Consul pod (it may have been recreated by the rollouts above).
CONSUL_POD=$(kubectl get pods -l io.kompose.service=consul -o jsonpath='{.items[0].metadata.name}')
# Get current valid pod IPs (column 6 of 'kubectl get pods -o wide' is the pod IP)
CURRENT_IPS=$(kubectl get pods -o wide --no-headers | awk '{print $6}')
# List registered services and check for stale entries. $CURRENT_IPS is
# expanded by the shell into the Python source before it runs; a registration
# is stale when its Address is not a live pod IP and its name starts with 'srv-'.
# (fix: the Python indentation had been stripped by the page scrape; restored here)
kubectl exec "$CONSUL_POD" -- curl -s http://localhost:8500/v1/agent/services | python3 -c "
import sys, json
current_ips = '''$CURRENT_IPS'''.split()
services = json.load(sys.stdin)
stale = []
for sid, svc in services.items():
    if svc['Address'] not in current_ips and svc['Service'].startswith('srv-'):
        stale.append(sid)
        print(f\"STALE: {sid} -> {svc['Address']} ({svc['Service']})\")
if not stale:
    print('No stale registrations found')
"
# Deregister stale (only do it when necessary)
# Get stale IDs and deregister each
kubectl exec "$CONSUL_POD" -- curl -s http://localhost:8500/v1/agent/services | python3 -c "
import sys, json
current_ips = '''$CURRENT_IPS'''.split()
services = json.load(sys.stdin)
for sid, svc in services.items():
    if svc['Address'] not in current_ips and svc['Service'].startswith('srv-'):
        print(sid)
" | xargs -I {} kubectl exec "$CONSUL_POD" -- consul services deregister -id={}
# Verify deployed images and confirm CPU pinning took effect on the new pod.
# Check pods in default namespace
kubectl get pods -n default -o custom-columns='POD:.metadata.name,IMAGE:.spec.containers[*].image'
# Check deployments in default namespace
kubectl get deployments -n default -o custom-columns='DEPLOYMENT:.metadata.name,IMAGE:.spec.template.spec.containers[*].image'
# Check all workload types in default namespace
kubectl get deploy,sts,ds -n default -o custom-columns='TYPE:.kind,NAME:.metadata.name,IMAGE:.spec.template.spec.containers[*].image'
# Get the new pod name
NEW_POD=$(kubectl get pods -l io.kompose.service=recommendation -o jsonpath='{.items[0].metadata.name}')
# Check CPU affinity of PID 1 inside the pod (should show 12-14, not 0-31)
echo "=== CPU Affinity ==="
kubectl exec "$NEW_POD" -- taskset -cp 1
# Check the actual running command (should have taskset)
echo "=== Process Command Line ==="
kubectl exec "$NEW_POD" -- cat /proc/1/cmdline | tr '\0' ' ' && echo
# Verify CPU_SET environment variable is set
echo "=== CPU_SET Environment ==="
kubectl exec "$NEW_POD" -- printenv CPU_SET
# Check perf initialization (should show proper FD counts)
echo "=== Perf Initialization ==="
kubectl logs "$NEW_POD" | grep "Initialized windowed perf"
# Verify seed data in each MongoDB backing store (uses the legacy 'mongo' shell).
# Profile DB
MONGO_PROFILE=$(kubectl get pods -l io.kompose.service=mongodb-profile -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$MONGO_PROFILE" -- mongo profile-db --eval "db.hotels.count()"
# Geo DB
MONGO_GEO=$(kubectl get pods -l io.kompose.service=mongodb-geo -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$MONGO_GEO" -- mongo geo-db --eval "db.geo.count()"
# Recommendation DB
MONGO_REC=$(kubectl get pods -l io.kompose.service=mongodb-recommendation -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$MONGO_REC" -- mongo recommendation-db --eval "db.recommendation.count()"
# Rate DB
MONGO_RATE=$(kubectl get pods -l io.kompose.service=mongodb-rate -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$MONGO_RATE" -- mongo rate-db --eval "db.inventory.count()"
# Reservation DB
MONGO_RESERVE=$(kubectl get pods -l io.kompose.service=mongodb-reservation -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$MONGO_RESERVE" -- mongo reservation-db --eval "db.reservation.count(); db.number.count()"
# User DB
MONGO_USER=$(kubectl get pods -l io.kompose.service=mongodb-user -o jsonpath='{.items[0].metadata.name}')
kubectl exec "$MONGO_USER" -- mongo user-db --eval "db.user.count()"
# Drop the seeded collections so the databases can be re-populated cleanly.
# Relies on the MONGO_* pod names resolved just above.
# Profile DB
echo "Dropping profile-db.hotels..."
kubectl exec "$MONGO_PROFILE" -- mongo profile-db --eval "db.hotels.drop()"
# Geo DB
echo "Dropping geo-db.geo..."
kubectl exec "$MONGO_GEO" -- mongo geo-db --eval "db.geo.drop()"
# Recommendation DB
echo "Dropping recommendation-db.recommendation..."
kubectl exec "$MONGO_REC" -- mongo recommendation-db --eval "db.recommendation.drop()"
# Rate DB
echo "Dropping rate-db.inventory..."
kubectl exec "$MONGO_RATE" -- mongo rate-db --eval "db.inventory.drop()"
# Reservation DB (has multiple collections)
echo "Dropping reservation-db collections..."
kubectl exec "$MONGO_RESERVE" -- mongo reservation-db --eval "db.reservation.drop(); db.number.drop()"
# User DB
echo "Dropping user-db.user..."
kubectl exec "$MONGO_USER" -- mongo user-db --eval "db.user.drop()"
# --- GitHub page boilerplate (scrape artifact, kept as comments) ---
# Sign up for free to join this conversation on GitHub.
# Already have an account? Sign in to comment.