""" | |
Copyright (c) 2022 Intel Corporation | |
Licensed under the Apache License, Version 2.0 (the "License"); | |
you may not use this file except in compliance with the License. | |
You may obtain a copy of the License at | |
http://www.apache.org/licenses/LICENSE-2.0 | |
Unless required by applicable law or agreed to in writing, software | |
distributed under the License is distributed on an "AS IS" BASIS, | |
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | |
See the License for the specific language governing permissions and | |
limitations under the License. | |
""" | |
# !/bin/bash | |
# Read the parameters | |
source params.cfg | |
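# A minimal illustrative params.cfg is sketched below (commented out).
# The variable names are the ones this script consumes; the values are
# placeholders/assumptions, not recommendations:
#
#   ovms_container_name=ovms_bm_server
#   benchclient_container_name=ovms_bm_client
#   ovms_docker_image=openvino/model_server:latest
#   benchmark_docker_image=ovms_benchmark_client:latest
#   model_paths_list=(/opt/models/resnet50)
#   port=9000
#   r_port=8000
#   shape=auto
#   benchmark_duration=60
#   # Mode 1 only: kpi_concurrency=8
#   # Mode 2 only: kpi_latency=100; min_concurrency=1; max_concurrency=16
#   # Mode 3: leave all of the above KPI variables unset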
# Before starting the benchmarking, stop any previous containers.
# Note: ovms_docker_stop_cmd must be defined before its first use below.
ovms_docker_stop_cmd="docker stop $ovms_container_name"
echo -e "\nStopping previous benchmarking docker containers. An error response is expected if no containers are running..."
eval $ovms_docker_stop_cmd
eval "docker stop $benchclient_container_name"

# Initialize a few parameters
host_ip=$(hostname -I | cut -d ' ' -f1 | xargs)
log_folder_root="ovms_bm_logs/$(date +%Y-%m-%d-%H-%M-%S)"
logs_search_string='worker: window_mean_latency:'
num_sockets=$(lscpu | grep "Socket(s)" | cut -d ":" -f2 | xargs)
cores_per_socket=$(lscpu | grep "Core(s) per socket" | cut -d ":" -f2 | xargs)
num_phy_cores=$((num_sockets * cores_per_socket))
summary_txt_info="\nHost IP: $host_ip,\nlog_folder_root: $log_folder_root"
# Setup STREAMS
# Find all divisors of num_phy_cores and collect them in the streams array.
for i in $(seq 1 $num_phy_cores); do
    if [ $((num_phy_cores / i * i)) -eq $num_phy_cores ]; then
        streams+=($i)
    fi
done

# Remove the last element of the streams array.
# The last element is num_phy_cores itself, i.e., if the number of IE streams
# equals the number of cores, each IE stream uses only 1 core, which is
# typically not desirable.
unset 'streams[-1]'

# Set the max_streams_val variable
max_streams_val=${streams[-1]}
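# Example: for num_phy_cores=24 the divisors are (1 2 3 4 6 8 12 24);
# after dropping the last element, streams=(1 2 3 4 6 8 12) and
# max_streams_val=12.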
# Setup CONCURRENCY
# For mode 1, concurrency is a single parameter from params.cfg (kpi_concurrency).
# For mode 2, concurrency is a range from params.cfg (min_concurrency, max_concurrency).
# For mode 3, concurrency is dynamically set based on num_streams.
concurrency=()
if [ -n "$kpi_concurrency" ]; then
    summary_txt_info+="\nkpi_concurrency is given: $kpi_concurrency"
    summary_txt_info+="\nRunning script in Mode 1. Setting concurrency to $kpi_concurrency \n"
    concurrency=($kpi_concurrency)
    script_mode=1
    total_benchmarks_runs=$((${#concurrency[@]} * ${#streams[@]}))
elif [ -n "$kpi_latency" ] && [ -n "$min_concurrency" ] && [ -n "$max_concurrency" ]; then
    if [ "$min_concurrency" -le "$max_concurrency" ]; then
        summary_txt_info+="\nParams read: kpi_latency: $kpi_latency ms, min,max concurrency: $min_concurrency, $max_concurrency"
        summary_txt_info+="\nRunning script in Mode 2. Setting concurrency: ($min_concurrency to $max_concurrency) \n"
        concurrency=($(seq $min_concurrency $max_concurrency))
        script_mode=2
        total_benchmarks_runs=$((${#concurrency[@]} * ${#streams[@]}))
    else
        echo -e "\nmin_concurrency ($min_concurrency) must be <= max_concurrency ($max_concurrency)!"
        exit 1
    fi
else
    summary_txt_info+="\nNo KPIs are given! Setting concurrency to many combinations. \n"
    summary_txt_info+="\nRunning script in Mode 3. \n"
    script_mode=3
    total_benchmarks_runs="Dynamic"
fi
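# Example: if params.cfg sets only kpi_concurrency=8, the script runs in
# Mode 1 and benchmarks concurrency 8 once per streams value; with
# streams=(1 2 3 4 6 8 12) that is 7 total runs.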
# Print INFO
summary_txt_info+="\nBenchmark Settings Summary:"
summary_txt_info+="\ncores_per_socket: $cores_per_socket, num_sockets: $num_sockets"
summary_txt_info+="\nnum_phy_cores: $num_phy_cores"
summary_txt_info+="\nstreams list: ${streams[@]}"
summary_txt_info+="\nconcurrency list: ${concurrency[@]}"
summary_txt_info+="\ntotal_benchmarks_runs: $total_benchmarks_runs"
mkdir -p $log_folder_root
echo -e "$summary_txt_info"
echo -e "$summary_txt_info" >> ${log_folder_root}/summary.log
# Function to get the dynamic concurrency list for mode 3.
# Args: streams value, relative position in the streams list (i/len),
#       position from the end of the list, and max_streams_val.
function get_concurrency_list {
    local streams=$1
    local pos=$2
    local pos_b=$3
    local max_streams_val=$4
    local concurrency=()
    # In the lower half of the streams list, also try (streams - 1).
    if (( $(echo "$pos < 0.5" | bc -l) )) && [ $streams -gt 3 ]; then
        concurrency+=($((streams - 1)))
    fi
    # In the upper half (excluding the last element), sweep a small range
    # around the streams value.
    if (( $(echo "$pos >= 0.5" | bc -l) )) && (( $(echo "$pos < 1" | bc -l) )); then
        case $pos_b in
            2) end_val=$((max_streams_val + 1));;
            3) end_val=$((max_streams_val - 1));;
            *) end_val=$((streams + pos_b));;
        esac
        start_val=$((streams - 1))
        concurrency+=($(seq $start_val $end_val))
    else
        concurrency+=($streams) # diagonal, and for pos==1, i.e., for max_streams
    fi
    echo ${concurrency[@]}
}
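# Worked example (continuing the 24-core case, streams=(1 2 3 4 6 8 12)):
# for i=4 (streams value 6), pos=4/7≈0.57 and pos_b=3, so the function
# returns seq 5..11, i.e. concurrency=(5 6 7 8 9 10 11); for i=0
# (streams value 1), pos<0.5 and the value is not >3, so it returns (1).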
benchmarks_ran=0
for idx in "${!model_paths_list[@]}"; do
    model_path=${model_paths_list[idx]}
    model_name=$(basename $model_path)
    model_dir=$(dirname $model_path)
    docker_mount="$model_dir:/models"
    model_mount_path="/models/$model_name"
    log_folder=$log_folder_root/$model_name
    echo "model_path: $model_path"
    echo "model_name: $model_name"
    echo "model_dir: $model_dir"
    echo "docker_mount: $docker_mount"
    echo "model_mount_path: $model_mount_path"
    echo "log_folder: $log_folder"
    mkdir -p $log_folder
    metrics_csv="Streams,Concurrency,Latency(ms)\n"
    echo "Streams,Concurrency,Latency(ms)" >> ${log_folder}/metrics.csv
for i in "${!streams[@]}"; do | |
ovms_docker_run_cmd="docker run --rm -d -it \ | |
--name=$ovms_container_name \ | |
-v $docker_mount \ | |
-p=$r_port:$r_port \ | |
-p=$port:$port \ | |
$ovms_docker_image \ | |
--model_path=$model_mount_path \ | |
--model_name=$model_name \ | |
--nireq=${streams[i]} \ | |
--plugin_config='{\"CPU_THROUGHPUT_STREAMS\": \"${streams[i]}\"}' \ | |
--shape=$shape \ | |
--port=$port \ | |
--rest_port=$r_port" | |
echo -e "\n*** Starting OVMS server :" | |
echo $ovms_docker_run_cmd | |
eval $ovms_docker_run_cmd | |
#sleep 2 | |
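        # Note: --nireq and CPU_THROUGHPUT_STREAMS are both set to the same
        # streams value, so each OpenVINO inference stream gets its own
        # infer request.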
        if [ $script_mode -eq 3 ]; then
            streams_val=${streams[i]}
            pos=$(echo "$i/${#streams[@]}" | bc -l)
            pos_b=$((${#streams[@]} - i))
            concurrency=($(get_concurrency_list $streams_val $pos $pos_b $max_streams_val))
            echo -e "\n Streams list: ${streams[@]}"
            echo " get_concurrency_list: $streams_val $pos $pos_b $max_streams_val"
            echo " Loop streams_val: $streams_val"
            echo " Loop concurrency list: ${concurrency[@]}"
        fi
for j in "${!concurrency[@]}"; do | |
ovms_client_log_file="${log_folder}/${model_name}_s_${streams[i]}_c_${concurrency[j]}.log" | |
benchm_client_docker_run_cmd="docker run --rm \ | |
--name=$benchclient_container_name | |
--network host \ | |
$benchmark_docker_image \ | |
-c=${concurrency[j]} \ | |
-m=$model_name \ | |
-a=$host_ip \ | |
-r=$r_port \ | |
-p=$port \ | |
-t=$benchmark_duration \ | |
--print_all \ | |
2>&1 | tee $ovms_client_log_file " | |
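            # The client flags mirror the loop variables: -c is the request
            # concurrency, -m the model name, -a the server address, -r/-p the
            # REST/gRPC ports, and -t the run duration from params.cfg.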
echo -e "\n*** Starting OVMS client :" | |
echo $benchm_client_docker_run_cmd | |
eval $benchm_client_docker_run_cmd | |
((benchmarks_ran+=1)) | |
echo -e "\n$benchmarks_ran of $total_benchmarks_runs benchmark runs completed." | |
grep_cmd="grep '${logs_search_string}' ${ovms_client_log_file} | cut -d ':' -f3 | xargs" | |
latency=`eval $grep_cmd` | |
echo -e "\nMetrics: Streams,Concurrency,Latency : ${streams[i]},${concurrency[j]},${latency}\n" | |
# Save Metrics into CSV | |
metrics_csv+="${streams[i]},${concurrency[j]},${latency}\n" | |
echo -e "${streams[i]},${concurrency[j]},${latency}" >> ${log_folder}/metrics.csv | |
done; #concurrency loop end | |
echo -e "\n*** Stopping OVMS server :" | |
echo $ovms_docker_stop_cmd | |
eval $ovms_docker_stop_cmd | |
done; #streams loop end | |
summary_txt="\n***** Completed model: $model_path *****\n" | |
summary_txt+="\nMetrics saved at ${log_folder}/metrics.csv" | |
if [ $script_mode -eq 2 ]; then | |
summary_txt+="\n\nScript ran in MODE $script_mode. Given kpi_latency=$kpi_latency ms " | |
percent=5 | |
min_kpi_latency=$(echo "$kpi_latency * (1 - $percent/100) " | bc -l) | |
max_kpi_latency=$(echo "$kpi_latency * (1 + $percent/100) " | bc -l) | |
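        # Example: with kpi_latency=100 and percent=5, the accepted latency
        # window is [95, 105] ms.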
        # Within the metrics, search for configurations within 5% of the given kpi_latency
        search_output=$(awk -v min_kpi=$min_kpi_latency -v max_kpi=$max_kpi_latency -F, '($3 >= min_kpi) && ($3 <= max_kpi)' ${log_folder}/metrics.csv)
        if [ -z "$search_output" ]; then
            summary_txt+="\nNo options found within $percent% of the given kpi_latency=$kpi_latency ms"
        else
            summary_txt+="\nWithin $percent% of the given kpi_latency=$kpi_latency ms, the following options are available:"
            summary_txt+="\nStreams,Concurrency,Latency(ms)"
            summary_txt+="\n$search_output\n"
        fi
    fi
    # Sort the metrics by the latency column (skipping the CSV header and any
    # rows with an empty latency field) and pick the lowest-latency configuration.
    best_metric=$(awk -F, 'NR > 1 && $3 != ""' ${log_folder}/metrics.csv | sort -t, -k3 -n | head -1 | xargs)
    best_streams=$(echo $best_metric | cut -d ',' -f1)
    best_concurrency=$(echo $best_metric | cut -d ',' -f2)
    best_latency=$(echo $best_metric | cut -d ',' -f3)
    summary_txt+="\n\nOverall best result within this run: \nBest latency: $best_latency ms with $best_concurrency concurrency/cameras, with OVMS NSTREAMS: $best_streams \n"
    summary_txt+="\nHowever, for complete results and other possibilities, see ${log_folder}/metrics.csv \n"
    echo -e "$summary_txt"
    echo -e "$summary_txt" >> ${log_folder_root}/summary.log
done # model_paths_list loop end
echo -e "\n***** Script execution completed *****" | |
echo -e "Total number of benchmark runs completed: $benchmarks_ran" | |
echo -e "Log folder root: ${log_folder_root}" | |
echo -e "Summary: ${log_folder_root}/summary.log \n" |