OpenSSL sweep parametric test
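A Bash script that sweeps openssl speed across combinations of -multi and
-async_jobs for a set of algorithms, writes one ops/sec figure per run to a
CSV, and prints ASCII scaling tables comparing a NoLoad provider against the
default (CPU) provider.

Minimal invocation sketch (the script filename, OpenSSL path, and the reduced
sweep values below are illustrative, not part of the gist):

    # Short sweep against a custom OpenSSL build
    SECONDS_PER_RUN=10 MULTIS="1 2 4" ASYNC_DEPTHS="64 256" \
      OPENSSL=/usr/local/bin/openssl OUTDIR=./results \
      bash ossl_speed_sweep.sh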
#!/usr/bin/env bash
set -euo pipefail

# ================= Tunables =================
SECONDS_PER_RUN=${SECONDS_PER_RUN:-15}
MULTIS=(${MULTIS:-1 2 4 10 30 60 120})
ASYNC_DEPTHS=(${ASYNC_DEPTHS:-64 128 256 512 1024})
# NOTE: ecdhp256 will likely fall back to the CPU (default provider) for the
# NoLoad runs until ECDH key exchange is implemented in the noload provider.
ALGS=(${ALGS:-ecdhp256 ecdsap256})
| : "${OPENSSL:=openssl}" # override with OPENSSL=/usr/local/bin/openssl if needed | |
| OUTDIR=${OUTDIR:-./ossl_speed_results} | |
| CSV="${OUTDIR}/speed_summary.csv" | |
| LOGDIR="${OUTDIR}/logs" | |
| mkdir -p "${OUTDIR}" "${LOGDIR}" | |
# ================ Helpers ===================
ts() { date -u +"%Y-%m-%dT%H:%M:%SZ"; }

# Parse ops/sec from -mr lines (+F* rows). For ECDSA (sign/verify metrics),
# we take the max F-field seen to get a single "ops/sec" summary.
parse_ops() {
  awk -F: '
    $1 ~ /^\+F[0-9]+$/ && NF>=5 {
      v=$4+0; if (v>max) max=v
    }
    END { if (max>0) printf("%.3f\n", max); else print "NA" }
  '
}
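
# Usage sketch for parse_ops (the log name below is illustrative; it follows
# the naming used in run_case): prints the max ops/sec seen, or "NA" if the
# input has no +F rows.
#   grep '^\+F' "${LOGDIR}/noload_ecdsap256_multi4_async256.log" | parse_ops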
run_case() {
  local label="$1" alg="$2" multi="$3" async="$4"
  local logfile="${LOGDIR}/${label}_${alg}_multi${multi}_async${async}.log"
  local provider_args ppq
  if [[ "$label" == "cpu" ]]; then
    provider_args="-provider default"
    ppq="-propquery provider=default"
  else
    # NoLoad still needs the default provider for RNG/digests, so load nothing
    # extra here and steer algorithm fetches to noload via the property query.
    provider_args=""
    # No inner quotes: ${cmd} is expanded unquoted below, so literal quotes
    # would be passed to openssl as part of the -propquery argument.
    ppq="-propquery ?provider=noload-openssl-provider"
  fi
  local cmd="${OPENSSL} speed -seconds ${SECONDS_PER_RUN} -mr ${provider_args} ${ppq} -multi ${multi} -async_jobs ${async} ${alg}"
  echo "[$(ts)] RUN : ${cmd}"
  if ! ${cmd} > "${logfile}" 2>&1; then
    echo "[$(ts)] ERROR: ${cmd} (see ${logfile})"
    echo "${label},${alg},${multi},${async},ERROR" >> "${CSV}"
    return
  fi
  local ops
  # '|| true' keeps set -e / pipefail from aborting when a log has no +F rows;
  # parse_ops prints "NA" on empty input.
  ops=$(grep '^\+F' "${logfile}" | parse_ops || true)
  echo "[$(ts)] OPS : ${ops} (${label} ${alg} m=${multi} a=${async})"
  echo "${label},${alg},${multi},${async},${ops}" >> "${CSV}"
}
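
# Example call (one point of the sweep driven by the matrix loops below):
#   run_case "noload" "ecdsap256" 4 256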
print_ascii_tables() {
  local csv="$1"
  echo
  echo "====================== SUMMARY (ASCII) ======================"
  echo "File: ${csv}"
  echo
  # For each algorithm, print CPU and NoLoad subtables with scaling efficiency.
  for ALG in "${ALGS[@]}"; do
    for PROV in cpu noload; do
      echo "Algorithm: ${ALG} Provider: ${PROV}"
      echo "multi async    ops_per_sec  baseline(m=1)   linear_ideal  efficiency(%)"
      echo "----- ----- -------------- -------------- -------------- --------------"
      awk -F, -v alg="${ALG}" -v prov="${PROV}" '
        BEGIN { OFS=" " }
        NR>1 && $1==prov && $2==alg && $5!="ERROR" {
          key=$4;              # async depth
          data[key, $3]=$5+0;  # ops for (async, multi)
          if (!seen_async[key]++) asyncs[++na]=key;
          if (!seen_multi[$3]++) multis[++nm]=$3+0;
        }
        END {
          # sort arrays numerically (simple bubble; small sets)
          for (i=1;i<=na;i++) for (j=i+1;j<=na;j++) if (asyncs[j]+0 < asyncs[i]+0) { t=asyncs[i]; asyncs[i]=asyncs[j]; asyncs[j]=t }
          for (i=1;i<=nm;i++) for (j=i+1;j<=nm;j++) if (multis[j]+0 < multis[i]+0) { t=multis[i]; multis[i]=multis[j]; multis[j]=t }
          for (ai=1; ai<=na; ai++) {
            a = asyncs[ai]
            base = data[a, 1]  # baseline is multi=1 at this async
            for (mi=1; mi<=nm; mi++) {
              m = multis[mi]
              val = data[a, m]
              if (val==0) continue
              lin = base * m
              eff = (lin>0 ? (val/lin*100.0) : 0)
              printf "%5d %5d %14.2f %14.2f %14.2f %14.2f\n", m, a, val, base, lin, eff
            }
          }
          print ""
        }
      ' "$csv"
    done
  done

  # CPU vs NoLoad quick compare: best ops/sec per algorithm (any async/multi)
  echo "CPU vs NoLoad (best observed ops/sec)"
  echo "algorithm        best_cpu_ops/s  best_noload_ops/s"
  echo "-------------- ---------------- ------------------"
  awk -F, '
    NR>1 && $5!="ERROR" {
      key=$2
      if ($1=="cpu" && ($5+0)>cpu[key]) cpu[key]=$5+0
      if ($1=="noload" && ($5+0)>noload[key]) noload[key]=$5+0
    }
    END {
      for (k in cpu) {
        printf "%-14s %16.2f %18.2f\n", k, cpu[k]+0, noload[k]+0
      }
    }
  ' "$csv"
  echo "============================================================"
}
# ============== Preamble ================
echo "# Output directory: ${OUTDIR}"
echo "OpenSSL version: $(${OPENSSL} version -a | sed -n '1,3p')"
echo "Active providers (default env):"
${OPENSSL} list -providers -verbose | sed -n '1,40p' || true

# Start fresh CSV
echo "provider,algorithm,multi,async_jobs,ops_per_sec" > "${CSV}"

# ============== Matrix ==================
for alg in "${ALGS[@]}"; do
  for async in "${ASYNC_DEPTHS[@]}"; do
    for multi in "${MULTIS[@]}"; do
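      # Uncomment the next line to also sweep the default (CPU) provider,
      # which feeds the CPU vs NoLoad comparison table.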
| # run_case "cpu" "${alg}" "${multi}" "${async}" | |
| run_case "noload" "${alg}" "${multi}" "${async}" | |
| done | |
| done | |
| done | |

# ============== Postprocess =============
print_ascii_tables "${CSV}"

echo
echo "Done."
echo "Summary CSV: ${CSV}"
echo "Raw logs   : ${LOGDIR}/"