- Machine: AMD Ryzen 9 7950X 16-Core (32 CPUs), 64 GB RAM
- OS: Fedora Linux 43, kernel 6.19.11-200.fc43.x86_64
- Java: 25.0.2-tem
- Quarkus: 3.34.3
- Spring Boot 4: 4.0.5
- CPU pinning: app=0-3, db=4-6, load-gen=7-9, monitor=10, otel=11-13
- Network: host networking (--use-container-host-network)
| import numpy as np | |
| import matplotlib.pyplot as plt | |
| from mpl_toolkits.mplot3d import Axes3D | |
# 1. Set up parameter ranges
# Global injection rate swept from 10k to 90k TPS (20 points)
Lambdas = np.linspace(10_000, 90_000, num=20)
# Per-request service times swept from 5 us to 150 us (20 points)
S_FJ = np.linspace(5e-6, 150e-6, num=20)
S_IO = np.linspace(5e-6, 150e-6, num=20)
| private static final ScopedValue<ExecutionContext> EXECUTION_CTX = ScopedValue.newInstance(); | |
| private static final class ExecutionContext { | |
| private final ReentrantLock lock = new ReentrantLock(true); | |
| private final AtomicReference<CountDownLatch> suspended = new AtomicReference<>(null); | |
| /** | |
| * This can be called by any thread! | |
| */ | |
| public void resume() { | |
| var toResume = suspended.getAndSet(null); |
| /* | |
| * Copyright 2025 The Netty Project | |
| * | |
| * The Netty Project licenses this file to you under the Apache License, | |
| * version 2.0 (the "License"); you may not use this file except in compliance | |
| * with the License. You may obtain a copy of the License at: | |
| * | |
| * https://www.apache.org/licenses/LICENSE-2.0 | |
| * | |
| * Unless required by applicable law or agreed to in writing, software |
In queuing theory, reducing the service time—the duration it takes to serve a customer or process a request—can have a remarkably significant and often disproportionate impact on the overall response time. While it may seem intuitive that faster service leads to shorter waits, the mathematical principles of queuing theory reveal a non-linear relationship: a small improvement in service speed can yield a much larger reduction in total time spent in the system, especially as the system becomes busier.
The response time is the total time a customer or request spends in a system, from arrival to departure. It is the sum of the waiting time (time spent in the queue) and the service time itself. The effectiveness of improving service time is most clearly understood through its effect on system utilization.
| #include <stdio.h> | |
| #include <string.h> | |
| #include <stdlib.h> | |
| #include <sys/epoll.h> | |
| #include <sys/eventfd.h> | |
| #include <unistd.h> | |
| #include <pthread.h> | |
| #include <errno.h> | |
| void* writer_thread(void* arg) { |
| package redhat.app.services.benchmark; | |
| import java.util.concurrent.TimeUnit; | |
| import org.openjdk.jmh.annotations.Benchmark; | |
| import org.openjdk.jmh.annotations.BenchmarkMode; | |
| import org.openjdk.jmh.annotations.CompilerControl; | |
| import org.openjdk.jmh.annotations.Fork; | |
| import org.openjdk.jmh.annotations.Measurement; | |
| import org.openjdk.jmh.annotations.Mode; |
| import java.util.Objects; | |
| /** | |
| * It's a growable ring buffer that allows moving tail/head sequences, clearing, appending, and setting/replacing elements at specific positions. | |
| */ | |
| final class ArrayRingBuffer<T> { | |
| private static final Object[] EMPTY = new Object[0]; | |
| // it points to the next slot after the last element | |
| private int tailSequence; |
Weird ASM produced around type guards
With
# VM version: JDK 17.0.7, Java HotSpot(TM) 64-Bit Server VM, 17.0.7+8-LTS-224
red.hat.puzzles.polymorphism.InstanceOfScalabilityBenchmark.encodeFullType -pencoderType=a -ppollutionCases=20000 -t 16 -f 4 -prof perfasm
where
| Benchmark (same) (size) Mode Cnt Score Error Units | |
| LowerCaseComparison.optimizedContentEqualsIgnoreCase true 8 avgt 10 6.896 ± 0.163 ns/op | |
| LowerCaseComparison.optimizedContentEqualsIgnoreCase:CPI true 8 avgt 0.240 clks/insn | |
| LowerCaseComparison.optimizedContentEqualsIgnoreCase:IPC true 8 avgt 4.161 insns/clk | |
| LowerCaseComparison.optimizedContentEqualsIgnoreCase:L1-dcache-load-misses true 8 avgt 0.007 #/op | |
| LowerCaseComparison.optimizedContentEqualsIgnoreCase:L1-dcache-loads true 8 avgt 36.741 #/op | |
| LowerCaseComparison.optimizedContentEqualsIgnoreCase:L1-icache-load-misses true 8 avgt ≈ 10⁻³ #/op | |
| LowerCaseComparison.optimizedContentEqualsIgnoreCase:L1-icache-loads |