In the following demo each client can execute 2 requests per 10 seconds. Requests are spread across clients via a Gaussian distribution, so clients in the middle of the list (such as mno-5) receive most of the traffic, while internal requests (clientId: null) are not limited.
See the stdout.txt file for the demo output.

import java.util.*;
import java.util.concurrent.*;

public class Main {
    public static void main(String[] args) throws Exception {
        String[] clientIds = new String[] {
            "abc-1",
            "def-2",
            "ghi-3",
            "jkl-4",
            "mno-5",
            "prs-6",
            "stu-7",
            "vzz-8",
            "xyw-9",
        };
        final ExecutorService server = Executors.newFixedThreadPool(16);
        // dependency injection: register the singleton services
        final Random rand = new Random();
        final RateLimiter rateLimiter = new RateLimiter(2, 10, TimeUnit.SECONDS);
        while (true) {
            // pick a random clientId with a Gaussian bias toward the middle of the array,
            // or null for an internal request
            String clientId = rand.nextBoolean() ? null : clientIds[(int) rand.nextGaussian() + 4];
            // execute the request asynchronously
            server.execute(() -> {
                DummyController controller = new DummyController(rateLimiter);
                Request req = new Request("GET", "/api/echo", null, new HashMap<>() {{
                    put("X-ClientId", clientId);
                }});
                Response res = controller.getEcho(req);
                System.out.println(res);
                System.out.flush();
            });
            // wait a random interval between requests
            Thread.sleep(rand.nextInt(1000));
        }
    }
}

class Response {
    final String body;
    final int httpCode;
    final Request request;

    Response(String body, int httpCode, Request request) {
        this.body = body;
        this.httpCode = httpCode;
        this.request = request;
    }

    @Override
    public String toString() {
        return String.format("%s | %d: %s", request, httpCode, body);
    }
}

class Request {
    final String method;
    final String url;
    final String body;
    final Map<String, String> headers;

    Request(String method, String url, String body, Map<String, String> headers) {
        this.method = method;
        this.url = url;
        this.body = body;
        this.headers = headers;
    }

    @Override
    public String toString() {
        return String.format("%s %s", method, url);
    }

    public Response response(String body, int httpCode) {
        return new Response(body, httpCode, this);
    }
}

class RateLimiter {
    private final int _limit;
    // the counters map must be synchronized because requests arrive concurrently
    private final Map<String, Integer> _rates = Collections.synchronizedMap(new HashMap<>());
    private final ScheduledExecutorService _cleaner = Executors.newSingleThreadScheduledExecutor();

    RateLimiter(int limit, int delay, TimeUnit timeUnit) {
        _limit = limit;
        // clear the counters periodically (fixed-window limiting)
        _cleaner.scheduleAtFixedRate(() -> {
            _rates.clear();
            System.out.println("====== Rate Limiter counters are cleared ======");
            System.out.flush();
        }, delay, delay, timeUnit);
    }

    boolean allowed(final Request request) {
        // extract the clientId from the request headers
        String clientId = request.headers.getOrDefault("X-ClientId", null);
        // a null clientId means the request is internal and is never limited
        if (clientId == null) {
            return true;
        }
        // for all other clients, increment the counter and compare it with the limit
        return _rates.compute(clientId, (key, value) -> value == null ? 1 : value + 1) <= _limit;
    }
}

class DummyController {
    private final RateLimiter _rateLimiter;

    DummyController(final RateLimiter rateLimiter) {
        _rateLimiter = rateLimiter;
    }

    Response getEcho(final Request request) {
        String clientId = request.headers.getOrDefault("X-ClientId", null);
        if (!_rateLimiter.allowed(request)) {
            // return a "429 Too Many Requests" response
            return request.response(String.format("clientId: %s | LIMIT REACHED...", clientId), 429);
        }
        // return a "200 OK" response
        return request.response(String.format("clientId: %s", clientId), 200);
    }
}

stdout.txt:

GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: mno-5
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: mno-5
GET /api/echo | 200: clientId: jkl-4
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: prs-6
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
====== Rate Limiter counters are cleared ======
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: mno-5
GET /api/echo | 200: clientId: mno-5
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: prs-6
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: null
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: null
GET /api/echo | 429: clientId: mno-5 | LIMIT REACHED...
GET /api/echo | 200: clientId: null
====== Rate Limiter counters are cleared ======
GET /api/echo | 200: clientId: mno-5
GET /api/echo | 200: clientId: mno-5
...
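
The RateLimiter above is a fixed-window counter: each client gets at most the configured number of requests per window, and the scheduled cleaner task wipes all counters when the window elapses, at which point every client starts from zero again. Because the demo picks clients and delays at random, the output above is non-deterministic. The sketch below is not part of the original demo (the class name RateLimiterSketch and the 1-second window are illustrative choices); it exercises the same RateLimiter deterministically, assuming it is compiled in the same package as the classes above.

import java.util.HashMap;
import java.util.concurrent.TimeUnit;

public class RateLimiterSketch {
    public static void main(String[] args) throws Exception {
        // allow 2 requests per 1-second window (a short window keeps the sketch fast)
        RateLimiter limiter = new RateLimiter(2, 1, TimeUnit.SECONDS);

        Request req = new Request("GET", "/api/echo", null, new HashMap<>() {{
            put("X-ClientId", "abc-1");
        }});

        System.out.println(limiter.allowed(req)); // true  (1st request in the window)
        System.out.println(limiter.allowed(req)); // true  (2nd request in the window)
        System.out.println(limiter.allowed(req)); // false (limit of 2 exceeded)

        // once the cleaner task has reset the counters, the same client is allowed again
        Thread.sleep(1500); // the cleaner also prints its "counters are cleared" line here
        System.out.println(limiter.allowed(req)); // true

        // the cleaner's scheduler thread is non-daemon, so exit explicitly
        System.exit(0);
    }
}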