Last active
August 4, 2020 15:32
-
-
Save erewok/14da395beb4fe86bedcdc08cd52a232c to your computer and use it in GitHub Desktop.
Example Starlette app to test different scenarios with BaseHttpMiddleware
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import asyncio
import json
import os

import uvicorn
from starlette.applications import Starlette
from starlette.background import BackgroundTask
from starlette.middleware.base import BaseHTTPMiddleware
from starlette.responses import JSONResponse, PlainTextResponse, StreamingResponse
class TransparentMiddlewareNoStreaming(BaseHTTPMiddleware):
    """Pass-through middleware that overrides ``__call__`` directly.

    By skipping ``dispatch`` it bypasses BaseHTTPMiddleware's
    response-streaming machinery entirely and forwards the raw ASGI call.
    """

    async def __call__(self, scope, receive, send) -> None:
        # Log entry, then hand the ASGI triple straight to the wrapped app.
        print("__call__ middleware!")
        inner = self.app
        await inner(scope, receive, send)
class aTransparentMiddlewareStreaming(BaseHTTPMiddleware):
    """Do-nothing middleware 'a': logs, then forwards the request unchanged."""

    async def dispatch(self, request, call_next):
        print("dispatching a!")
        # Forward to the next handler and return its response verbatim.
        return await call_next(request)
class bTransparentMiddlewareStreaming(BaseHTTPMiddleware):
    """Do-nothing middleware 'b': logs, then forwards the request unchanged."""

    async def dispatch(self, request, call_next):
        print("dispatching b!")
        # Forward to the next handler and return its response verbatim.
        return await call_next(request)
class cTransparentMiddlewareStreaming(BaseHTTPMiddleware):
    """Do-nothing middleware 'c': logs, then forwards the request unchanged."""

    async def dispatch(self, request, call_next):
        print("dispatching c!")
        # Forward to the next handler and return its response verbatim.
        return await call_next(request)
async def slow_numbers(minimum, maximum):
    """Async generator yielding an HTML list of the integers
    minimum..maximum (inclusive), pausing 0.2s after each item to
    simulate a slow streaming body."""
    yield '<html><body><ul>'
    for n in range(minimum, maximum + 1):
        yield f'<li>{n}</li>'
        await asyncio.sleep(0.2)
    yield '</ul></body></html>'
async def some_sleeper():
    """Background task: sleep asynchronously for 4 seconds.

    The start/end prints let you confirm on the console that the task
    finishes well after the HTTP response has already been returned.
    """
    print("sleeping!")
    nap_seconds = 4
    await asyncio.sleep(nap_seconds)
    print("waking up now!")
async def somthing_broken(minimum, maximum):
    """Async generator that streams an HTML list but deliberately raises
    RuntimeError once a number greater than 2 has been yielded — used to
    exercise error handling when a stream fails mid-response.

    (The typo in the name is preserved: the /b route references it.)
    """
    yield '<html><body><ul>'
    for n in range(minimum, maximum + 1):
        yield f'<li>{n}</li>'
        if n > 2:
            # Blow up after part of the body has already gone out.
            raise RuntimeError("This is a broken stream")
        await asyncio.sleep(0.2)
    yield '</ul></body></html>'
# Build the demo app; debug=True returns tracebacks in error responses.
app = Starlette(debug=True)
# NOTE: middleware runs in reverse add order — c first, then b, then a —
# which matches the "dispatching c! / b! / a!" console output shown below.
app.add_middleware(aTransparentMiddlewareStreaming)
app.add_middleware(bTransparentMiddlewareStreaming)
app.add_middleware(cTransparentMiddlewareStreaming)
@app.route("/p")
async def some_plaintext(_):
    """Return a plain-text body with the slow background task attached."""
    print("returning response plain text")
    return PlainTextResponse("OK", background=BackgroundTask(some_sleeper))
@app.route("/j")
async def some_json(_):
    """Return a small JSON body with the slow background task attached."""
    print("returning response json")
    return JSONResponse({"hey": "you"}, background=BackgroundTask(some_sleeper))
@app.route("/s")
async def some_streaming(_):
    """Stream slow_numbers(1, 5) as HTML, with the slow background task."""
    print("returning response streaming")
    return StreamingResponse(slow_numbers(1, 5), background=BackgroundTask(some_sleeper))
@app.route("/b")
async def some_broken_streaming(_):
    """Stream the deliberately failing generator to observe mid-stream errors."""
    print("returning response streaming")
    return StreamingResponse(somthing_broken(1, 5), background=BackgroundTask(some_sleeper))
@app.route("/streaming-memory-test/{total_size:int}")
async def streaming_memory_test(request):
    """Stream ``total_size`` bytes of random data in 1 KiB chunks.

    Useful for watching server memory while a large response streams
    through the middleware stack. Path params smaller than one chunk are
    scaled up by factors of 1024 so tiny values still yield a
    multi-chunk stream.

    Fix: the original body called ``os.urandom`` but ``os`` was never
    imported, so every request raised NameError at stream time.
    """
    total_size = request.path_params["total_size"]
    chunk_size = 1024
    # Scale small requests up (e.g. 5 -> 5120) so there is data to stream.
    while total_size < chunk_size:
        total_size *= chunk_size

    async def byte_stream():
        # Emit full chunks with a short pause between each, then any tail.
        chunk_count = total_size // chunk_size
        remainder = total_size % chunk_size
        for n in range(chunk_count):
            print("Yield chunk: ", n)
            await asyncio.sleep(0.2)
            yield os.urandom(chunk_size)
        if remainder:  # skip a pointless empty b"" tail chunk
            yield os.urandom(remainder)

    return StreamingResponse(byte_stream())
# When run as a script, serve the app on all interfaces at port 8000.
if __name__ == '__main__':
    uvicorn.run(app, host="0.0.0.0", port=8000)
When this demo is run, you can confirm that the background task completes only after the response has been returned by running the following in separate terminals:
~/open_source/starlette stream_response_bg*
starlette ❯ curl http://localhost:8000/s
<html><body><ul><li>1</li><li>2</li><li>3</li><li>4</li><li>5</li></ul></body></html>
~/open_source/starlette stream_response_bg* 2m 31s
starlette ❯ python ../starlette_demo.py
INFO: Started server process [71658]
INFO: Waiting for application startup.
INFO: Application startup complete.
INFO: Uvicorn running on http://0.0.0.0:8000 (Press CTRL+C to quit)
dispatching c!
dispatching b!
dispatching a!
returning response streaming
INFO: 127.0.0.1:53985 - "GET /s HTTP/1.1" 200 OK
sleeping! # <-- response above has been returned at this point
waking up now! # <--- 4 seconds later, this line appears; request has already been completed
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment
Results from running
wrk
against the Starlette app with starlette/middleware/base.py
as defined in branch stream_response_bg
: the above test is useful for demonstrating that no
TransportClosed
errors appear, and also that the background task
is not preventing responses from being returned: with a 12-second test duration, a blocking 4-second background task would prevent requests from completing and we'd see a much smaller number of requests.