Skip to content

Instantly share code, notes, and snippets.

@julik
Created November 21, 2020 13:02
Show Gist options
  • Save julik/bfc893fa8e29d646b3656185634fe1a8 to your computer and use it in GitHub Desktop.
require 'bundler'
Bundler.setup
require 'benchmark'
require 'benchmark/ips'
require_relative '../lib/zip_tricks'
# NOTE(review): this outer-script `some_randomness` is never used here — the
# rackup app embedded after __END__ builds its own. Kept for compatibility.
some_randomness = StringIO.new(Random.new.bytes(800 * 1024))

# Materialize the embedded rackup file (everything after __END__) next to
# this script so puma can boot it.
rackup_path = File.join(__dir__, 'webserver.ru')
File.open(rackup_path, 'wb') do |rackup_code|
  IO.copy_stream(DATA, rackup_code)
end

command = %W[bundle exec puma --threads 6 --port 9494 #{rackup_path}]
server = IO.popen(command, 'r')
server_pid = server.pid

# Drain puma's stdout in the background. Without this the pipe buffer can
# fill up and block the server mid-benchmark.
Thread.new { server.each_line { |_line| } }

at_exit do
  Process.kill('TERM', server_pid)
  Process.wait(server_pid) # reap the child so we don't leave a zombie
rescue Errno::ESRCH, Errno::ECHILD
  nil # server already exited
end

# Wait until puma actually accepts connections instead of sleeping blindly.
require 'socket'
deadline = Process.clock_gettime(Process::CLOCK_MONOTONIC) + 15
begin
  TCPSocket.new('127.0.0.1', 9494).close
rescue Errno::ECONNREFUSED, Errno::EADDRNOTAVAIL, Errno::ETIMEDOUT
  raise 'puma did not start listening within 15 seconds' if Process.clock_gettime(Process::CLOCK_MONOTONIC) > deadline
  sleep 0.2
  retry
end
# Hit the ZIP-streaming endpoint with ApacheBench at several write buffer
# sizes and compare throughput. The path suffix selects the buffer size
# the rack app will use for that request.
buffer_sizes = [0, 1024, 4 * 1024, 16 * 1024, 64 * 1024]

Benchmark.ips do |bench|
  bench.config(time: 5, warmup: 0)
  buffer_sizes.each do |buf_size|
    bench.report("Rack each with bufsize of #{buf_size}") do
      # -l tolerates variable response lengths (the ZIP output is not fixed-size)
      `ab -n 32 -c 6 -l "http://127.0.0.1:9494/#{buf_size}"`
    end
  end
  bench.compare!
end
__END__
require 'bundler'
Bundler.setup
require_relative '../lib/zip_tricks'
# Adapts a ZipTricks::Streamer for piecewise consumption (e.g. as a Rack
# response body). Writes coming out of the Streamer are coalesced through a
# WriteBuffer so that `each` yields chunks of roughly `write_buffer_size`
# bytes instead of one tiny string per Streamer write call.
class BufferingEnumerator
  DEFAULT_WRITE_BUFFER_SIZE = 65 * 1024

  # @param write_buffer_size[Integer] size threshold, in bytes, at which
  #   buffered output is yielded to the consumer. ZipTricks writes are
  #   unbuffered by default; sizing this close to a socket buffer (or to
  #   5 MB for S3 multipart parts) reduces the number of yields.
  # @param streamer_options[Hash] options forwarded to {ZipTricks::Streamer.new}
  # @param blk a block receiving the Streamer; it is not run until `each`
  #   is called on this object
  def initialize(write_buffer_size: DEFAULT_WRITE_BUFFER_SIZE, **streamer_options, &blk)
    @streamer_options = streamer_options.to_h
    @bufsize = write_buffer_size.to_i
    @archiving_block = blk
  end

  # Runs the archiving block against a {ZipTricks::Streamer}, handing every
  # buffered chunk of ZIP output to the caller's block. Without a block,
  # returns an Enumerator so chunks can be pulled with `next`.
  #
  # @yield [String] a chunk of the ZIP output in binary encoding
  def each
    return enum_for(:each) unless block_given?

    chunk_receiver = ZipTricks::BlockWrite.new { |chunk| yield(chunk) }
    buffered_sink = ZipTricks::WriteBuffer.new(chunk_receiver, @bufsize)
    ZipTricks::Streamer.open(buffered_sink, **@streamer_options, &@archiving_block)
    buffered_sink.flush! # push out whatever is left below the buffer threshold
  end
end
# Rack endpoint: GET /<N> streams a ZIP archive whose output is coalesced
# into roughly N-byte chunks (N parsed from the last path segment).
zip_serving_app = ->(env) {
  request = Rack::Request.new(env)
  requested_bufsize = request.path.split('/').last.to_i
  random_payload = StringIO.new(Random.new.bytes(800 * 1024))

  response_body = BufferingEnumerator.new(write_buffer_size: requested_bufsize) do |zip|
    64.times do |n|
      # A compressible entry...
      zip.write_deflated_file("war-and-peace-#{n}.txt") do |sink|
        File.open(__dir__ + '/../spec/zip_tricks/war-and-peace.txt', 'rb') do |source|
          IO.copy_stream(source, sink)
        end
      end
      # ...and an incompressible one, stored without deflation.
      zip.write_stored_file("random-#{n}.bin") do |sink|
        IO.copy_stream(random_payload, sink)
        random_payload.rewind # reuse the same bytes for the next entry
      end
    end
  end

  [200, {}, response_body]
}
run zip_serving_app
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment