Benchmark Rails methods
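A standalone harness for comparing two implementations of the same Rails code path. It can check that both versions return the same output, compare object allocations via ObjectSpace, compare iterations per second with benchmark-ips (GC-controlled, with bootstrap statistics), and optionally scan the new version for N+1 queries with Bullet and Prosopite.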
# frozen_string_literal: true

# Usage:
#   bundle exec rails runner benchmark.rb

require "benchmark"
require "benchmark/ips"
require "objspace"
require "timecop"

# Silence SQL logging so query output does not pollute the benchmark report.
ActiveRecord::Base.logger = nil
# benchmark-ips suite hooks: force a full GC sweep before each warmup and
# measurement phase, then keep GC disabled so collection pauses do not
# distort the timings.
class GCSuite
  def warming(*)
    run_gc
  end

  def running(*)
    run_gc
  end

  def warmup_stats(*)
  end

  def add_report(*)
  end

  def disable
    GC.disable
  end

  private

  def run_gc
    GC.enable
    GC.start
    GC.disable
  end
end
module BenchmarkExecution
  COLORS = { yellow: "\e[33m", green: "\e[32m", red: "\e[31m", bold: "\e[1m", reset: "\e[0m" }.freeze
  # Deltas at or below this percentage are treated as noise rather than a real change.
  CONFIDENCE_DELTA_PERCENTAGE = 5

  class << self
    def run(
      benchmark_measures:,
      benchmark_warmup:,
      benchmark_output:,
      benchmark_memory:,
      benchmark_execution_time:,
      analyze_nplus_one_queries:
    )
      puts "#{COLORS[:bold]}[BenchmarkExecution.run]#{COLORS[:reset]}"
      puts "Options:"
      puts " - benchmark_measures: #{benchmark_measures}"
      puts " - benchmark_warmup: #{benchmark_warmup}"
      puts " - benchmark_output: #{benchmark_output}"
      puts " - benchmark_memory: #{benchmark_memory}"
      puts " - benchmark_execution_time: #{benchmark_execution_time}"
      puts " - analyze_nplus_one_queries: #{analyze_nplus_one_queries}"
      puts ""

      old_method, new_method = TestCode.new.benchmark_methods

      if analyze_nplus_one_queries
        Bullet.enable = true
        Prosopite.enabled = true
      end

      execute_and_compare(
        old_method: old_method,
        new_method: new_method,
        benchmark_measures: benchmark_measures,
        benchmark_warmup: benchmark_warmup,
        benchmark_output: benchmark_output,
        benchmark_memory: benchmark_memory,
        benchmark_execution_time: benchmark_execution_time,
        analyze_nplus_one_queries: analyze_nplus_one_queries,
      )
    end
    def execute_and_compare(
      old_method:,
      new_method:,
      benchmark_measures:,
      benchmark_warmup:,
      benchmark_output:,
      benchmark_memory:,
      benchmark_execution_time:,
      analyze_nplus_one_queries:
    )
      compare_methods(
        old_method: old_method,
        new_method: new_method,
        benchmark_measures: benchmark_measures,
        benchmark_warmup: benchmark_warmup,
        benchmark_output: benchmark_output,
        benchmark_memory: benchmark_memory,
        benchmark_execution_time: benchmark_execution_time,
        analyze_nplus_one_queries: analyze_nplus_one_queries,
      )
    end
    private

    def compare_methods(
      old_method:,
      new_method:,
      benchmark_measures:,
      benchmark_warmup:,
      benchmark_output:,
      benchmark_memory:,
      benchmark_execution_time:,
      analyze_nplus_one_queries:
    )
      if benchmark_output
        print "\n#{COLORS[:bold]}[Check Same Outputs]#{COLORS[:reset]}"
        old_output = old_method.call
        new_output = new_method.call
        if old_output == new_output
          puts " #{COLORS[:bold]}#{COLORS[:green]}✅#{COLORS[:reset]}"
        else
          puts " #{COLORS[:bold]}#{COLORS[:red]}⛔️ Different#{COLORS[:reset]}"
          puts "#{COLORS[:yellow]}Old Output: #{old_output}#{COLORS[:reset]}"
          puts "#{COLORS[:red]}New Output: #{new_output}#{COLORS[:reset]}"
        end
      end

      if benchmark_memory
        measure_allocated_objects(
          benchmark_measures: benchmark_measures,
          benchmark_warmup: benchmark_warmup,
          old_method: old_method,
          new_method: new_method,
        )
      end

      if benchmark_execution_time
        measure_ips(
          benchmark_measures: benchmark_measures,
          benchmark_warmup: benchmark_warmup,
          old_method: old_method,
          new_method: new_method,
        )
      end

      if analyze_nplus_one_queries
        puts "\n#{COLORS[:bold]}[Analyzing N+1 Queries]#{COLORS[:reset]}\n"
        analyze_nplus(new_method)
      end
    end
    def measure_ips(benchmark_measures:, benchmark_warmup:, old_method:, new_method:)
      puts "\n#{COLORS[:bold]}[Time Measurements]#{COLORS[:reset]}\n"
      suite = GCSuite.new
      Benchmark.ips do |x|
        x.config(
          time: benchmark_measures,
          warmup: benchmark_warmup,
          suite: suite,
          # Bootstrap confidence intervals require the kalibera gem.
          stats: :bootstrap,
          confidence: 100 - CONFIDENCE_DELTA_PERCENTAGE,
        )
        x.report("Old") { old_method.call }
        x.report("New") { new_method.call }
        x.compare!
      end
    end
    def measure_allocated_objects(benchmark_measures:, benchmark_warmup:, old_method:, new_method:)
      puts "\n#{COLORS[:bold]}[Object Allocation Count]#{COLORS[:reset]}\n"
      suite = GCSuite.new
      avg_old_memory =
        measure_method_objects(
          benchmark_measures: benchmark_measures,
          benchmark_warmup: benchmark_warmup,
          suite: suite,
          method_proc: old_method,
        )
      avg_new_memory =
        measure_method_objects(
          benchmark_measures: benchmark_measures,
          benchmark_warmup: benchmark_warmup,
          suite: suite,
          method_proc: new_method,
        )
      puts "Old: #{avg_old_memory} objects"
      puts "New: #{avg_new_memory} objects"
      delta_percentage = ((avg_new_memory - avg_old_memory).fdiv(avg_old_memory) * 100).round(2)
      if avg_old_memory == avg_new_memory
        puts "#{COLORS[:bold]}#{COLORS[:yellow]}🛄 No difference!#{COLORS[:reset]}"
      elsif delta_percentage.abs <= CONFIDENCE_DELTA_PERCENTAGE
        puts "#{COLORS[:bold]}#{COLORS[:yellow]}🔄 Delta is too small (#{delta_percentage}%) to determine a significant change.#{COLORS[:reset]}"
      elsif avg_old_memory < avg_new_memory
        puts "#{COLORS[:bold]}#{COLORS[:red]}⛔️ Old version is better!#{COLORS[:reset]}"
      else
        puts "#{COLORS[:bold]}#{COLORS[:green]}✅ New version is better!#{COLORS[:reset]}"
      end
    end
    # Averages the growth in heap object slots (ObjectSpace.count_objects[:TOTAL])
    # across the measured calls; GC stays disabled so counts are not reduced mid-run.
    def measure_method_objects(benchmark_measures:, benchmark_warmup:, suite:, method_proc:)
      suite.disable
      benchmark_warmup.times { method_proc.call }
      memory_results = []
      benchmark_measures.times do
        before = ObjectSpace.count_objects[:TOTAL]
        method_proc.call
        after = ObjectSpace.count_objects[:TOTAL]
        memory_results << (after - before)
      end
      memory_results.sum / benchmark_measures
    end

    def analyze_nplus(method_proc)
      Bullet.profile { method_proc.call }
      Prosopite.scan { method_proc.call }
    end
  end
end

class BenchmarkTest
  def initialize
    extend ActionView::Helpers::TranslationHelper
    # Mirror Rails logs to STDOUT as well (ActiveSupport::Logger.broadcast is the pre-Rails 7.1 API).
    Rails.logger.extend(ActiveSupport::Logger.broadcast(ActiveSupport::Logger.new($stdout)))
  end
end
class TestCode < BenchmarkTest
  include ApplicationHelper

  # Returns the two implementations to compare: the old version first, the new version second.
  def benchmark_methods
    old_version = -> { User.pluck("DISTINCT email").sort }
    new_version = -> { User.distinct.pluck(:email).sort }
    [old_version, new_version]
  end
end

# Freeze time so time-sensitive code behaves identically across iterations.
Timecop.freeze(Time.zone.local(2023, 9, 24)) do
  BenchmarkExecution.run(
    benchmark_measures: 10,
    benchmark_warmup: 3,
    benchmark_output: false,
    benchmark_memory: false,
    benchmark_execution_time: true,
    analyze_nplus_one_queries: false,
  )
end
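To compare a different pair of implementations, replace the lambdas returned by TestCode#benchmark_methods and flip the flags you care about. A minimal sketch reusing the User model from the script (loading full records versus plucking a single column, a typical allocation win):

class TestCode < BenchmarkTest
  include ApplicationHelper

  def benchmark_methods
    # Old: instantiates a full ActiveRecord object per row just to read one attribute.
    old_version = -> { User.all.map(&:id).sort }
    # New: fetches the bare column values directly.
    new_version = -> { User.pluck(:id).sort }
    [old_version, new_version]
  end
end

BenchmarkExecution.run(
  benchmark_measures: 10,
  benchmark_warmup: 3,
  benchmark_output: true,          # both lambdas should return the same sorted ids
  benchmark_memory: true,          # pluck should allocate far fewer objects
  benchmark_execution_time: true,
  analyze_nplus_one_queries: false,
)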