Created
March 6, 2013 19:08
-
-
Save carlsverre/5102084 to your computer and use it in GitHub Desktop.
Minimal python benchmark class
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import time
import logging
from logging import StreamHandler, Formatter

# A bare-bones logger that emits just the message text, so benchmark
# summaries print without timestamps or level prefixes.
_stream_handler = StreamHandler()
_stream_handler.setFormatter(Formatter('%(message)s'))
minimal_logger = logging.getLogger(__name__ + "minimal")
minimal_logger.setLevel(logging.INFO)
minimal_logger.addHandler(_stream_handler)
class Benchmark:
    """Minimal timing harness.

    Time a named code section with ``with benchmark.run(name): ...`` and
    print aggregate statistics with ``summarize()``.  Completed samples
    are kept in ``self.benchmarks`` as
    ``{name: [{"name", "start", "end", "interval"}, ...]}``; all times
    are in milliseconds (wall clock, ``time.time()``-based).
    """

    def __init__(self):
        # name -> list of completed sample dicts
        self.benchmarks = {}

    def run(self, name):
        """Start a named benchmark; returns self for use as a context manager."""
        self.running_benchmark = {"name": name}
        return self

    def __enter__(self):
        # Milliseconds since the epoch; only differences are meaningful.
        self.running_benchmark["start"] = time.time() * 1e3
        return self

    def __exit__(self, *args):
        self.running_benchmark["end"] = time.time() * 1e3
        self.running_benchmark["interval"] = (
            self.running_benchmark["end"] - self.running_benchmark["start"])
        # File the finished sample under its name.
        name = self.running_benchmark["name"]
        self.benchmarks.setdefault(name, []).append(self.running_benchmark)

    def summarize(self):
        """Log count/total/min/max/median/avg (milliseconds) per benchmark name."""
        for name, bmarks in self.benchmarks.items():
            intervals = sorted(bmark['interval'] for bmark in bmarks)
            # BUG FIX: the original called the Python 2 builtin `reduce`
            # (a NameError on Python 3) and indexed the median with true
            # division (a float index, TypeError on Python 3).  Use
            # sum() and floor division instead.
            total = sum(intervals)
            avg = total / len(intervals)
            median = intervals[len(intervals) // 2]
            min_i = intervals[0]
            max_i = intervals[-1]
            minimal_logger.info(
                "%d iterations of %s took %f milliseconds\n\tmin: %f\n\tmax: %f\n\tmedian: %f\n\tavg: %f" %
                (len(intervals), name, total, min_i, max_i, median, avg))
# How many times each benchmark function is invoked, and how many random
# draws are performed inside a single timed section.
iterations = 10
sub_iterations = 100
def benchmark_expo(benchmark):
    """Time ``sub_iterations`` draws from ``random.expovariate(0.5)``."""
    import random
    with benchmark.run("expovariate"):
        for _ in range(sub_iterations):
            random.expovariate(0.5)
def benchmark_gamma(benchmark):
    """Time ``sub_iterations`` draws from ``random.gammavariate(0.5, 1.5)``."""
    import random
    with benchmark.run("gammavariate"):
        for _ in range(sub_iterations):
            random.gammavariate(0.5, 1.5)
if __name__ == '__main__':
    # Run each benchmark function `iterations` times, then report stats.
    benchmark = Benchmark()
    for _ in range(iterations):
        benchmark_expo(benchmark)
        benchmark_gamma(benchmark)
    benchmark.summarize()
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment.