import os
import subprocess
import pickle


class BenchmarkGitRunner(object):
    ...

    def run(self):
        revisions = self._get_revisions_to_run()

        for rev in revisions:
            any_succeeded, n_active = self._run_and_write_results(rev)
            if not any_succeeded and n_active > 0:
                self.bench_repo.hard_clean()

                any_succeeded2, n_active = self._run_and_write_results(rev)

                # just guessing that this revision is broken, should stop
                # wasting our time
                if not any_succeeded2 and n_active > 5:
                    print 'BLACKLISTING %s' % rev
                    self.db.add_rev_blacklist(rev)
    def _run_and_write_results(self, rev):
        """
        Returns (any_succeeded, n_active_benchmarks) for the given revision
        """
        n_active_benchmarks, results = self._run_revision(rev)

        tracebacks = []
        any_succeeded = False

        for checksum, timing in results.iteritems():
            if 'traceback' in timing:
                tracebacks.append(timing['traceback'])

            timestamp = self.repo.timestamps[rev]

            any_succeeded = any_succeeded or 'timing' in timing

            self.db.write_result(checksum, rev, timestamp,
                                 timing.get('loops'),
                                 timing.get('timing'),
                                 timing.get('traceback'))

        return any_succeeded, n_active_benchmarks
    def _register_benchmarks(self):
        ex_benchmarks = self.db.get_benchmarks()
        db_checksums = set(ex_benchmarks.index)

        for bm in self.benchmarks:
            if bm.checksum in db_checksums:
                self.db.update_name(bm)
            else:
                print 'Writing new benchmark %s, %s' % (bm.name, bm.checksum)
                self.db.write_benchmark(bm)
    def _run_revision(self, rev):
        need_to_run = self._get_benchmarks_for_rev(rev)

        if not need_to_run:
            print 'No benchmarks need running at %s' % rev
            return 0, {}

        print 'Running %d benchmarks for revision %s' % (len(need_to_run), rev)
        for bm in need_to_run:
            print bm.name

        self.bench_repo.switch_to_revision(rev)

        pickle_path = os.path.join(self.tmp_dir, 'benchmarks.pickle')
        results_path = os.path.join(self.tmp_dir, 'results.pickle')
        if os.path.exists(results_path):
            os.remove(results_path)
        pickle.dump(need_to_run, open(pickle_path, 'w'))

        # run the process; the original format string had three %s
        # placeholders but only two arguments -- assuming here that the
        # runner script lives in the temp working directory (cwd below)
        cmd = 'python %s/run_benchmarks.py %s %s' % (self.tmp_dir,
                                                     pickle_path,
                                                     results_path)
        print cmd

        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                shell=True,
                                cwd=self.tmp_dir)
        stdout, stderr = proc.communicate()

        print 'stdout: %s' % stdout

        if stderr:
            if ("object has no attribute" in stderr or
                    'ImportError' in stderr):
                print stderr
                print 'HARD CLEANING!'
                self.bench_repo.hard_clean()
            print stderr

        if not os.path.exists(results_path):
            print 'Failed for revision %s' % rev
            return len(need_to_run), {}

        results = pickle.load(open(results_path, 'r'))

        try:
            os.remove(pickle_path)
        except OSError:
            pass

        return len(need_to_run), results
    def _get_benchmarks_for_rev(self, rev):
        existing_results = self.db.get_rev_results(rev)

        need_to_run = []
        timestamp = self.repo.timestamps[rev]

        for b in self.benchmarks:
            if b.start_date is not None and b.start_date > timestamp:
                continue

            if b.checksum not in existing_results:
                need_to_run.append(b)

        return need_to_run
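
# ---------------------------------------------------------------------------
# A minimal sketch of the companion run_benchmarks.py script that
# _run_revision invokes as a subprocess. It is NOT part of the original gist;
# it only illustrates the contract implied above: read the pickled list of
# benchmarks, run each one, and write back a dict keyed by benchmark checksum
# whose values carry 'loops'/'timing' on success or 'traceback' on failure.
# The assumption that each benchmark object exposes a run() method returning
# such a dict is mine; in the gist it would live in its own file, not here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import sys
    import traceback as tb

    benchmarks_path, out_path = sys.argv[1], sys.argv[2]
    benchmarks = pickle.load(open(benchmarks_path, 'r'))

    results = {}
    for bm in benchmarks:
        try:
            # assumed interface: run() returns {'loops': ..., 'timing': ...}
            results[bm.checksum] = bm.run()
        except Exception:
            results[bm.checksum] = {'traceback': tb.format_exc()}

    pickle.dump(results, open(out_path, 'w'))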