Created
August 27, 2021 16:04
-
-
Save ian-r-rose/e011a3242454cfa98ee7027d0ff1b6fa to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import io | |
import os | |
import re | |
import string | |
import zipfile | |
import altair | |
import altair_saver | |
import junitparser | |
import pandas | |
import requests | |
def get_from_github(url, params=None):
    """
    Make an authenticated GET request to the GitHub REST API.

    Parameters
    ----------
    url : str
        Fully-qualified GitHub API URL.
    params : dict, optional
        Query parameters to append to the request. Defaults to no parameters.

    Returns
    -------
    requests.Response
        The successful response object.

    Raises
    ------
    requests.HTTPError
        If the response status code indicates an error.
    """
    # BUG FIX: the original used a mutable default argument (`params={}`),
    # which is shared across all calls. Use None and let requests treat it
    # as "no query parameters" (requests accepts params=None).
    # NOTE(review): TOKEN is a module-level global not defined in this
    # snippet — it must hold a GitHub personal access token; confirm it is
    # set before calling.
    r = requests.get(
        url,
        params=params,
        headers={"Authorization": f"token {TOKEN}"},
    )
    r.raise_for_status()
    return r
def maybe_get_next_page_path(response):
    """
    Return the URL of the next page of a paginated API response, or None.

    Parses the response's ``Link`` header, which looks like
    ``<https://...&page=2>; rel="next", <https://...&page=9>; rel="last"``,
    and picks out the entry tagged ``rel="next"``.
    """
    header = response.headers.get("Link")
    if not header:
        # Unpaginated responses carry no Link header.
        return None
    pattern = re.compile(r'<([^>]*)>;\s*rel="([\w]*)\"')
    # Map each rel tag ("next", "last", ...) to its URL.
    rel_to_url = {rel: url for url, rel in pattern.findall(header)}
    return rel_to_url.get("next", None)
def get_artifact_listing(repo="dask/distributed"):
    """
    Return the complete list of GitHub Actions artifacts for *repo*.

    Follows the API's pagination links until every page has been fetched,
    requesting the maximum of 100 artifacts per page.
    """
    url = f"https://api.github.com/repos/{repo}/actions/artifacts"
    response = get_from_github(url, params={"per_page": 100})
    listing = list(response.json()["artifacts"])
    # Keep following "next" links until the API reports no more pages.
    next_page = maybe_get_next_page_path(response)
    while next_page:
        response = get_from_github(next_page)
        listing.extend(response.json()["artifacts"])
        next_page = maybe_get_next_page_path(response)
    return listing
def dataframe_from_jxml(run, date):
    """
    Flatten a parsed JUnit XML report into a pandas DataFrame.

    Parameters
    ----------
    run : iterable of test suites (each an iterable of test cases)
        A parsed junitparser.JUnitXml object (or compatible nesting).
    date : str
        Label for this run; becomes the status column name, with a
        companion ``date + "-message"`` column holding failure messages.

    Returns
    -------
    pandas.DataFrame
        Indexed by (file, test); one status column and one message column.
        Status marks: "✓" passed, "s" skipped, "x" error/failure/unknown.
    """
    files, tests, statuses, messages = [], [], [], []
    for suite in run:
        for case in suite:
            files.append(case.classname)
            tests.append(case.name)
            outcome = case.result
            if len(outcome) == 0:
                # No <failure>/<error>/<skipped> child element: a pass.
                statuses.append("✓")
                messages.append("")
                continue
            # Only the first result element is considered.
            outcome = outcome[0]
            msg = outcome.message if outcome and hasattr(outcome, "message") else ""
            if isinstance(outcome, junitparser.Skipped):
                mark = "s"
            else:
                # Error, Failure, and anything unrecognized all render "x".
                mark = "x"
            statuses.append(mark)
            messages.append(msg)
    frame = pandas.DataFrame(
        {"file": files, "test": tests, date: statuses, date + "-message": messages}
    )
    return frame.set_index(["file", "test"])
# NOTE(review): this __main__ section is incomplete scaffolding — the
# "FILL IN HERE" markers below must be completed before it can run:
# `test_runs` is never populated, the per-suite `dfs` dict is never filled,
# and `deployment` (used in the output filename) is never defined.
if __name__ == "__main__":
    # Get a listing of all artifacts
    print("Getting all recent artifacts")
    # Download artifacts here for some number of days
    # FILL IN HERE
    from fsspec.implementations.local import LocalFileSystem

    fs = LocalFileSystem()
    # NOTE(review): `f"path-to-artifacts"` is an f-string with no
    # placeholders — clearly a placeholder path meant to be replaced with
    # the real artifact directory.
    artifacts = fs.ls(f"path-to-artifacts")
    # Download the selected artifacts
    test_runs = {}
    for artifact in artifacts:
        # Download the artifact and parse it
        print(f"loading {artifact}")
        with fs.open(artifact) as f:
            data = f.read()
        run = junitparser.JUnitXml.fromstring(data)
        # Insert into test runs dict
        # FILL IN HERE
        # NOTE(review): presumably keys of test_runs are date strings and
        # values are parsed JUnitXml runs — that is what the comprehension
        # below expects; confirm when filling in.
    print("Generating test report")
    # Convert the JUnit data to dataframes
    ll = [dataframe_from_jxml(suite, date) for date, suite in test_runs.items()]
    # Drop duplicated index values for now, figure out a better solution later
    ll = [df.loc[~df.index.duplicated()] for df in ll]
    # Outer-join all runs column-wise so each date contributes two columns.
    total_df = pandas.concat(ll, axis=1)
    # Split the total DF into sub-dfs according to test suite
    dfs = {}
    # FILL IN HERE
    # Cell colors for the report grid: pass, fail/error, skip.
    colors = {
        "✓": "#acf2a5",
        "x": "#f2a5a5",
        "s": "#f2ef8f",
    }
    # Altair refuses DataFrames over 5000 rows by default; disable that cap.
    altair.data_transformers.disable_max_rows()
    charts = []
    for account, df in dfs.items():
        # Reshape to long, which altair prefers
        message_cols = [c for c in df.columns if "message" in c]
        status_cols = [c for c in df.columns if "message" not in c]
        # Melt statuses and messages separately, then glue them side by side;
        # the throwaway "dupdate" column only exists to be dropped.
        long = pandas.concat(
            [
                df[status_cols].melt(
                    var_name="Date", value_name="Status", ignore_index=False
                ),
                df[message_cols]
                .melt(var_name="dupdate", value_name="Message", ignore_index=False)
                .drop(columns="dupdate"),
            ],
            axis=1,
        ).reset_index()
        # Fully-qualify test names and drop skipped/missing entries from
        # the plot (they would only add noise to the grid).
        long = (
            long.assign(
                test=long.file + "." + long.test,
                Message=long.Message.fillna(""),
            )
            .drop(columns=["file"])
            .loc[(long.Status != "s") & long.Status.notna()]
        )
        # Make aggregated version with overall pass rate
        # (passes per test divided by total runs per test; fill_value=0
        # covers tests that never passed).
        long_agg = (
            long[long.Status == "✓"]
            .groupby("test")
            .size()
            .truediv(long.groupby("test").size(), fill_value=0)
            .to_frame(name="Pass Rate")
            .reset_index()
        )
        # Create a grid with hover tooltip for error messages
        # (left: per-date status grid; right: single-column pass-rate strip,
        # horizontally concatenated with `|`).
        charts.append(
            altair.Chart(long)
            .mark_rect(stroke="gray")
            .encode(
                x="Date:O",
                y=altair.Y("test:N", title=None),
                color=altair.Color(
                    "Status:N",
                    scale=altair.Scale(
                        domain=list(colors.keys()),
                        range=list(colors.values()),
                    ),
                ),
                tooltip=["test:N", "Date:O", "Status:N", "Message:N"],
            )
            .properties(title=account)
            # The "_" column is a dummy constant so the strip has one x cell.
            | altair.Chart(long_agg.assign(_="_"))
            .mark_rect(stroke="gray")
            .encode(
                y=altair.Y("test:N", title=None, axis=altair.Axis(labels=False)),
                x=altair.X("_:N", title=None),
                color=altair.Color(
                    "Pass Rate:Q",
                    scale=altair.Scale(
                        range=[colors["x"], colors["✓"]], domain=[0.0, 1.0]
                    ),
                ),
                tooltip=["test:N", "Pass Rate:Q"],
            )
        )
    # Stack one grid per suite vertically; labelLimit keeps long test names
    # from being truncated.
    chart = altair.vconcat(*charts).configure_axis(labelLimit=1000)
    # NOTE(review): `deployment` is undefined in this snippet — it must be
    # supplied by one of the FILL IN sections above.
    out_file = f"test_results_{deployment}.html"
    altair_saver.save(chart, out_file)
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment