@devlifeX
Created December 30, 2024 12:52
extract_coverage
#!/usr/bin/env python3
import json
import argparse
import os
import re


def extract_coverage(file_path, output_dir="coverage_output"):
    """
    Extract code coverage information from a Chrome JSON coverage export and
    save the covered CSS of each stylesheet to a separate file.

    Usage:
        python3 coverage.py Coverage-20241230T150406.json ./test

    Args:
        file_path (str): Path to the JSON file exported by Chrome.
        output_dir (str): Directory to save the CSS files and summary.

    Returns:
        None
    """
    try:
        # Load JSON data
        with open(file_path, "r") as file:
            coverage_data = json.load(file)

        # Ensure the output directory exists, create it if it doesn't
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        coverage_summary = []
        total_original_size = 0   # Total size of all original CSS files
        total_covered_size = 0    # Total covered size
        total_uncovered_size = 0  # Total uncovered size

        for entry in coverage_data:
            url = entry.get("url", "unknown")
            text = entry.get("text", "")
            ranges = entry.get("ranges", [])

            # Only process CSS files
            if url.lower().endswith(".css"):
                total_original_size += len(text)

                # Concatenate the character ranges Chrome reports as used
                covered_css = ""
                for r in ranges:
                    covered_css += text[r["start"]:r["end"]]

                # Everything outside the covered ranges is uncovered
                uncovered_length = len(text) - len(covered_css)

                # Update total covered and uncovered sizes
                total_covered_size += len(covered_css)
                total_uncovered_size += uncovered_length

                # Clean up the URL to use as a valid filename
                sanitized_url = re.sub(r"[^a-zA-Z0-9_\-]", "_", url)
                css_filename = os.path.join(output_dir, f"{sanitized_url}.css")

                # Write the covered CSS content to a file
                with open(css_filename, "w") as css_file:
                    css_file.write(covered_css)

                coverage_summary.append({
                    "url": url,
                    "original_size": len(text),
                    "covered_length": len(covered_css),
                    "uncovered_length": uncovered_length,
                    "file": css_filename,
                })

        # Calculate total statistics
        total_coverage_percentage = (total_covered_size / total_original_size * 100) if total_original_size else 0
        total_size_reduction_percentage = (total_uncovered_size / total_original_size * 100) if total_original_size else 0

        # Save the coverage summary to a file
        summary_file_path = os.path.join(output_dir, "coverage_summary.txt")
        with open(summary_file_path, "w") as summary_file:
            summary_file.write("CSS Code Coverage Summary:\n")
            summary_file.write("-" * 50 + "\n")
            summary_file.write(f"Total Original Size: {total_original_size} chars\n")
            summary_file.write(f"Total Covered Size: {total_covered_size} chars\n")
            summary_file.write(f"Total Uncovered Size: {total_uncovered_size} chars\n")
            summary_file.write(f"Total Coverage: {total_coverage_percentage:.2f}%\n")
            summary_file.write(f"Total Size Reduction (Uncovered): {total_size_reduction_percentage:.2f}%\n")
            summary_file.write("\n" + "-" * 50 + "\n")

            # Add individual file statistics
            for entry in coverage_summary:
                coverage_percent = (entry["covered_length"] / entry["original_size"] * 100) if entry["original_size"] else 0
                uncovered_percent = (entry["uncovered_length"] / entry["original_size"] * 100) if entry["original_size"] else 0
                summary_file.write(
                    f"URL: {entry['url']}\n"
                    f"Original Size: {entry['original_size']} chars\n"
                    f"Covered Length: {entry['covered_length']} chars ({coverage_percent:.2f}%)\n"
                    f"Uncovered Length: {entry['uncovered_length']} chars ({uncovered_percent:.2f}%)\n"
                    f"Size Reduction (Uncovered): {uncovered_percent:.2f}%\n"
                    f"Saved to: {entry['file']}\n\n"
                )

        print(f"Coverage summary saved to {summary_file_path}")
        print(f"Covered CSS files saved in the {output_dir} directory.")

    except Exception as e:
        print(f"Error processing coverage data: {e}")


def main():
    # Set up the argument parser
    parser = argparse.ArgumentParser(
        description="Extract code coverage from a Chrome JSON export and save the covered CSS code"
    )
    parser.add_argument("input_file", help="Path to the Chrome coverage JSON file")
    parser.add_argument("output_dir", help="Directory to save the covered CSS files and summary")

    # Parse the arguments
    args = parser.parse_args()

    # Ensure the input file exists
    if not os.path.exists(args.input_file):
        print(f"Error: The file '{args.input_file}' does not exist.")
        return

    # Call the extract_coverage function
    extract_coverage(args.input_file, args.output_dir)


if __name__ == "__main__":
    main()
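
For reference, below is a minimal, self-contained sketch of the input shape this script expects, assuming a Chrome coverage export whose entries carry the fields read above (url, text, and ranges with start/end character offsets); a real export also includes JavaScript entries and extra fields, which extract_coverage simply ignores. The file names sample_coverage.json and demo, and the import of the gist as coverage.py, are hypothetical.

# demo_run.py -- illustrative only; file names and sample data are made up.
# Writes a tiny coverage export containing the fields extract_coverage() reads
# (url, text, ranges[].start/end) and runs the extractor on it.
import json

from coverage import extract_coverage  # assumes this gist is saved as coverage.py

sample = [
    {
        "url": "https://example.com/styles/main.css",
        "text": ".used{color:red}.unused{color:blue}",
        # Only the first 16 characters (".used{color:red}") were applied on the page
        "ranges": [{"start": 0, "end": 16}],
    }
]

with open("sample_coverage.json", "w") as f:
    json.dump(sample, f)

# Produces demo/https___example_com_styles_main_css.css containing the covered CSS
# and demo/coverage_summary.txt reporting 45.71% total coverage for this sample.
extract_coverage("sample_coverage.json", "demo")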