Skip to content

Instantly share code, notes, and snippets.

@shivamsnaik
Last active October 8, 2023 03:24
Show Gist options
  • Select an option

  • Save shivamsnaik/c5c5e99c00819d2167317b1e56871187 to your computer and use it in GitHub Desktop.

Select an option

Save shivamsnaik/c5c5e99c00819d2167317b1e56871187 to your computer and use it in GitHub Desktop.
Pure COCO-library-based method to calculate metrics such as Precision, Recall, and F1-Score, with outputs in Markdown tables
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools.cocoeval import Params
from IPython.display import JSON
import numpy as np
from Evaluator import evaluate_metrics, display_metrics
import argparse
def _compute_and_display_metrics(args):
    """Run COCO bbox evaluation and log overall plus per-category metrics.

    Args:
        args: argparse.Namespace with attributes ``gt_coco_path``,
            ``evaluation_result_path``, ``output_log_path`` and
            ``show_eval_summary`` (argparse maps --dashed-names to these).
    """
    coco_gt = COCO(args.gt_coco_path)
    # loadRes wraps the detection results in a COCO object indexed against
    # the ground-truth images/categories.
    coco_pred = coco_gt.loadRes(args.evaluation_result_path)
    coco_eval = COCOeval(coco_gt, coco_pred, "bbox")

    # Start from COCOeval's defaults. Tunable fields include:
    #   imgIds (all), catIds (all), iouThrs ([.5:.05:.95]),
    #   areaRng, maxDets ([1 10 100]), iouType ('bbox'), useCats.
    # e.g.:
    #   params.iouThrs = np.linspace(.5, .9,
    #       int(np.round((.9 - .5) / .1)) + 1, endpoint=True)
    params = coco_eval.params

    # Overall metrics across all categories.
    precision, recall, scores, iou_lookup = evaluate_metrics(
        coco_eval, params, args.show_eval_summary
    )
    display_metrics(precision, recall, scores, iou_lookup,
                    log_path=args.output_log_path)

    # Per-category metrics: restrict the evaluation to one category at a time.
    for cat in coco_gt.loadCats(coco_gt.getCatIds()):
        params.catIds = [cat["id"]]
        precision, recall, scores, iou_lookup = evaluate_metrics(
            coco_eval, params, args.show_eval_summary
        )
        display_metrics(precision, recall, scores, iou_lookup,
                        class_name=cat["name"], log_path=args.output_log_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Evaluate Metrics from the predictions and Ground Truths"
    )
    parser.add_argument("--gt-coco-path", type=str, required=True)
    parser.add_argument("--evaluation-result-path", type=str, required=True)
    parser.add_argument("--output-log-path", type=str, default="evaluation.log")
    # BUG FIX: the original used type=bool, but bool("False") is True, so
    # ANY value (even "False") enabled the summary. A store_true flag keeps
    # the default False and is the idiomatic argparse boolean switch.
    parser.add_argument("--show-eval-summary", action="store_true")
    args = parser.parse_args()
    _compute_and_display_metrics(args)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from pycocotools.cocoeval import Params
import numpy as np
import logging
def evaluate_metrics(cocoEval, params=None, display_summary=False):
    """Run the COCO evaluation pipeline and return the accumulated arrays.

    Args:
        cocoEval: a COCOeval instance (ground truth and detections attached).
        params: optional Params object; when truthy it replaces
            ``cocoEval.params`` before evaluation and is also forwarded to
            ``accumulate``.
        display_summary: when True, also print COCOeval's standard 12-metric
            summary via ``summarize()``.

    Returns:
        Tuple ``(precision, recall, scores, iou_lookup)`` where the first
        three are the arrays stored in ``cocoEval.eval`` and ``iou_lookup``
        maps each IoU threshold (rounded to 2 decimals) to its index along
        the first axis of those arrays.
    """
    if params:
        cocoEval.params = params
    # Show which IoU thresholds the evaluation covers.
    print("IoU Thresholds: ", cocoEval.params.iouThrs)
    # Threshold value -> index into the first axis of the eval arrays.
    iou_lookup = {
        float(format(val, '.2f')): index
        for index, val in enumerate(cocoEval.params.iouThrs)
    }
    cocoEval.evaluate()            # per-image / per-category matching
    cocoEval.accumulate(p=params)  # fills cocoEval.eval with the arrays below
    if display_summary:
        cocoEval.summarize()
    # Extract the metrics from the accumulated results.
    precision = cocoEval.eval["precision"]
    recall = cocoEval.eval["recall"]
    scores = cocoEval.eval["scores"]
    return precision, recall, scores, iou_lookup
# Print final results
def display_metrics(precision, recall, scores, iou_lookup, class_name=None, log_path='evaluation.txt'):
# Initialize logger
logger = logging.getLogger('eval_log')
if not logger.hasHandlers():
handler = logging.FileHandler(log_path)
logger.addHandler(handler)
if class_name:
logger.warning("| Class Name | IoU | mAP | F1-Score | Precision | Recall |")
logger.warning("|------------|-----|-----|----------|-----------|--------|")
else:
logger.warning("| IoU | mAP | F1-Score | Precision | Recall |")
logger.warning("|-----|-----|----------|-----------|--------|")
for iou in iou_lookup.keys():
precesion_iou = precision[iou_lookup[iou], :, :, 0, -1].mean(1)
scores_iou = scores[iou_lookup[iou], :, :, 0, -1].mean(1)
recall_iou = recall[iou_lookup[iou], :, 0, -1]
prec = precesion_iou.mean()
rec = recall_iou.mean()
if class_name:
#print("Class Name: {:10s} IoU: {:2.2f} mAP: {:6.3f} F1-Score: {:2.3f} Precision: {:2.2f} Recall: {:2.2f}".format(
# class_name, iou, prec * 100,scores_iou.mean(), (2 * prec * rec / (prec + rec + 1e-8)), prec, rec
#))
logger.warning("|{}|{:.2f}|{:.2f}|{:.2f}|{:.2f}|{:.2f}|".format(
class_name, iou, prec * 100,scores_iou.mean(), (2 * prec * rec / (prec + rec + 1e-8)), prec, rec
))
else:
#print("IoU: {:2.2f} mAP: {:6.3f} F1-Score: {:2.3f} Precision: {:2.2f} Recall: {:2.2f}".format(
# iou, prec * 100,scores_iou.mean(), (2 * prec * rec / (prec + rec + 1e-8)), prec, rec
#))
logger.warning("|{:.2f}|{:.2f}|{:.2f}|{:.2f}|{:.2f}|".format(
iou, prec * 100,scores_iou.mean(), (2 * prec * rec / (prec + rec + 1e-8)), prec, rec
))
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment