@deshwalmahesh
Last active April 19, 2021 16:02
Generate YOLOv4 Darknet-style annotations from bounding boxes given as (x, y, w, h) and vice versa. Also, given YOLOv4 weights and a config file, it generates an annotation file for each image, so you can use it to extend your data. Creates a classes.txt file in the same DIR so LabelImg can fetch it. Open LabelImg and open the DIR after executing the code to verify. Calcu…
import os
import glob

import cv2
import numpy as np
from PIL import Image
def select_box(results: tuple, method: str) -> int:
    '''
    Select a single bounding box based on max probability or max area logic
    args:
        results: results returned by the detection module in (classes, scores, boxes) format
        method: whether to use 'prob' or 'area'
    out:
        Index of the bounding box to select
    '''
    classes, scores, bboxes = results
    if method == 'area':
        return np.argmax([box[2] * box[3] for box in bboxes])
    return np.argmax(scores)
def bnd_box_to_yolo_line(box: np.ndarray, img_size: tuple) -> tuple:
    '''
    Change a bounding box to the YOLO text format
    args:
        box: 1-D array of the bounding box in format [x, y, w, h]
        img_size: image shape in format (Height, Width, Channels), as returned by np.ndarray.shape
    out:
        4 floating point values: (x_center, y_center) is the centre of the rectangle relative to the
        image size, and (w, h) is the width and height of the rectangle relative to the image size
    '''
    (x_min, y_min) = (box[0], box[1])
    (w, h) = (box[2], box[3])
    x_max = x_min + w
    y_max = y_min + h
    x_center = float(x_min + x_max) / 2 / img_size[1]
    y_center = float(y_min + y_max) / 2 / img_size[0]
    w = float(x_max - x_min) / img_size[1]
    h = float(y_max - y_min) / img_size[0]
    return x_center, y_center, w, h
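# Worked example (illustrative numbers, not from the gist): a box [x=100, y=50, w=200, h=100]
# in a 416x416 image gives x_center = (100 + 300) / 2 / 416 ≈ 0.4808, y_center = (50 + 150) / 2 / 416 ≈ 0.2404,
# w = 200 / 416 ≈ 0.4808 and h = 100 / 416 ≈ 0.2404, i.e. every value is normalised to [0, 1].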
def build_yolo_model(weight_file_path: str, config_file_path: str, size: tuple = (416, 416)):
    '''
    Build a YOLO detection model
    args:
        weight_file_path: Path to the .weights (YOLO v3, v4 etc.) file
        config_file_path: Path to the .cfg file
        size: Input size of the model. You can pass in any multiple of 32. Works even when you have
              trained with 416 and are now testing at 608
    '''
    net = cv2.dnn.readNet(weight_file_path, config_file_path)
    model = cv2.dnn_DetectionModel(net)
    model.setInputParams(size=size, scale=1 / 255.)
    return model
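# Example usage (hypothetical file names; substitute the actual Darknet weights and .cfg you have):
# model = build_yolo_model('yolov4.weights', 'yolov4.cfg', size=(608, 608))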
def generate_text_annotation(dir_path: str, weight_file_path: str, config_file_path: str, size: tuple = (416, 416),
                             CONFIDENCE_THRESHOLD: float = 0.51, NMS_THRESHOLD: float = 0.51) -> None:
    '''
    Generate an annotation file per image for the images in a directory, using bounding boxes from the model
    args:
        dir_path: Directory path where your images are stored (./dir/whatever/). We assume they are in .png format only
        weight_file_path: Path to the .weights (YOLO v3, v4 etc.) file
        config_file_path: Path to the .cfg file
        size: Input size of the model. You can pass in any multiple of 32. Works even when you have
              trained with 416 and are now testing at 608
        CONFIDENCE_THRESHOLD: Only keep detections whose confidence is above this level. Increasing it
                              leads to more False Negatives; decreasing it leads to more False Positives
        NMS_THRESHOLD: Non Maximum Suppression threshold. Decreasing it gives more bounding boxes per image;
                       increasing it gives fewer
    '''
    model = build_yolo_model(weight_file_path, config_file_path, size)
    image_names = glob.glob(f'{dir_path}*.png')

    for image_path in image_names:
        annot = [0]  # single-class annotation, class index 0 by default
        text_file_name = os.path.splitext(image_path)[0] + '.txt'
        img_array = np.array(Image.open(image_path))
        classes, scores, bboxes = model.detect(img_array, CONFIDENCE_THRESHOLD, NMS_THRESHOLD)

        if type(classes) == tuple:  # no detections for this image
            continue

        if classes.shape == (1, 1):  # only 1 detection
            index = 0
        else:
            index = select_box((classes, scores, bboxes), 'prob')  # if multiple, select on max probability (or max area)

        box = bboxes[index]
        score = scores[index]  # confidence of the selected box (not written to the annotation file)
        annot.extend(bnd_box_to_yolo_line(box, img_array.shape))

        with open(text_file_name, 'w') as f:
            f.write(' '.join([str(i) for i in annot]))

    with open(dir_path + 'classes.txt', 'w') as f:
        f.write('Default Class')
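# Example usage (hypothetical paths; point these at wherever your images, weights and .cfg actually live):
# generate_text_annotation('./images/', 'yolov4.weights', 'yolov4.cfg', size=(416, 416),
#                          CONFIDENCE_THRESHOLD=0.51, NMS_THRESHOLD=0.51)
# After it runs, open the directory in LabelImg to verify the generated .txt files and classes.txt.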