For a detectron2 issue: trying to export a Mask R-CNN model (COCO R_101_FPN_3x) to ONNX.
import numpy as np
import cv2
import os
import sys
import requests
import pickle
import torch
import detectron2
#from detectron2.utils.visualizer import Visualizer
from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
from detectron2.engine import DefaultTrainer
from detectron2.config import get_cfg
from detectron2.engine.defaults import DefaultPredictor
#from detectron2.utils.visualizer import ColorMode
# from detectron2.data.datasets import register_coco_instances, register_semantic
import time
import json
import copy
from detectron2.utils.logger import setup_logger
import ctypes
# from skimage import measure
# import skimage
currentfile_path = os.path.dirname(os.path.realpath(__file__))
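
# Overview: download the COCO Mask R-CNN R_101_FPN_3x weights, build a detectron2
# config around them, then try to export the model (either only backbone + FPN via
# model_custom, or the full model) to ONNX with torch.onnx.export and simplify the
# result with onnxsim.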
# edited by Brilian
class model_custom(torch.nn.Module):
    """Rebuilds a detectron2 model from a subset of its child modules so that a
    partial graph (e.g. backbone + FPN) can be traced for ONNX export."""

    def __init__(self, fs, uptolayer=-1, include_head=False):
        super(model_custom, self).__init__()
        features = fs
        bodylist = list(features.children())
        if not include_head:
            # replace the last child with the `head` sub-module of the child at `uptolayer`
            bodylist[-1] = bodylist[uptolayer].head
        else:
            # keep the children up to (but excluding) index `uptolayer`
            bodylist = bodylist[:uptolayer]
        self.features = torch.nn.ModuleList(bodylist)

    def forward(self, x, debug=False):
        # x is a pair: [input tensor, image size]
        xsize = x[1]
        x = x[0]
        print('..................input: ', x.shape, xsize)
        for ii, fs in enumerate(self.features):
            if ii == 1:
                # the second child module takes (images, features, targets) rather
                # than a single tensor, so pass a crude stand-in for the image list
                x = fs([xsize, [torch.tensor(1)]], x, None)
                continue
            x = fs(x)
        return x
def download_model(basepath=currentfile_path,
                   download_file='model_final_a3ec72.pkl'):
    print('\n')
    print('*' * 80)
    download_file = basepath + '/' + download_file
    if not os.path.exists(download_file):
        url = 'https://dl.fbaipublicfiles.com/detectron2/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl'
        print('...[download_model] Downloading weight file... from {}'.format(url))
        r = requests.get(url, allow_redirects=True)
        print('...[download_model] Write the downloaded file...')
        with open(download_file, 'wb') as of:
            of.write(r.content)
        print('...[download_model] Download weight file finish...')
    else:
        print('...[download_model] The weight file is already downloaded, skip downloading...')
    return download_file
def combine_config(path, weightfile, basepath=currentfile_path):
    print('\n')
    print('*' * 80)
    print('...[combine_config] Combining config file... from {}'.format(path))
    voc_config = {
        "min_dimension": 800,
        "max_dimension": 1333,
        "test_score_thresh": 0.8,
        "fine_tune_checkpoint": "",
        "max_iter": 500,
        "save_by_second": -1,
        "network": 0,
        "max_detections": 100,
        "is_use_data_aug": 1,
        "gpu_limit": -1,
        "UseAngle": 1,
        "segmenting_images": "",
        "class_name": "",
        "n_classes": 80,
        "TrainModel_n_classes": 0,
        "ResetModel": 0
    }
    cfg = get_cfg()
    # cfg.merge_from_file(r"D:\NewSegment\detectron2\configs\COCO-InstanceSegmentation\mask_rcnn_R_101_FPN_3x.yaml")
    cfg.merge_from_file(path)
    cfg.DATALOADER.NUM_WORKERS = 0
    cfg.MODEL.WEIGHTS = weightfile
    # cfg.OUTPUT_DIR = basepath
    cfg.SOLVER.IMS_PER_BATCH = 1
    cfg.SOLVER.BASE_LR = 0.0025
    cfg.SOLVER.MAX_ITER = voc_config["max_iter"]
    cfg.MODEL.ROI_HEADS.NUM_CLASSES = voc_config['n_classes']
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = voc_config['test_score_thresh']
    cfg.INPUT.MIN_SIZE_TRAIN = voc_config['min_dimension']
    cfg.INPUT.MAX_SIZE_TRAIN = voc_config['max_dimension']
    cfg.INPUT.MIN_SIZE_TEST = voc_config['min_dimension']
    cfg.INPUT.MAX_SIZE_TEST = voc_config['max_dimension']
    print('...[combine_config] Combining config file finish...')
    return cfg
def dummy_convert(cfg, device=torch.device('cuda:0'), only_backbone=True):
    print('\n')
    print('*' * 80)
    print('...[dummy_convert] Loading base model and load weightfile...')
    weightfile = cfg.MODEL.WEIGHTS
    detector = DefaultPredictor(cfg)
    # checkpoint = torch.load(cfg.MODEL.WEIGHTS)
    with open(cfg.MODEL.WEIGHTS, 'rb') as f:
        obj = f.read()
    checkpoint = pickle.loads(obj, encoding='latin1')
    detector.model.load_state_dict(checkpoint, strict=False)
    # create a dummy input
    print('...[dummy_convert] Initialize dummy input and size...')
    dummy_input = torch.randn(1, 3, cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN)
    dummy_input = dummy_input.to(device)
    imsize = torch.IntTensor((cfg.INPUT.MIN_SIZE_TRAIN, cfg.INPUT.MIN_SIZE_TRAIN))
    imsize = imsize.to(device)
    # exported file path (used by both branches and by the simplify step below)
    tonnxfile = os.path.join(currentfile_path, 'checkmodel.onnx')
    if only_backbone:
        print('...[dummy_convert] Loading custom model...')
        cpmodel = model_custom(detector.model, uptolayer=-2, include_head=True).to(device)
        print('...[dummy_convert] The custom model: \n', cpmodel)
        # _ = cpmodel([dummy_input, imsize])
        # export the model
        print('...[dummy_convert] Export model...')
        torch.onnx.export(cpmodel,
                          [dummy_input, imsize],
                          tonnxfile,
                          opset_version=11,
                          export_params=True)
    else:
        print('...[dummy_convert] Test detect...')
        print('what is this value: ', dummy_input.shape, imsize)
        # FIXME:
        # # this one is error INT
        # imsize = imsize.cpu().numpy()
        # inputs = {"image": dummy_input[0], "height": int(imsize[0]), "width": int(imsize[1])}
        # this one is error instance
        inputs = {"image": dummy_input[0], "height": imsize[0], "width": imsize[1]}
        with torch.no_grad():
            _ = detector.model([inputs])
        print('...[dummy_convert] Export model in full...')
        torch.onnx.export(detector.model.to(device),
                          [inputs],
                          tonnxfile,
                          opset_version=11,
                          export_params=True)
    print('...[dummy_convert] Export model to simplify model...')
    # '.../checkmodel.onnx' -> '.../checkmodel_simplify.onnx'
    tonnxsimple = tonnxfile.split('.')
    tonnxsimple[-1] = '_simplify.' + tonnxsimple[-1]
    tonnxsimple = ''.join(tonnxsimple)
    os.system('python3 -m onnxsim {} {}'.format(tonnxfile, tonnxsimple))
    print('...[dummy_convert] Export model finish...')
if __name__ == "__main__":
    # download the weight file (skipped automatically if it already exists);
    # returns the weight file path
    weightfile = download_model()
    # path to the Mask R-CNN config
    path_maskrcnn_cfg = os.path.join(currentfile_path, "../configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml")
    # combine our custom config with the default config from Mask R-CNN;
    # returns the config with all of its parameters
    cfg = combine_config(path_maskrcnn_cfg, weightfile)
    print('cfg:', cfg)
    # try to export our custom model
    # dummy_convert(cfg, only_backbone=True)   # only backbone + FPN
    dummy_convert(cfg, only_backbone=False)    # the full model
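
    # --- Optional sanity check: not part of the original gist; assumes the `onnx`
    # package is installed and that the export above actually produced checkmodel.onnx.
    # Uncomment to run a structural check on the exported file.
    # import onnx
    # exported = os.path.join(currentfile_path, 'checkmodel.onnx')
    # onnx.checker.check_model(onnx.load(exported))
    # print('ONNX structural check passed for', exported)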