Skip to content

Instantly share code, notes, and snippets.

View SubhadityaMukherjee's full-sized avatar

Subhaditya Mukherjee SubhadityaMukherjee

View GitHub Profile
@SubhadityaMukherjee
SubhadityaMukherjee / latex_report_template.tex
Created September 9, 2022 16:01
latex_report_template
\begin{document}
\title{Computer Vision in Pytorch - A Primer\\
\thanks{I thank Overleaf for this template}
}
\author{\IEEEauthorblockN{1\textsuperscript{st} Subhaditya Mukherjee}
\IEEEauthorblockA{\textit{Faculty of Science and Engineering } \\
\textit{University Of Groningen}\\
Groningen, Netherlands \\
@SubhadityaMukherjee
SubhadityaMukherjee / latex_report_modules.tex
Created September 9, 2022 16:01
latex_report_modules
\documentclass[conference]{IEEEtran}
\IEEEoverridecommandlockouts
% The preceding line is only needed to identify funding in the first footnote. If that is unneeded, please comment it out.
\usepackage{cite}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor}
\def\BibTeX{{\rm B\kern-.05em{\sc i\kern-.025em b}\kern-.08em
# Load one sample image and push it through the learner's test-time pipeline —
# preparation for a class-activation-map (CAM) visualisation (see the
# commented-out einsum below).
# NOTE(review): scraped snippet — `PILImage`, `first`, `dls`, `learn`,
# `TensorImage` and `math` come from the fastai star-imports elsewhere, and
# the scrape stripped indentation, so this fragment is not runnable as-is.
img = PILImage.create(
"/media/hdd/Datasets/Fish_Dataset/Fish_Dataset/Shrimp/Shrimp/00012.png"
)
# Wrap the single image in a test DataLoader and take its first (only) batch.
(x,) = first(dls.test_dl([img]))
# cam_map = torch.einsum('ck,kij->cij', learn.model[1][-1].weight, act)
# Undo the batch transforms so the tensor is displayable again.
x_dec = TensorImage(dls.train.decode((x,))[0][0])
# Plot-grid layout: one cell per module in the model body, 4 per row.
image_count = len(learn.model[0])
col = 4
row = math.ceil(image_count / col)
# Root folder of the fish-classification dataset.
root_dir = "/media/hdd/Datasets/Fish_Dataset/Fish_Dataset/"
path = Path(root_dir)
# fastai DataBlock: images labelled by parent-folder name, 80/20 random
# train/valid split (seeded for reproducibility), a 224px random resized
# crop per item, and the standard batch-level augmentations.
# NOTE(review): indentation was stripped by the scrape; the call itself is
# a single expression, so the fragment still parses.
fields = DataBlock(
blocks=(ImageBlock, CategoryBlock),
get_items=get_image_files,
get_y=parent_label,
splitter=RandomSplitter(valid_pct=0.2, seed=42),
item_tfms=RandomResizedCrop(224, min_scale=0.5),
batch_tfms=aug_transforms(),
)
# Point the torch and fastai cache/home directories at the big data drive
# *before* importing the libraries: fastai resolves its config paths when it
# is first imported/used, so setting FASTAI_HOME after the star-import (as
# the original did) can be silently ignored.
import os

os.environ["TORCH_HOME"] = "/media/hdd/Datasets/"
os.environ["FASTAI_HOME"] = "/media/hdd/Datasets/"

import timm
from fastai.vision.all import *
from fastai.vision.widgets import *
import matplotlib.pyplot as plt
from IPython.display import Image
# NOTE(review): scraped snippet — `predictions_path` is expected to be a path
# string defined just above this fragment; `Path`, `get_image_files` and
# `Learner` come from the fastai star-imports.
predictions_path = Path(predictions_path)  # The folder where your files live
tst_files = get_image_files(predictions_path)  # Same layout as training data


def predict_batch(self, item, rm_type_tfms=None, with_input=False):
    """Run batch inference on `item` through a fresh test DataLoader.

    Mirrors `Learner.get_preds`: returns `(preds, targs, decoded)`.
    Fix: `with_input` is now forwarded to `get_preds` — the original
    hard-coded `with_input=False`, silently ignoring the argument.
    The default (`False`) keeps existing callers' behavior unchanged.
    """
    dl = self.dls.test_dl(item, rm_type_tfms=rm_type_tfms, num_workers=15)
    return self.get_preds(dl=dl, with_input=with_input, with_decoded=True)


# Monkey-patch so it is callable as `learn.predict_batch(files)`.
Learner.predict_batch = predict_batch
@SubhadityaMukherjee
SubhadityaMukherjee / fastaiv2predictsfull.py
Last active August 13, 2022 07:15
fastaiv2predicts_full
# Assuming you have set up your DataLoaders and Learner as `dls`, `learn`.
# Fine-tune the pretrained model for one epoch with weight decay 0.5,
# then serialize the whole learner for later inference.
learn.fine_tune(1, wd=0.5)
learn.export("model.pkl")  # Save the model
# Held-out test images to predict on (Kaggle fruits-360 directory layout).
predictions_path = "../input/fruits/fruits-360_dataset/fruits-360/Test"
# NOTE(review): the scrape duplicated this gist, leaving a truncated copy of
# `predict_batch` (missing its `return`) immediately before the complete one.
# Collapsed to a single clean definition; the surviving behavior is unchanged
# except that `with_input` is now forwarded instead of silently ignored.
predictions_path = "../input/fruits/fruits-360_dataset/fruits-360/Test"


def predict_batch(self, item, rm_type_tfms=None, with_input=False):
    """Run batch inference on `item` through a fresh test DataLoader.

    Mirrors `Learner.get_preds`: returns `(preds, targs, decoded)`.
    """
    dl = self.dls.test_dl(item, rm_type_tfms=rm_type_tfms, num_workers=15)
    return self.get_preds(dl=dl, with_input=with_input, with_decoded=True)


import random

predictions_path = Path(predictions_path)
# Monkey-patch so it is callable as `learn.predict_batch(files)`.
Learner.predict_batch = predict_batch
from fastai.interpret import *
from fastai.vision.widgets import *
# Build an interpretation object from the trained learner to inspect errors.
interp = ClassificationInterpretation.from_learner(learn)
# Show the 5 highest-loss predictions on a single row of images.
interp.plot_top_losses(5, nrows=1)
# See which class pairs the model confuses most; this changes as training
# progresses.
interp.most_confused()