Siladittya Manna (sadimanna) · 🌏 Submitted Ph.D. thesis a few days ago

import matplotlib.pyplot as plt

# Grouped bar chart: one set of bars per player, offset around each x position.
# num_plyrs, x, w, stats and colors are assumed to be defined earlier in the gist.
fig, ax = plt.subplots(1, 1, figsize=(10, 10))
b = []
for i in range(num_plyrs):
    b.append(ax.bar(x - (i - num_plyrs/2 + 0.5)*w,   # shift bar i within its group
                    stats.loc[i].values[1:],          # values for player i (first column skipped)
                    width=w,
                    color=colors(i),
                    align='center',
                    edgecolor='black'))

# Downstream evaluation on the held-out test set
tdg = DSDataGen('test', testimages, testlabels, num_classes=10)
tdl = DataLoader(tdg, batch_size=32, drop_last=True)

dsmodel.eval()
loss_sublist = np.array([])   # per-batch test losses
acc_sublist = np.array([])    # per-batch test accuracies
with torch.no_grad():
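The preview cuts off inside the torch.no_grad() block. A minimal sketch of the evaluation loop body that typically follows, assuming dsmodel returns class logits, F.cross_entropy (torch.nn.functional) is the criterion, and device is set up earlier; the loop body and these names are my assumptions, not the gist's exact code.

    for imgs, labels in tdl:
        imgs, labels = imgs.to(device), labels.to(device)   # assumption: device defined earlier
        logits = dsmodel(imgs)
        loss = F.cross_entropy(logits, labels)
        preds = logits.argmax(dim=1)
        loss_sublist = np.append(loss_sublist, loss.item())
        acc_sublist = np.append(acc_sublist, (preds == labels).float().mean().item())
    print('Test loss: %.4f, Test accuracy: %.4f' % (loss_sublist.mean(), acc_sublist.mean()))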

# Downstream training loop (20 epochs)
for epoch in range(20):
    stime = time.time()
    print("=============== Epoch : %3d ===============" % (epoch + 1))
    loss_sublist = np.array([])   # reset per-epoch loss accumulator
    acc_sublist = np.array([])    # reset per-epoch accuracy accumulator
    #iter_num = 0
    dsmodel.train()

# Bookkeeping for the downstream run
tr_ep_loss = []        # training loss per epoch
tr_ep_acc = []         # training accuracy per epoch
val_ep_loss = []       # validation loss per epoch
val_ep_acc = []        # validation accuracy per epoch
min_val_loss = 100.0   # best (lowest) validation loss seen so far

EPOCHS = 10
num_cl = 10            # number of downstream classes

from torch.utils.data import Dataset
from torchvision import transforms

class DSDataGen(Dataset):
    """Dataset for the downstream task: wraps image/label arrays with a light crop augmentation."""
    def __init__(self, phase, imgarr, labels, num_classes):
        self.phase = phase                # 'train' or 'test'
        self.num_classes = num_classes
        self.imgarr = imgarr              # image array
        self.labels = labels              # class labels
        # random resized crop back to 32x32, keeping 80-100% of the original area
        self.randomcrop = transforms.RandomResizedCrop(32, (0.8, 1.0))
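
The preview stops after __init__. A minimal sketch of the rest of the Dataset interface, assuming imgarr holds CHW images and the crop is applied only in the training phase; these method bodies are assumptions, not the gist's code.

    def __len__(self):
        return self.imgarr.shape[0]

    def __getitem__(self, idx):
        img = torch.as_tensor(self.imgarr[idx], dtype=torch.float32)
        if self.phase == 'train':          # assumption: augment only while training
            img = self.randomcrop(img)
        return img, int(self.labels[idx])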

import torch.nn as nn

class DSModel(nn.Module):
    """Downstream model: wraps a pretrained encoder whose weights are frozen."""
    def __init__(self, premodel, num_classes):
        super().__init__()
        self.premodel = premodel
        self.num_classes = num_classes
        # freeze every pretrained parameter; only layers added on top will be updated
        for p in self.premodel.parameters():
            p.requires_grad = False
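
The preview ends before any new layers are defined. A minimal sketch of the usual continuation, a single linear head over the frozen encoder; the attribute name lastlayer, the feature size 2048, and the forward body are illustrative assumptions, not the gist's code.

        # sketch (assumption): one linear head on top of the frozen encoder;
        # 2048 is an illustrative feature size, not taken from the gist
        self.lastlayer = nn.Linear(2048, self.num_classes)

    def forward(self, x):
        out = self.premodel(x)        # frozen features
        out = self.lastlayer(out)     # trainable logits for num_classes classes
        return out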

# Main training loop (100 epochs)
nr = 0
current_epoch = 0
epochs = 100
tr_loss = []            # training loss per epoch
val_loss = []           # validation loss per epoch

for epoch in range(100):
    print(f"Epoch [{epoch}/{epochs}]\t")
    stime = time.time()

import os
import torch

def save_model(model, optimizer, scheduler, current_epoch, name):
    """Save model, optimizer and scheduler state as a single checkpoint file."""
    out = os.path.join('/content/saved_models/', name.format(current_epoch))
    torch.save({'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'scheduler_state_dict': scheduler.state_dict()}, out)
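
A matching loader is not shown in the preview; a minimal sketch of how such a checkpoint could be restored (the function name load_model and its signature are mine, not the gist's):

def load_model(model, optimizer, scheduler, path):
    # restore a checkpoint written by save_model (sketch, not the gist's code)
    ckpt = torch.load(path, map_location='cpu')
    model.load_state_dict(ckpt['model_state_dict'])
    optimizer.load_state_dict(ckpt['optimizer_state_dict'])
    scheduler.load_state_dict(ckpt['scheduler_state_dict'])
    return model, optimizer, scheduler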

def plot_features(model, num_classes, num_feats, batch_size):
    # accumulators for model predictions and ground-truth labels
    preds = np.array([]).reshape((0, 1))
    gt = np.array([]).reshape((0, 1))
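
The rest of plot_features is cut off. A function with this signature usually collects num_feats-dimensional features from the model, projects them to 2-D with t-SNE, and scatter-plots them coloured by class; the sketch below follows that pattern, and the data loader, device, and sklearn.manifold.TSNE usage are my assumptions, not the gist's code.

    feats = np.array([]).reshape((0, num_feats))
    model.eval()
    with torch.no_grad():
        for imgs, labels in DataLoader(tdg, batch_size=batch_size):   # assumption: a Dataset such as tdg
            out = model(imgs.to(device))                               # assumption: device defined earlier
            feats = np.append(feats, out.cpu().numpy(), axis=0)
            gt = np.append(gt, labels.numpy().reshape((-1, 1)), axis=0)
    tsne = TSNE(n_components=2, perplexity=50)      # from sklearn.manifold
    x_feats = tsne.fit_transform(feats)
    for i in range(num_classes):                    # one colour per class
        idxs = (gt.ravel() == i)
        plt.scatter(x_feats[idxs, 0], x_feats[idxs, 1], s=4)
    plt.show()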

# OPTIMIZER
optimizer = LARS(
    [params for params in model.parameters() if params.requires_grad],
    lr=0.2,
    weight_decay=1e-6,
    exclude_from_weight_decay=["batch_normalization", "bias"],
)

# "decay the learning rate with the cosine decay schedule without restarts"
# SCHEDULER WITH LINEAR WARMUP
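
The scheduler definition itself is cut off in the preview. A minimal sketch of a linear-warmup plus cosine-decay schedule built from standard PyTorch schedulers; the 10-epoch warmup, eta_min, and the names warmupscheduler/mainscheduler are illustrative assumptions, not the gist's exact values.

# sketch: linear warmup for the first 10 epochs, then cosine decay without restarts
warmupscheduler = torch.optim.lr_scheduler.LambdaLR(
    optimizer, lambda epoch: (epoch + 1) / 10.0)         # ramps lr linearly up to its base value
mainscheduler = torch.optim.lr_scheduler.CosineAnnealingLR(
    optimizer, T_max=epochs - 10, eta_min=1e-3)          # cosine decay for the remaining epochs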

from torch.optim.optimizer import Optimizer, required
import re
import torch

EETA_DEFAULT = 0.001    # default trust coefficient for LARS

class LARS(Optimizer):
    """
    Layer-wise Adaptive Rate Scaling for large batch training.
    Introduced by "Large Batch Training of Convolutional Networks" by Y. You,
    I. Gitman, and B. Ginsburg (https://arxiv.org/abs/1708.03888).
    """
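
The class body is cut off here. The core of LARS is a per-layer trust ratio that rescales each layer's learning rate by the ratio of the weight norm to the gradient norm; the helper below sketches that computation (the method name _trust_ratio and its placement are mine, not the gist's implementation).

    # trust_ratio = eeta * ||w|| / (||g|| + weight_decay * ||w||); the per-layer update is
    # lr * trust_ratio * (g + weight_decay * w)   (sketch, not the gist's code)
    @staticmethod
    def _trust_ratio(param, grad, weight_decay, eeta=EETA_DEFAULT):
        w_norm = torch.norm(param)
        g_norm = torch.norm(grad)
        if w_norm > 0 and g_norm > 0:
            return eeta * w_norm / (g_norm + weight_decay * w_norm)
        return torch.tensor(1.0)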