import torch
import torch.nn as nn
from sklearn.model_selection import StratifiedKFold
from torch.utils.tensorboard import SummaryWriter

# file_list, coverage, seed, model, FLAGS, train_path, transform_train,
# transform_test, TGSSaltDataset, EarlyStopping, LovaszLoss, set_seed,
# _init_fn, train_loop_fn and test_loop_fn are defined elsewhere in the gist.

# 10-fold cross-validation, stratified on mask coverage.
kf = StratifiedKFold(n_splits=10, shuffle=True, random_state=seed)
for fold, (trn_idx, val_idx) in enumerate(kf.split(file_list, coverage)):
    print('****************************************************')
    print('******************* fold %d *******************' % fold)
    print('****************************************************')

    # Split the image ids into train/validation subsets for this fold.
    file_list_train = [x for i, x in enumerate(file_list) if i in trn_idx]
    file_list_val = [x for i, x in enumerate(file_list) if i in val_idx]
    train = TGSSaltDataset(train_path, file_list_train, augment=transform_train)
    val = TGSSaltDataset(train_path, file_list_val, augment=transform_test)
    writer = SummaryWriter(FLAGS['log_dir'] + 'scse')
    train_loader = torch.utils.data.DataLoader(
        train,
        batch_size=32,
        num_workers=4,
        drop_last=False,
        worker_init_fn=_init_fn)
    test_loader = torch.utils.data.DataLoader(
        val,
        batch_size=FLAGS['batch_size'] * 5,
        shuffle=False,
        num_workers=2,
        drop_last=False,
        worker_init_fn=_init_fn)
    set_seed()
    device = 'cuda'
    model = model.to(device)

    # Stage 1: BCE warm-up, epochs 1-20.
    loss_fn = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    # Checkpointing/early-stop helper (presumably consulted inside the loop
    # functions defined elsewhere in the gist).
    early_stopping = EarlyStopping(patience=80, path='./models/scse.pth', verbose=False)
    scheduler = None
    for epoch in range(1, 21):
        train_loss, train_iou, train_acc = train_loop_fn(train_loader)
        print("Finished training epoch {}".format(epoch))
        val_loss, val_iou, val_acc = test_loop_fn(test_loader)
        writer.add_scalars('loss_exp', {'train': train_loss, 'val': val_loss}, epoch)
        writer.add_scalars('IOU', {'train': train_iou, 'val': val_iou}, epoch)
    # Stage 2: switch to Lovasz loss at a lower learning rate, epochs 21-90.
    loss_fn = LovaszLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
    for epoch in range(21, 91):
        train_loss, train_iou, train_acc = train_loop_fn(train_loader)
        print("Finished training epoch {}".format(epoch))
        val_loss, val_iou, val_acc = test_loop_fn(test_loader)
        writer.add_scalars('loss_exp', {'train': train_loss, 'val': val_loss}, epoch)
        writer.add_scalars('IOU', {'train': train_iou, 'val': val_iou}, epoch)
    # Stage 3: add a cyclic LR schedule (triangular2 halves the amplitude
    # each cycle), epochs 91-170.
    scheduler = torch.optim.lr_scheduler.CyclicLR(
        optimizer, base_lr=1e-4, max_lr=1e-3,
        step_size_up=560 * 2, step_size_down=None,
        mode='triangular2', cycle_momentum=False, last_epoch=-1)
    for epoch in range(91, 171):
        train_loss, train_iou, train_acc = train_loop_fn(train_loader)
        print("Finished training epoch {}".format(epoch))
        val_loss, val_iou, val_acc = test_loop_fn(test_loader)
        writer.add_scalars('loss_exp', {'train': train_loss, 'val': val_loss}, epoch)
        writer.add_scalars('IOU', {'train': train_iou, 'val': val_iou}, epoch)
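
# ---------------------------------------------------------------------------
# set_seed() and _init_fn are defined elsewhere in the gist. A minimal sketch
# of what such helpers typically look like, assuming they exist only to make
# runs reproducible (the seed values below are hypothetical):
import random

import numpy as np


def set_seed(seed=42):  # hypothetical default seed
    # Seed every RNG the training loop can touch.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)


def _init_fn(worker_id):
    # Re-seed each DataLoader worker so augmentations are reproducible;
    # offsetting by worker_id keeps the workers' streams distinct.
    np.random.seed(42 + worker_id)  # hypothetical base seed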
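
# ---------------------------------------------------------------------------
# train_loop_fn / test_loop_fn are likewise defined elsewhere in the gist and
# appear to close over model, loss_fn, optimizer, scheduler and device, and
# return per-epoch (loss, IoU, accuracy). A minimal sketch under those
# assumptions; iou_metric is a hypothetical batch-IoU helper:


def train_loop_fn(loader):
    model.train()
    total_loss = total_iou = total_acc = 0.0
    for images, masks in loader:
        images, masks = images.to(device), masks.to(device)
        optimizer.zero_grad()
        logits = model(images)
        loss = loss_fn(logits, masks)
        loss.backward()
        optimizer.step()
        if scheduler is not None:
            scheduler.step()  # CyclicLR is stepped once per batch
        preds = (torch.sigmoid(logits) > 0.5).float()
        total_loss += loss.item()
        total_iou += iou_metric(preds, masks)   # hypothetical helper
        total_acc += (preds == masks).float().mean().item()
    n = len(loader)
    return total_loss / n, total_iou / n, total_acc / n


def test_loop_fn(loader):
    model.eval()
    total_loss = total_iou = total_acc = 0.0
    with torch.no_grad():
        for images, masks in loader:
            images, masks = images.to(device), masks.to(device)
            logits = model(images)
            loss = loss_fn(logits, masks)
            preds = (torch.sigmoid(logits) > 0.5).float()
            total_loss += loss.item()
            total_iou += iou_metric(preds, masks)
            total_acc += (preds == masks).float().mean().item()
    n = len(loader)
    return total_loss / n, total_iou / n, total_acc / n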