1) `eb ssh` into your instance
2) find your Django app in /var/app/current/
3) activate the venv that Elastic Beanstalk creates: `source /var/app/venv/staging-*/bin/activate`
4) export any environment variables set in the environment configuration:
`while IFS='=' read -r name value; do export "$name"="$value"; done < /opt/elasticbeanstalk/deployment/env`
5) `python manage.py shell` or whatever you want
from torchvision import transforms

# Augmentations applied to the positive-diagnosis cases
transform = transforms.Compose([
    transforms.RandomHorizontalFlip(),
    transforms.RandomVerticalFlip(),
    transforms.RandomRotation(90),
    transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.8, 0.8))
])
augmented_mri_dataset = Brain_MRI_Segmentation_Dataset(positive_diagnoses, transform=transform)
validation_size = int(0.3 * len(mri_dataset))
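The snippet ends after computing validation_size; a minimal sketch of how the 70/30 split and data loaders might follow, assuming mri_dataset is the unaugmented dataset created earlier and that the loader names and batch size here are placeholders, not from the source:

from torch.utils.data import DataLoader, random_split

# Split the dataset into training and validation subsets
train_size = len(mri_dataset) - validation_size
train_dataset, validation_dataset = random_split(mri_dataset, [train_size, validation_size])

train_loader = DataLoader(train_dataset, batch_size=16, shuffle=True)
val_loader = DataLoader(validation_dataset, batch_size=16, shuffle=False)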
import torch
from torch.utils import data

class Brain_MRI_Segmentation_Dataset(data.Dataset):
    def __init__(self, inputs, transform=None):
        self.inputs = inputs
        self.transform = transform
        self.input_dtype = torch.float32
        self.target_dtype = torch.float32

    def __len__(self):
        return len(self.inputs)
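    # (The gist preview omits __getitem__, which torch's Dataset requires; a possible version,
    #  assuming each entry of `inputs` is an (image, mask) pair of channel-first numpy arrays —
    #  this layout is an assumption, not from the source.)
    def __getitem__(self, index):
        image, mask = self.inputs[index]
        x = torch.from_numpy(image).type(self.input_dtype)
        y = torch.from_numpy(mask).type(self.target_dtype)
        if self.transform is not None:
            # Concatenate along the channel dim so the same random spatial transform hits image and mask
            combined = self.transform(torch.cat([x, y], dim=0))
            x, y = combined[:x.shape[0]], combined[x.shape[0]:]
        return x, y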
import torch.nn as nn

class ConvBlock(nn.Module):
    def __init__(self, in_channels, out_channels):
        super(ConvBlock, self).__init__()
        self.block = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True)
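            # (The gist preview truncates here; the closing of the Sequential and a pass-through
            #  forward are filled in below as a sketch, not copied from the source.)
        )

    def forward(self, x):
        return self.block(x)

# With kernel_size=3 and padding=1, both convolutions preserve spatial size, so
# ConvBlock(in_channels, out_channels) maps a (N, in_channels, H, W) batch to (N, out_channels, H, W).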
def run_experiment(model_name, model, optimizer, criterion, train_loader, val_loader, device='cuda', num_epochs=50, clear_mem=True):
    #######################
    # Train model         #
    #######################
    print('Model sent to ' + str(device))
    model.to(device)

    losses = []
    train_scores = []  # hold IoU scores
    iters = 0
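    # (The gist preview cuts off here; a sketch of how the epoch loop might continue.
    #  The loss bookkeeping follows the lists defined above; the IoU scoring for
    #  train_scores and the validation pass over val_loader are not shown in the source
    #  and are omitted here.)
    for epoch in range(num_epochs):
        model.train()
        for images, masks in train_loader:
            images, masks = images.to(device), masks.to(device)
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, masks)
            loss.backward()
            optimizer.step()
            losses.append(loss.item())
            iters += 1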
import random

def parabolic_dist(x):
    return x * x

def inverse_parabolic_dist(x):
    return -(x * x)

def gauss_2d(mu=0, sigma=1):
    x = random.gauss(mu, sigma)
    y = random.gauss(mu, sigma)
    return (x, y)
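These helpers define the toy 2D target distribution for the GAN configured below (points in the plane, matching out_dim=2 / input_dim=2). A minimal sketch of how a batch of "real" samples might be drawn along the parabola; sample_real_batch, its parameters, and the jitter amount are hypothetical names and values, not from the source:

import torch

def sample_real_batch(batch_size=32, noise_sigma=0.05):
    # Points (x, x^2) with a little Gaussian jitter on the y coordinate
    xs = [random.uniform(-1, 1) for _ in range(batch_size)]
    points = [(x, parabolic_dist(x) + random.gauss(0, noise_sigma)) for x in xs]
    return torch.tensor(points, dtype=torch.float32)  # shape: (batch_size, 2)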
import torch
import torch.nn as nn

z_dim = 10
num_epochs = 30000
batch_size = 32
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
criterion = nn.BCELoss()
gen = Generator(z_dim=z_dim, hidden_dim=28, n_layers=3, out_dim=2).to(device)
disc = Discriminator(input_dim=2, hidden_dim=28, n_layers=3).to(device)
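The snippet stops after instantiating the two networks; a pair of optimizers would typically be created next. A minimal sketch assuming Adam, where the variable names and learning rate are placeholders, not from the source:

from torch import optim

# One optimizer per network, updated alternately during GAN training
gen_opt = optim.Adam(gen.parameters(), lr=2e-4)
disc_opt = optim.Adam(disc.parameters(), lr=2e-4)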
import torch.nn as nn

# Binary classifier that determines whether data points come from the original distribution
# or the fake (generated) distribution
class Discriminator(nn.Module):
    def __init__(self, input_dim=2, hidden_dim=28, n_layers=3):
        super(Discriminator, self).__init__()
        self.input = nn.Sequential(nn.Linear(input_dim, hidden_dim), nn.LeakyReLU())
        # Use nn.ModuleList rather than a plain Python list so the hidden layers' parameters are registered
        self.layers = nn.ModuleList()
        for i in range(n_layers):
            self.layers.append(nn.Sequential(nn.Linear(hidden_dim, hidden_dim), nn.LeakyReLU()))
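        # (Truncated in the gist; a sketch of how the class might be completed. A single
        #  sigmoid output pairs with the nn.BCELoss used above, but the attribute name
        #  and the forward pass are assumptions, not from the source.)
        self.output = nn.Sequential(nn.Linear(hidden_dim, 1), nn.Sigmoid())

    def forward(self, x):
        x = self.input(x)
        for layer in self.layers:
            x = layer(x)
        return self.output(x)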