Skip to content

Instantly share code, notes, and snippets.

from PIL import Image
import glob

# Scan the current directory for PNG files that Pillow cannot decode,
# printing the path of each unreadable/corrupt image.
for path in glob.glob('*.png'):
    with open(path, 'rb') as f:
        try:
            # Image.open is lazy; convert('RGB') forces a full decode and
            # raises on truncated or corrupt pixel data. Image.open itself
            # is inside the try so unidentifiable headers are also reported
            # instead of crashing the scan.
            img = Image.open(f)
            img.convert('RGB')
        except Exception:  # narrowed from bare `except:` so Ctrl-C still exits
            print(path)
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
@enijkamp
enijkamp / coordconv.py
Created January 11, 2019 08:51
coordconv
class AddCoords(nn.Module):
def __init__(self, with_r=False):
super().__init__()
self.with_r = with_r
def forward(self, input_tensor):
batch_size, _, x_dim, y_dim = input_tensor.size()
xx_channel = torch.arange(x_dim).repeat(1, y_dim, 1)
@enijkamp
enijkamp / ensemble.py
Created January 12, 2019 11:06
ensemble
import os
import pandas as pd
import numpy as np
import argparse
import torch
import torchvision
import models
@enijkamp
enijkamp / out
Created January 12, 2019 11:39
out
0 cs1= 0.2207 cs2= 1.3196 c1= 0.9443 c2= 0.6621
1 cs1= 0.2215 cs2= 1.3286 c1= 0.9683 c2= 0.6934
2 cs1= 0.2242 cs2= 1.3213 c1= 0.9744 c2= 0.7188
3 cs1= 0.2252 cs2= 1.3243 c1= 0.9749 c2= 0.7258
4 cs1= 0.2258 cs2= 1.3321 c1= 0.9778 c2= 0.7244
5 cs1= 0.2255 cs2= 1.3320 c1= 0.9788 c2= 0.7263
6 cs1= 0.2247 cs2= 1.3337 c1= 0.9780 c2= 0.7319
7 cs1= 0.2252 cs2= 1.3411 c1= 0.9788 c2= 0.7273
8 cs1= 0.2257 cs2= 1.3427 c1= 0.9800 c2= 0.7319
9 cs1= 0.2249 cs2= 1.3394 c1= 0.9802 c2= 0.7341
@enijkamp
enijkamp / entropy
Created January 12, 2019 12:14
entropy
0 cs1= 0.2292 cs2= 1.3196 cs1_l= 0.2292 cs2_l= 1.3196 c1= 0.9407 c2= 0.6621
1 cs1= 0.2308 cs2= 1.3286 cs1_l= 0.1636 cs2_l= 1.1018 c1= 0.9634 c2= 0.6934
2 cs1= 0.2333 cs2= 1.3213 cs1_l= 0.1473 cs2_l= 1.0273 c1= 0.9688 c2= 0.7188
3 cs1= 0.2343 cs2= 1.3243 cs1_l= 0.1395 cs2_l= 0.9930 c1= 0.9692 c2= 0.7258
4 cs1= 0.2335 cs2= 1.3321 cs1_l= 0.1339 cs2_l= 0.9786 c1= 0.9734 c2= 0.7244
5 cs1= 0.2314 cs2= 1.3320 cs1_l= 0.1292 cs2_l= 0.9649 c1= 0.9751 c2= 0.7263
6 cs1= 0.2318 cs2= 1.3337 cs1_l= 0.1272 cs2_l= 0.9550 c1= 0.9763 c2= 0.7319
7 cs1= 0.2313 cs2= 1.3411 cs1_l= 0.1246 cs2_l= 0.9546 c1= 0.9788 c2= 0.7273
8 cs1= 0.2301 cs2= 1.3427 cs1_l= 0.1224 cs2_l= 0.9503 c1= 0.9790 c2= 0.7319
9 cs1= 0.2287 cs2= 1.3394 cs1_l= 0.1203 cs2_l= 0.9418 c1= 0.9805 c2= 0.7341
This file has been truncated, but you can view the full file.
loss1= 0.1201 lr= 0.000100
2019-01-14 07:50:29,196 : epoch 153 , step 277 : loss1= 0.1204 lr= 0.000100
2019-01-14 07:50:29,358 : epoch 153 , step 278 : loss1= 0.1205 lr= 0.000100
2019-01-14 07:50:29,607 : epoch 153 , step 279 : loss1= 0.1201 lr= 0.000100
2019-01-14 07:50:29,776 : epoch 153 , step 280 : loss1= 0.1208 lr= 0.000100
2019-01-14 07:50:29,898 : epoch 153 , step 281 : loss1= 0.1199 lr= 0.000100
2019-01-14 07:50:30,070 : epoch 153 , step 282 : loss1= 0.1207 lr= 0.000100
2019-01-14 07:50:30,184 : epoch 153 , step 283 : loss1= 0.1203 lr= 0.000100
2019-01-14 07:50:30,315 : epoch 153 , step 284 : loss1= 0.1203 lr= 0.000100
2019-01-14 07:50:30,460 : epoch 153 , step 285 : loss1= 0.1208 lr= 0.000100
@enijkamp
enijkamp / 51
Created January 20, 2019 09:58
51
2019-01-20 01:54:49,042 : epoch 210 , step 385 loss0= 0.0000 loss1= 29.9530 loss2= 0.0000 lr= 0.001000
2019-01-20 01:54:49,132 : epoch 210 , step 386 loss0= 0.0000 loss1= 30.0526 loss2= 0.0000 lr= 0.001000
2019-01-20 01:54:49,243 : epoch 210 , step 387 loss0= 0.0000 loss1= 30.1210 loss2= 0.0000 lr= 0.001000
2019-01-20 01:54:49,327 : epoch 210 , step 388 loss0= 0.0000 loss1= 30.0791 loss2= 0.0000 lr= 0.001000
2019-01-20 01:54:49,421 : epoch 210 , step 389 loss0= 0.0000 loss1= 29.9636 loss2= 0.0000 lr= 0.001000
2019-01-20 01:54:49,514 : epoch 210 , step 390 loss0= 0.0000 loss1= 29.8071 loss2= 0.0000 lr= 0.001000
2019-01-20 01:55:04,683 : epoch=210 n=10 acc=0.7752 (target)
2019-01-20 01:55:23,738 : >> epoch 210 : average_loss= 0.7859, average_accuracy=73.131%, mean_weight_std=0.09486, mean_normalized_std=11.0705 mean_dist= 36.55
2019-01-20 01:55:23,744 : >> epoch 210 : median_loss= 0.7838, median_accuracy=73.045%, median_weight_std
@enijkamp
enijkamp / 55 56
Created January 20, 2019 10:25
55 56
2019-01-20 02:22:59,985 : >> epoch 20 : dist pair-wise (target)
2019-01-20 02:22:59,987 :
[[ 0. 11.072 10.757 11.233 11.483 10.696 11.791 11.471 12.791 11.896]
[11.072 0. 9.685 10.94 10.753 10.934 10.456 11.523 11.475 11.205]
[10.757 9.685 0. 10.439 10.51 11.268 10.206 10.785 11.371 10.588]
[11.233 10.94 10.439 0. 10.959 11.421 11.273 11.323 11.763 11.446]
[11.483 10.753 10.51 10.959 0. 11.945 10.611 11.584 10.717 10.703]
[10.696 10.934 11.268 11.421 11.945 0. 12.167 11.694 12.71 12.403]
[11.791 10.456 10.206 11.273 10.611 12.167 0. 11.947 10.735 10.685]
[11.471 11.523 10.785 11.323 11.584 11.694 11.947 0. 12.685 12.144]
@enijkamp
enijkamp / 59_5
Created January 21, 2019 10:27
59_5
This file has been truncated, but you can view the full file.
2019-01-20 23:27:25,717 : set device 1 out of 4 cuda devices
2019-01-20 23:27:25,717 : Namespace(checkpoint=None, data_path='./data', dataset='CIFAR10', device=1, e_increasing_layer_size=False, e_layer_size=200, e_number_of_hidden_layers=3, epochs=100000000000000000000, gamma=1.0, initial_weights=None, lamBda=0.0, lr_init=0.0001, model='LeNet', momentum=0.0, num_workers=4, resume_checkpoint=False, save_checkpoint=True, seed=2092, train_log_freq=1, w_batch_size=10, weight_sharing=False, wg_number_of_hidden_layers=2, with_adam=True, with_lr_plateau_schedule=False, with_norm=2, with_residual=False, x_batch_size=512, z_batch_size=10, z_dim=100, z_std=1.0)
2019-01-20 23:27:30,267 : {'weight_sharing': False, 'input_noise_size': 100, 'with_layernorm': False, 'with_batchnorm': True, 'e_increasing_layer_size': False, 'e_number_of_hidden_layers': 3, 'e_layer_size': 200, 'wg_number_of_hidden_layers': 2, 'wg_hidden_layer_size_formula': <function train.<locals>.wg_hidden_layer_size_formula at 0x7f7ed2bcdb70>, 'code_size_f