t=+0.0000 d=+0000.0000 c_train=+0.1425 c_test=+1.6767 a_train=+0.9600 a_test=+0.6351 diff(a)=+0.0000 eigs=[+1.5348 +5.0679 -6.0570 +7.4783 -10.9613 -12.9609 -14.8142 +15.2833 -15.6782 +19.5550]
t=+0.0526 d=+0336.5288 c_train=+0.1837 c_test=+1.7479 a_train=+0.9394 a_test=+0.6369 diff(a)=+0.0018 eigs=[-0.0151 +0.1797 -0.5535 -0.9537 +1.9200 -2.3797 -4.3579 +5.5634 -6.2135 +6.4144]
t=+0.1053 d=+0616.0807 c_train=+0.1502 c_test=+1.9037 a_train=+0.9518 a_test=+0.6341 diff(a)=-0.0010 eigs=[+1.0179 +5.3170 -5.8028 +6.4890 -10.6475 -13.4638 +14.7852 -16.7331 +17.7944 -19.7590]
t=+0.1579 d=+0850.7733 c_train=+0.1273 c_test=+2.0505 a_train=+0.9590 a_test=+0.6330 diff(a)=-0.0021 eigs=[-1.0922 +2.3346 -3.0863 -3.4616 +3.8922 -5.6419 -6.1383 -6.5192 -7.0559 +7.3406]
t=+0.2105 d=+1047.5834 c_train=+0.1127 c_test=+2.1657 a_train=+0.9646 a_test=+0.6326 diff(a)=-0.0025 eigs=[-0.5418 +2.1378 -4.2646 -6.3989 +7.4868 +10.0773 +12.0573 +15.8084 -16.1579 -18.1559]
t=+0.2632 d=+1210.4380 c_train=+0.1081 c_test=+2.2417 a_train=+0.9641 a_test=+0
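A hedged sketch for working with the log lines above: it parses the t/d/cost/accuracy fields and the eigenvalue list into numeric records, e.g. for plotting along the curve parameter t. The field names follow the log format shown; the parser itself is an illustration and not part of eval_bezier_eigen_5.py.

import re

LINE_RE = re.compile(
    r"t=(?P<t>[+-][\d.]+) d=(?P<d>[+-][\d.]+) "
    r"c_train=(?P<c_train>[+-][\d.]+) c_test=(?P<c_test>[+-][\d.]+) "
    r"a_train=(?P<a_train>[+-][\d.]+) a_test=(?P<a_test>[+-][\d.]+) "
    r"diff\(a\)=(?P<diff_a>[+-][\d.]+) eigs=\[(?P<eigs>[^\]]*)\]")

def parse_log_line(line):
    m = LINE_RE.search(line)
    if m is None:
        return None  # truncated or non-matching line
    record = {k: float(v) for k, v in m.groupdict().items() if k != 'eigs'}
    # eigenvalues are signed floats; findall also copes with missing separators
    record['eigs'] = [float(x) for x in re.findall(r'[+-][\d.]+', m.group('eigs'))]
    return record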
/media/vclagpu/Data1/enijkamp/repeller-sgd/venv/bin/python /media/vclagpu/Data1/enijkamp/repeller-sgd/experiments/bezier/eval_bezier_eigen_5.py
2018-05-14 13:57:18.728290: I tensorflow/core/platform/cpu_feature_guard.cc:137] Your CPU supports instructions that this TensorFlow binary was not compiled to use: SSE4.1 SSE4.2 AVX AVX2 FMA
2018-05-14 13:57:19.053588: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1030] Found device 0 with properties:
name: TITAN Xp major: 6 minor: 1 memoryClockRate(GHz): 1.582
pciBusID: 0000:09:00.0
totalMemory: 11.90GiB freeMemory: 11.74GiB
2018-05-14 13:57:19.053614: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1120] Creating TensorFlow device (/device:GPU:0) -> (device: 1, name: TITAN Xp, pci bus id: 0000:09:00.0, compute capability: 6.1)
2018-05-14 13:57:19,165 - loading ../../resnet_cifar100/model_1/model.ckpt-97675
2018-05-14 13:57:22,610 - Restoring parameters from ../../resnet_cifar100/model_1/model.ckpt-97675
2018-05-14 13:57:23,706 - conv2d_w
/home/enijkamp/env/tf/bin/python3 /home/enijkamp/Dropbox/repeller-sgd/experiments/cyclic/cyclic_9.py
2018-05-15 01:55:34,154 - cyclic_9
2018-05-15 01:55:34,156 - loading ../../resnet_cifar100/model_2/model.ckpt-156280
2018-05-15 01:55:34.156473: I tensorflow/core/platform/cpu_feature_guard.cc:140] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2 FMA
2018-05-15 01:55:34.273215: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:898] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero
2018-05-15 01:55:34.273501: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1344] Found device 0 with properties:
name: TITAN X (Pascal) major: 6 minor: 1 memoryClockRate(GHz): 1.531
pciBusID: 0000:02:00.0
totalMemory: 11.90GiB freeMemory: 11.54GiB
2018-05-15 01:55:34.325401: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:898] successful NUMA node read from SysFS had negative value (-1), but there
2018-05-15 05:51:51,407 - i=14890 lr=0.002255 c=0.67255330 l=18.93780136 mean(l)=18.67137085 r=38.65760803 t=17.92041802
2018-05-15 05:52:09,288 - i=14900 lr=0.002253 c=0.80631238 l=20.48881531 mean(l)=18.76381592 r=38.61412811 t=17.88345909
2018-05-15 05:52:27,237 - i=14910 lr=0.002251 c=0.81058139 l=19.97891998 mean(l)=18.68626587 r=38.73333359 t=17.94503307
2018-05-15 05:52:45,187 - i=14920 lr=0.002248 c=0.73022264 l=18.31782150 mean(l)=18.73870117 r=38.68188858 t=17.95400310
2018-05-15 05:53:03,173 - i=14930 lr=0.002246 c=0.66511607 l=18.61609650 mean(l)=18.80107788 r=38.74983215 t=17.98453307
2018-05-15 05:53:21,123 - i=14940 lr=0.002244 c=0.75952756 l=20.30416298 mean(l)=18.91235474 r=38.73876190 t=17.95242691
2018-05-15 05:53:39,118 - i=14950 lr=0.002242 c=0.77111256 l=19.60203362 mean(l)=18.97098755 r=38.69242859 t=17.99260187
2018-05-15 05:53:57,082 - i=14960 lr=0.002239 c=0.74147147 l=17.00726318 mean(l)=18.93935791 r=38.76287079 t=17.96582890
2018-05-15 05:54:15,051 - i=14970 lr=0.002237 c=0.530608
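The lr column above decays smoothly from step to step; cyclic_9.py presumably follows some cyclical schedule, but it is not visible in the log. As a hedged illustration only, a common choice is cosine annealing within a fixed-length cycle (warm restarts); lr_max, lr_min, and cycle_len below are hypothetical values.

import math

def cyclic_lr(step, lr_max=0.01, lr_min=0.0001, cycle_len=5000):
    # position within the current cycle, in [0, 1)
    x = (step % cycle_len) / float(cycle_len)
    # cosine decay from lr_max down to lr_min, restarting each cycle
    return lr_min + 0.5 * (lr_max - lr_min) * (1.0 + math.cos(math.pi * x))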
import collections
import tensorflow as tf
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
2018-05-19 02:59:12,147 - cyclic_12
2018-05-19 02:59:12,152 - loading ../../resnet_cifar100/model_2/model.ckpt-156280
2018-05-19 02:59:16,692 - Restoring parameters from ../../resnet_cifar100/model_2/model.ckpt-156280
2018-05-19 02:59:20,559 - conv2d_w
2018-05-19 02:59:20,559 - conv2d_w_1
2018-05-19 02:59:20,559 - batch_normalization_offset
2018-05-19 02:59:20,559 - batch_normalization_scales
2018-05-19 02:59:20,559 - conv2d_w_2
2018-05-19 02:59:20,559 - batch_normalization_offset_1
2018-05-19 02:59:20,559 - batch_normalization_scales_1
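The names above are the model variables restored from model.ckpt-156280. A hedged sketch of how such a listing can be produced with the TF 1.x checkpoint reader; whether cyclic_12.py logs the names this way is an assumption.

import tensorflow as tf

def log_checkpoint_variables(ckpt_path, logger):
    # enumerate the variable names (and shapes) stored in a TF 1.x checkpoint
    reader = tf.train.NewCheckpointReader(ckpt_path)
    for name, shape in sorted(reader.get_variable_to_shape_map().items()):
        logger.info('%s %s', name, shape)

# e.g. log_checkpoint_variables('../../resnet_cifar100/model_2/model.ckpt-156280', logging.getLogger())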
enijkamp / gputemps.py (created July 14, 2018 22:20): nvidia-smi temps
from subprocess import Popen, PIPE
import os
import datetime
import threading
import pygsheets

def fetch_temps():
    # query nvidia-smi for per-GPU stats as headerless, unit-less CSV
    p = Popen(["nvidia-smi", "--query-gpu=index,uuid,temperature.gpu,utilization.gpu,memory.total,memory.used,memory.free,driver_version,name,gpu_serial,display_active,display_mode", "--format=csv,noheader,nounits"], stdout=PIPE)
    output = p.stdout.read().decode('UTF-8')
    return output
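A hedged usage sketch: split the CSV returned by fetch_temps() into one dict per GPU. The field names mirror the --query-gpu list above; the pygsheets upload from the original gist is not reproduced here.

FIELDS = ['index', 'uuid', 'temperature.gpu', 'utilization.gpu',
          'memory.total', 'memory.used', 'memory.free', 'driver_version',
          'name', 'gpu_serial', 'display_active', 'display_mode']

def parse_temps(csv_text):
    rows = []
    for line in csv_text.strip().splitlines():
        values = [v.strip() for v in line.split(',')]
        rows.append(dict(zip(FIELDS, values)))
    return rows

if __name__ == '__main__':
    for gpu in parse_temps(fetch_temps()):
        print(gpu['index'], gpu['name'], gpu['temperature.gpu'])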
import numpy as np

POINTS_PER_WF = int(1e4)
X_SPACE = np.linspace(0, 100, POINTS_PER_WF)

def make_waveform_with_noise():
    def add_noise(vec):
        # zero-mean Gaussian noise with a randomly drawn strength per waveform
        stdev = float(np.random.uniform(0, 0.2))
        return vec + np.random.normal(0, stdev, size=len(vec))
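The construction of the clean waveform inside make_waveform_with_noise is cut off above. As a hypothetical stand-in, the same noise recipe applied to a plain sine over X_SPACE:

def example_noisy_sine():
    # hypothetical clean waveform; the one used in the original gist is not shown
    clean = np.sin(2.0 * np.pi * X_SPACE / 25.0)
    stdev = float(np.random.uniform(0, 0.2))
    return clean + np.random.normal(0, stdev, size=len(clean))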
import os
import logging
from shutil import copyfile

def copy_source(file, output_dir):
    copyfile(file, os.path.join(output_dir, os.path.basename(file)))

def setup_logging(output_dir):
    log_format = logging.Formatter("%(asctime)s : %(message)s")
    logger = logging.getLogger()
    logger.handlers = []
    output_file = os.path.join(output_dir, 'output.log')
    file_handler = logging.FileHandler(output_file)
    # assumed completion: the gist is truncated after the line above
    file_handler.setFormatter(log_format)
    logger.addHandler(file_handler)
    logger.setLevel(logging.INFO)
    return logger
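A hedged usage sketch wiring the two helpers together; the output_dir value is illustrative, not from the original script.

if __name__ == '__main__':
    output_dir = './output/run_01'
    os.makedirs(output_dir, exist_ok=True)
    logger = setup_logging(output_dir)
    copy_source(__file__, output_dir)
    logger.info('experiment started, writing to %s', output_dir)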
import torch

# `device` is defined elsewhere in the original script; a typical choice is assumed here
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

def calc_gradient_penalty(netD, real_data, fake_data, LAMBDA=10, BATCH_SIZE=128, HW=[64, 64]):
    # per-sample interpolation coefficient, broadcast to the image shape
    alpha = torch.rand(BATCH_SIZE, 1)
    alpha = alpha.expand(BATCH_SIZE, int(real_data.nelement() / BATCH_SIZE)).contiguous()
    alpha = alpha.view(BATCH_SIZE, 3, HW[0], HW[1])
    alpha = alpha.to(device)
    fake_data = fake_data.view(BATCH_SIZE, 3, HW[0], HW[1])
    # random points on the line segments between real and fake samples
    interpolates = alpha * real_data.detach() + ((1 - alpha) * fake_data.detach())
    interpolates = interpolates.to(device)
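    # Hedged completion: the gist is cut off above. The standard WGAN-GP penalty
    # (Gulrajani et al., 2017) continues roughly as below; whether the original
    # used exactly this form is an assumption.
    interpolates.requires_grad_(True)
    disc_interpolates = netD(interpolates)
    gradients = torch.autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                                    grad_outputs=torch.ones_like(disc_interpolates),
                                    create_graph=True, retain_graph=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty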