:bowtie:
😄 :smile:
😆 :laughing:
😊 :blush:
😃 :smiley:
""" Adapting ImageNet-scale models to complex distribution shifts with self-learning | |
Run with: | |
❯ docker pull pytorch/pytorch | |
❯ DATADIR="/path/to/imagenetc" | |
❯ curl -s https://stes.io/gce.py > gce.py | |
❯ docker run --gpus 1 -v ${DATADIR}:/data/imagenetc:ro \ | |
-v $(pwd):/app -w /app -u $(id -u) \ | |
--tmpfs /.cache --tmpfs /.local \ |
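The downloaded gce.py presumably implements the robust loss used for self-learning; a minimal sketch of a generalized cross entropy in that spirit (the L_q loss of Zhang & Sabuncu; the function name, signature, and default q are assumptions, not the file's verified contents):

import torch.nn.functional as F

def gce_loss(logits, pseudo_labels, q=0.8):
    # Generalized cross entropy, L_q = (1 - p_y^q) / q (Zhang & Sabuncu, 2018):
    # q -> 0 recovers standard cross entropy, q = 1 gives MAE. A guess at what
    # gce.py computes, not its actual source.
    probs = F.softmax(logits, dim=-1)
    p_y = probs.gather(-1, pseudo_labels.unsqueeze(-1)).squeeze(-1)
    return ((1.0 - p_y.clamp_min(1e-8) ** q) / q).mean()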
#!/bin/bash
# Download gitignore files from https://github.com/github/gitignore
# Example Usage: ./gitignore.sh Python Java > .gitignore
invalid_language() {
  >&2 echo "No gitignore found for $1."
  return 1
}
echo_header() {
  echo "### $1 ###"
}
# Fetch each requested gitignore; the raw URL pattern below is assumed.
for language in "$@"; do
  echo_header "${language}"
  curl -sf "https://raw.githubusercontent.com/github/gitignore/main/${language}.gitignore" \
    || invalid_language "${language}"
done
#!/bin/bash
# Seen on https://coderwall.com/p/pfm8qg/counting-lines-of-code-in-a-python-project
# Execute with a file extension as argument, e.g.:
# $ ./loc.sh py
# Counts all non-blank lines ("grep .") in files with the given extension.
find . -name "*.${1}" -type f -exec grep . {} \; | wc -l
""" Convert weights from matconvnet model to an hdf5 dataset loadable into Lasagne models. | |
Call with ./matconv2hdf5.py [basename] | |
""" | |
import sys, os | |
from urllib.request import urlretrieve | |
import scipy.io | |
import collections | |
import h5py |
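The script is cut off after the imports; a sketch of the conversion step they suggest, assuming the .mat file stores a `layers` struct with per-layer `name` and `weights` fields (that layout is an assumption, not verified against a real matconvnet file):

def convert(basename):
    # Load the MATLAB struct and copy each layer's weight arrays into an
    # HDF5 file keyed by layer name. Field names here are assumptions.
    mat = scipy.io.loadmat(basename + '.mat',
                           struct_as_record=False, squeeze_me=True)
    with h5py.File(basename + '.h5', 'w') as f:
        for layer in mat['layers']:
            for i, W in enumerate(getattr(layer, 'weights', [])):
                f.create_dataset('{}/param{}'.format(layer.name, i), data=W)

if __name__ == '__main__':
    convert(sys.argv[1])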
#define NB_STEPS (1000)

byte sine[] = {
  127,127,128,129,130,131,131,132,133,134,135,135,136,137,138,139,139,140,141,142,
  143,143,144,145,146,147,147,148,149,150,151,151,152,153,154,154,155,156,157,158,
  158,159,160,161,161,162,163,164,165,165,166,167,168,168,169,170,171,171,172,173,
  174,174,175,176,177,177,178,179,180,180,181,182,183,183,184,185,185,186,187,188,
  188,189,190,190,191,192,192,193,194,194,195,196,197,197,198,199,199,200,200,201,
  202,202,203,204,204,205,206,206,207,208,208,209,209,210,211,211,212,212,213,214,
  214,215,215,216,217,217,218,218,219,219,220,220,221,222,222,223,223,224,224,225,
  225,226,226,227,227,228,228,229,229,230,230,231,231,232,232,232,233,233,234,234,
  235,235,235,236,236,237,237,238,238,238,239,239,239,240,240,241,241,241,242,242,
  242,243,243,243,244,244,244,245,245,245,246,246,246,246,247,247,247,248,248,248,
  248,249,249,249,249,249,250,250,250,250,251,251,251,251,251,251,252,252,252,252,
  252,252,253,253,253,253,253,253,253,253,254,254,254,254,254,254,254,254,254,254,
  254,254,254,254,254,254,
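The table is the rising quarter of an unsigned 8-bit sine period. A Python sketch that reproduces the values shown, with the generator (offset 127, amplitude 128, truncation toward zero) inferred by fitting the printed data rather than taken from the original project:

import math

NB_STEPS = 1000  # samples per full period, matching the #define above

# Offset, amplitude, and truncation are inferred from the printed values.
quarter = [int(127 + 128 * math.sin(2 * math.pi * i / NB_STEPS))
           for i in range(NB_STEPS // 4)]
print(',\n'.join(','.join(str(v) for v in quarter[i:i + 20])
                 for i in range(0, len(quarter), 20)))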
import theano
import lasagne
import numpy as np

def upsample(layer, nb_kernels):
    def build_bilinear_kernel(ratio):
        # 1D triangle kernel [1, 2, ..., ratio, ..., 2, 1]; its outer product
        # with itself gives a 2D bilinear interpolation kernel.
        half_kern = np.arange(1, ratio + 1)
        kern = np.concatenate([half_kern, half_kern[-2::-1]])
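        # --- sketch of a possible continuation; the original is cut off here ---
        # Normalize so the triangle peaks at 1, then take the outer product.
        kern = kern / float(ratio)
        return np.outer(kern, kern)

    # Assumed fixed 2x upsampling, since the signature exposes no ratio.
    ratio = 2
    kern = build_bilinear_kernel(ratio).astype(theano.config.floatX)
    # One bilinear kernel per channel, no mixing across channels.
    W = np.zeros((nb_kernels, nb_kernels) + kern.shape, dtype=kern.dtype)
    for c in range(nb_kernels):
        W[c, c] = kern
    # TransposedConv2DLayer is Lasagne's deconvolution layer; using it here is
    # an assumption about how the original function continued.
    return lasagne.layers.TransposedConv2DLayer(
        layer, num_filters=nb_kernels, filter_size=kern.shape, stride=ratio,
        crop=ratio // 2, W=W, b=None, nonlinearity=None)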
from sklearn.manifold import TSNE
from sklearn.decomposition import PCA
from collections import OrderedDict

def cluster(X, pca_components=100, min_explained_variance=0.5, tsne_dimensions=2,
            nb_centroids=(4, 8, 16), X_=None, embedding=None):
    """ Simple K-Means clustering pipeline for high-dimensional data

    Performs the following steps for robust clustering:
    - Zero mean, unit variance normalization over all feature dimensions
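    - PCA projection to `pca_components` dimensions
    - t-SNE embedding into `tsne_dimensions` dimensions
    - K-Means clustering once per value in `nb_centroids`
    """
    # Sketch of a body matching the signature; the original is cut off above,
    # so the steps after normalization and everything below are inferred, not
    # the author's code. (`min_explained_variance` is ignored in this sketch.)
    from sklearn.cluster import KMeans
    from sklearn.preprocessing import StandardScaler
    if X_ is None:
        X_ = StandardScaler().fit_transform(X)
        X_ = PCA(n_components=pca_components).fit_transform(X_)
    if embedding is None:
        embedding = TSNE(n_components=tsne_dimensions).fit_transform(X_)
    return OrderedDict(
        (k, KMeans(n_clusters=k).fit_predict(embedding)) for k in nb_centroids)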
git log --date=short --pretty=format:'"%an","%cd","%s"'
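The one-liner prints one quoted CSV row per commit: author name, commit date (short format), and subject. A small sketch of consuming that output from Python; note that subjects containing double quotes would need extra escaping:

import csv
import subprocess

log = subprocess.run(
    ['git', 'log', '--date=short', '--pretty=format:"%an","%cd","%s"'],
    capture_output=True, text=True, check=True).stdout
# Each line is already a quoted CSV record, so csv.reader parses it directly.
for author, date, subject in csv.reader(log.splitlines()):
    print(date, author, subject)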
/*
 * A simple learning vector quantization (LVQ) neural network used to map datasets
 * (currently without normalization of the input data).
 *
 * Copyright (c) stes 2011
 */
import java.io.IOException;
import java.util.ArrayList;
import java.util.Random;
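The Java source is cut off after the imports. For reference, the LVQ1 update rule the header alludes to, as a compact Python sketch (the textbook rule, not a translation of the original Java code):

import numpy as np

def lvq1_step(prototypes, proto_labels, x, y, lr=0.05):
    # LVQ1: pull the nearest prototype toward x if its label matches y,
    # push it away otherwise.
    i = np.argmin(np.linalg.norm(prototypes - x, axis=1))
    sign = 1.0 if proto_labels[i] == y else -1.0
    prototypes[i] += sign * lr * (x - prototypes[i])
    return prototypes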