git clone git@github.com:YOUR-USERNAME/YOUR-FORKED-REPO.git
cd into/cloned/fork-repo
git remote add upstream https://github.com/ORIGINAL-DEV-USERNAME/REPO-YOU-FORKED-FROM.git
git fetch upstream
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.utils.data import IterableDataset, DataLoader

class DistributedIterableDataset(IterableDataset):
    """
    Example implementation of an IterableDataset that handles both multiprocessing
    (num_workers > 0) and distributed training (nodes > 1).
    """
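The class body is cut off after the docstring. A minimal sketch of the usual sharding logic (hypothetical class name and constructor, reusing the imports above; not the original code) splits work first across distributed ranks and then across DataLoader workers:

class ShardedIterableDataset(IterableDataset):
    def __init__(self, items):
        self.items = items  # any indexable sequence of samples

    def __iter__(self):
        # Shard across distributed processes (if torch.distributed is initialized) ...
        world_size = dist.get_world_size() if dist.is_initialized() else 1
        rank = dist.get_rank() if dist.is_initialized() else 0
        # ... and across DataLoader workers inside each process.
        worker_info = torch.utils.data.get_worker_info()
        num_workers = worker_info.num_workers if worker_info is not None else 1
        worker_id = worker_info.id if worker_info is not None else 0
        # Every consumer reads a disjoint strided slice of the items.
        stride = world_size * num_workers
        offset = rank * num_workers + worker_id
        for idx in range(offset, len(self.items), stride):
            yield self.items[idx]

loader = DataLoader(ShardedIterableDataset(list(range(1000))), batch_size=32, num_workers=4)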
#!/usr/bin/env sh
sudo apt-get update -y && \
sudo apt-get upgrade -y && \
sudo apt-get dist-upgrade -y && \
sudo apt-get install build-essential software-properties-common -y && \
sudo add-apt-repository ppa:ubuntu-toolchain-r/test -y && \
sudo apt-get update -y && \
sudo apt-get install gcc-9 g++-9 -y && \
sudo update-alternatives --install /usr/bin/gcc gcc /usr/bin/gcc-9 60 --slave /usr/bin/g++ g++ /usr/bin/g++-9
import numpy as np

def make_lsh_model(nb_tables, nb_bits, nb_dimensions, vector_sample):
    # vector_sample: np arr w/ shape (2 * nb_tables * nb_bits, nb_dimensions).
    # normals, midpoints: np arrs w/ shape (nb_bits, nb_dimensions)
    # thresholds: np arr w/ shape (nb_bits,)
    # all_normals, all_thresholds: lists w/ one normals arr, one thresholds arr per table.
    all_normals, all_thresholds = [], []
    for i in range(0, len(vector_sample), 2 * nb_bits):
        vector_sample_a = vector_sample[i:i + nb_bits]
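The function is truncated at this point. One plausible completion of the loop (a sketch of the random-hyperplane idea implied by the comments above, not necessarily the original code): pair each block of nb_bits samples with the next block, put a hyperplane through each pair's midpoint, and store one normals array and one thresholds array per table.

        vector_sample_b = vector_sample[i + nb_bits:i + 2 * nb_bits]
        midpoints = (vector_sample_a + vector_sample_b) / 2.0
        normals = vector_sample_a - midpoints
        # A vector v lands on bit j's "positive" side when np.dot(v, normals[j]) > thresholds[j].
        thresholds = np.sum(normals * midpoints, axis=1)
        all_normals.append(normals)
        all_thresholds.append(thresholds)
    return all_normals, all_thresholds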
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang, Rutgers University, Email: zhang.hang@rutgers.edu
## Modified by Thomas Wolf, HuggingFace Inc., Email: thomas@huggingface.co
## Copyright (c) 2017-2018
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Data Parallel"""
Compile the C++ code, creating a shared library (a shared object on UNIX):
$ clang++ TestJNI.cpp -o libTestJNI.so -fPIC -shared -std=c++11 -I$HOME/opt/java/include -I$HOME/opt/java/include/linux
Run the application:
$ scala -save load.scala
dir = /home/archbox/opengl/jni/libTestJNI.so
Hello world java
i = 0
import argparse
import os
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
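Only the imports of this training script are shown. As a small, hypothetical illustration of how they typically come together (not taken from the original script), the setup section of such a script usually parses hyperparameters, enables the cuDNN autotuner, and builds the model and optimizer:

parser = argparse.ArgumentParser(description='example training setup')
parser.add_argument('--lr', type=float, default=0.1)
parser.add_argument('--momentum', type=float, default=0.9)
args = parser.parse_args()

cudnn.benchmark = True  # let cuDNN pick the fastest conv algorithms for fixed-size inputs

model = nn.Sequential(nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(),
                      nn.AdaptiveAvgPool2d(1), nn.Flatten(), nn.Linear(16, 1000))
if torch.cuda.is_available():
    model = torch.nn.parallel.DataParallel(model).cuda()
optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)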
# This is an example for the CIFAR-10 dataset.
# There's a function for creating a train and validation iterator.
# There's also a function for creating a test iterator.
# Inspired by https://discuss.pytorch.org/t/feedback-on-pytorch-for-kaggle-competitions/2252/4
from utils import plot_images

def get_train_valid_loader(data_dir,
                           batch_size,
                           augment,
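The signature is cut off here. A sketch of how such a train/validation split is usually built (simplified and hypothetical: CIFAR-10 only, no augmentation switch, different function name; not the original implementation) relies on SubsetRandomSampler over one shuffled index set:

import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
from torchvision import datasets, transforms

def get_train_valid_loader_sketch(data_dir, batch_size, valid_size=0.1):
    normalize = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2470, 0.2435, 0.2616))
    tfm = transforms.Compose([transforms.ToTensor(), normalize])
    train_set = datasets.CIFAR10(data_dir, train=True, download=True, transform=tfm)
    valid_set = datasets.CIFAR10(data_dir, train=True, download=True, transform=tfm)
    # Split one copy of the training set into disjoint train/validation index sets.
    indices = np.random.permutation(len(train_set)).tolist()
    split = int(valid_size * len(train_set))
    train_sampler = SubsetRandomSampler(indices[split:])
    valid_sampler = SubsetRandomSampler(indices[:split])
    train_loader = DataLoader(train_set, batch_size=batch_size, sampler=train_sampler)
    valid_loader = DataLoader(valid_set, batch_size=batch_size, sampler=valid_sampler)
    return train_loader, valid_loader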
import numpy as np
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
import torchvision
import torchvision.transforms as transforms
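A side note on the torch.autograd.Variable import above: since PyTorch 0.4, Variable is a thin no-op wrapper because tensors track gradients themselves, so code written against these imports can usually drop it. For example:

x = torch.randn(4, 3, 32, 32, requires_grad=True)  # formerly Variable(torch.randn(...), requires_grad=True)
y = (x * 2).sum()
y.backward()  # gradients accumulate in x.grad; no Variable wrapper needed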
import numpy as np
from scipy.ndimage import gaussian_filter, map_coordinates

def elastic_transform(image, alpha, sigma, random_state=None):
    """Elastic deformation of images as described in [Simard2003]_.
    .. [Simard2003] Simard, Steinkraus and Platt, "Best Practices for
       Convolutional Neural Networks applied to Visual Document Analysis", in
       Proc. of the International Conference on Document Analysis and
       Recognition, 2003.
    """
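The body is cut off after the docstring. The commonly seen continuation of this recipe (a sketch assuming a 2-D grayscale image array; not necessarily the original lines) draws a random displacement field, smooths it with a Gaussian of width sigma, scales it by alpha, and resamples the image with map_coordinates:

    if random_state is None:
        random_state = np.random.RandomState(None)
    shape = image.shape
    # Random displacements in [-1, 1], smoothed by sigma and scaled by alpha.
    d_row = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    d_col = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma, mode="constant", cval=0) * alpha
    # Displaced sampling grid, in (row, col) order as map_coordinates expects.
    rows, cols = np.meshgrid(np.arange(shape[0]), np.arange(shape[1]), indexing="ij")
    indices = np.reshape(rows + d_row, (-1, 1)), np.reshape(cols + d_col, (-1, 1))
    return map_coordinates(image, indices, order=1).reshape(shape)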