Skip to content

Instantly share code, notes, and snippets.

(tf_cu90) $ source deactivate
$ echo $LD_LIBRARY_PATH
/usr/local/cuda-9.0/lib64:/usr/local/cuda-9.0/extras/CUPTI/lib64:/lib/nccl/cuda-9:/usr/lib64/openmpi/lib/:
# Create the conda per-env deactivate hook for tf_cu90, open it for editing,
# and mark it executable. conda runs every *.sh in deactivate.d on deactivate.
hook_script=~/anaconda3/envs/tf_cu90/etc/conda/deactivate.d/deactivate.sh
mkdir -p "$(dirname "$hook_script")"
touch "$hook_script"
vim "$hook_script"
chmod +x "$hook_script"
#!/bin/sh
# conda deactivate hook: restore the LD_LIBRARY_PATH that was saved before
# the env's activate hook prepended its CUDA paths, then drop the backup.
# Quoted: under plain /bin/sh an unquoted expansion in an export argument
# may be field-split.
# NOTE(review): this hook restores from ORIGINAL_LD_LIBRARY_PATH, while the
# activate hooks below read LD_LIBRARY_PATH_WITHOUT_CUDA — confirm the two
# variable names are meant to differ per env.
export LD_LIBRARY_PATH="$ORIGINAL_LD_LIBRARY_PATH"
unset ORIGINAL_LD_LIBRARY_PATH
$ cat ~/anaconda3/envs/tensorflow_p27/etc/conda/activate.d/activate.sh
#!/bin/sh
# conda activate hook for tensorflow_p27: select the TensorFlow Keras
# backend and prepend the CUDA 9.0 / CUPTI / NCCL libraries to the
# CUDA-free library path saved in LD_LIBRARY_PATH_WITHOUT_CUDA.
# Assumes LD_LIBRARY_PATH_WITHOUT_CUDA was exported beforehand — TODO confirm.
export KERAS_BACKEND='tensorflow'
# Swap the Keras config file to the TensorFlow variant.
cp ~/.keras/keras_tensorflow.json ~/.keras/keras.json
# Quoted RHS: plain /bin/sh may field-split an unquoted expansion here.
export LD_LIBRARY_PATH="/usr/local/cuda-9.0/lib64:/usr/local/cuda-9.0/extras/CUPTI/lib64:/lib/nccl/cuda-9:$LD_LIBRARY_PATH_WITHOUT_CUDA"
$ cat ~/anaconda3/envs/caffe_p27/etc/conda/activate.d/activate.sh
#!/bin/sh
# conda activate hook for caffe_p27: prepend the CUDA 8.0 / CUPTI / NCCL
# libraries to the CUDA-free library path saved in LD_LIBRARY_PATH_WITHOUT_CUDA.
# Quoted RHS for /bin/sh safety, matching the tensorflow_p27 hook.
export LD_LIBRARY_PATH="/usr/local/cuda-8.0/lib64:/usr/local/cuda-8.0/extras/CUPTI/lib64:/lib/nccl/cuda-8:$LD_LIBRARY_PATH_WITHOUT_CUDA"
#!/bin/bash
# Launch MXNet ImageNet training across 8 local GPUs inside the mxnet_p36 env.
set -euo pipefail

GPU_TO_TRAIN=0,1,2,3,4,5,6,7   # comma-separated GPU ids for --gpu
BATCH_SIZE=960                 # global batch size across all GPUs
KVSTORE=dist_sync              # kvstore mode for (distributed) gradient sync

source activate mxnet_p36

# Fixed: the original command ended with a dangling '\' continuation, and
# KVSTORE was defined but never passed — presumably it belongs on the
# truncated continuation line; passed as --kv-store here (TODO confirm).
python image-classification/train_imagenet.py \
  --gpu "$GPU_TO_TRAIN" --batch-size "$BATCH_SIZE" --num-epochs 10 \
  --data-nthreads 40 --disp-batches 20 \
  --kv-store "$KVSTORE"
import nltk

# Download every NLTK data package that lives under the 'tokenizers'
# collection (e.g. punkt). Re-indented: the scraped original had lost its
# indentation (syntax error) and used 'subdir==' without a space.
dwlr = nltk.downloader.Downloader()
for pkg in dwlr.packages():
    if pkg.subdir == 'tokenizers':
        dwlr.download(pkg.id)
# SSH into the instance; requires INSTANCE_NAME to be exported first
# (the export appears later in this gist — scrape order, not run order).
gcloud compute ssh "$INSTANCE_NAME"
# Configure and create a Deep Learning VM instance with 8x V100 GPUs.
export IMAGE_FAMILY="tf-latest-gpu" # or put any required
export ZONE="us-west1-b"
export INSTANCE_NAME="my-instance"
export INSTANCE_TYPE="n1-standard-8"

# Fixed: the original ended with a dangling '\' (which would swallow the
# next command as an argument) and never used INSTANCE_TYPE — passed here
# as --machine-type. GPUs require --maintenance-policy=TERMINATE.
gcloud compute instances create "$INSTANCE_NAME" \
  --zone="$ZONE" \
  --machine-type="$INSTANCE_TYPE" \
  --image-family="$IMAGE_FAMILY" \
  --image-project=deeplearning-platform-release \
  --maintenance-policy=TERMINATE \
  --accelerator='type=nvidia-tesla-v100,count=8'
# Inspect the latest image in the tf-latest-gpu family published by the
# Deep Learning VM project (shows the concrete image name, status, etc.).
gcloud compute images describe-from-family tf-latest-gpu --project deeplearning-platform-release