Skip to content

Instantly share code, notes, and snippets.

# Create a GPU-backed Deep Learning VM ("model-prep") from the latest
# TensorFlow GPU image family, with one NVIDIA T4 attached.
# TERMINATE maintenance policy is required for GPU instances (no live migration).
export IMAGE_FAMILY="tf-latest-gpu"
export ZONE="us-west1-b"
export INSTANCE_NAME="model-prep"
# NOTE(review): the original snippet was truncated after the --accelerator flag
# (dangling line continuation). The command is terminated here with the
# driver-install metadata the Deep Learning VM images expect — verify against
# the original gist.
gcloud compute instances create "$INSTANCE_NAME" \
  --zone="$ZONE" \
  --image-family="$IMAGE_FAMILY" \
  --machine-type=n1-standard-8 \
  --image-project=deeplearning-platform-release \
  --maintenance-policy=TERMINATE \
  --accelerator="type=nvidia-tesla-t4,count=1" \
  --metadata="install-nvidia-driver=True"
# Launch a Deep Learning VM that executes a notebook on a GPU.
# Arguments:
#   $1 - GCS path of the input notebook (e.g. gs://bucket/nb.ipynb)
#   $2 - GCS folder for the executed output notebook
#   $3 - GPU type (e.g. nvidia-tesla-t4)
#   $4 - GPU count
# NOTE(review): the function body is truncated in this view — the gcloud
# command's flags and the closing brace are not visible here.
function execute_notebook_with_gpu() {
IMAGE_FAMILY="tf-latest-cu100" # or put any required
ZONE="us-west1-b"
INSTANCE_NAME="notebookexecutor"
INSTANCE_TYPE="n1-standard-8"
INPUT_NOTEBOOK=$1
OUTPUT_NOTEBOOK=$2
GPU_TYPE=$3
GPU_COUNT=$4
gcloud compute instances create $INSTANCE_NAME \
# Startup-script fragment: if an NVIDIA GPU is attached but no driver is
# loaded yet (nvidia-smi fails without a running driver), install the driver
# using the helper shipped with the Deep Learning VM image.
if lspci -vnn | grep -q NVIDIA 2>/dev/null; then
  if ! nvidia-smi >/dev/null 2>&1; then
    echo "Installing driver"
    /opt/deeplearning/install-driver.sh
  fi
fi
# Read the input/output notebook locations from the GCE metadata server.
# -s: suppress curl's progress meter (it pollutes the startup-script log
#     because stdout is captured by the command substitution);
# -f: return a non-zero status on HTTP errors instead of silently capturing
#     an error page into the variable.
readonly INPUT_NOTEBOOK_GCS_FILE=$(curl -sf -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/input_notebook)
readonly OUTPUT_NOTEBOOK_GCS_FOLDER=$(curl -sf -H "Metadata-Flavor: Google" http://metadata.google.internal/computeMetadata/v1/instance/attributes/output_notebook)
# Create a VM that executes a notebook: the input/output GCS locations are
# passed to the instance via custom metadata keys, which the startup script
# reads back from the metadata server.
export IMAGE_FAMILY="tf-latest-cu100" # or put any required
export ZONE="us-west1-b"
export INSTANCE_NAME="notebookexecutor"
export INSTANCE_TYPE="n1-standard-8"
export INPUT_NOTEBOOK="gs://my-bucket/test.ipynb"
export OUTPUT_NOTEBOOK="gs://my-bucket/"
# NOTE(review): the original command was truncated after --image-project
# (dangling "\"). Completed here so the exported INSTANCE_TYPE, INPUT_NOTEBOOK
# and OUTPUT_NOTEBOOK values are actually used — verify against the original.
gcloud compute instances create "$INSTANCE_NAME" \
  --zone="$ZONE" \
  --image-family="$IMAGE_FAMILY" \
  --image-project=deeplearning-platform-release \
  --machine-type="$INSTANCE_TYPE" \
  --metadata="input_notebook=${INPUT_NOTEBOOK},output_notebook=${OUTPUT_NOTEBOOK}"
# Create a Deep Learning VM with 8x NVIDIA V100 GPUs attached.
export IMAGE_FAMILY="tf-latest-cu100" # or put any required
export ZONE="us-west1-b"
export INSTANCE_NAME="my-instance"
export INSTANCE_TYPE="n1-standard-8"
# NOTE(review): the original command was truncated after --accelerator
# (dangling "\"), and the exported INSTANCE_TYPE was never used — a
# --machine-type flag is added so it takes effect.
gcloud compute instances create "$INSTANCE_NAME" \
  --zone="$ZONE" \
  --image-family="$IMAGE_FAMILY" \
  --image-project=deeplearning-platform-release \
  --machine-type="$INSTANCE_TYPE" \
  --maintenance-policy=TERMINATE \
  --accelerator='type=nvidia-tesla-v100,count=8'
# Serve the TensorRT-optimized model over TF Serving's REST API on port 8888.
# model_base_path must point at the parent directory that contains numeric
# version subdirectories (e.g. the 00001 dir written by convert_to_rt.py below).
tensorflow_model_server --model_base_path=$HOME/resnet_v2_int8_NCHW/ --rest_api_port=8888
# Convert the FP32 ResNet SavedModel into an INT8 TensorRT-optimized
# SavedModel, written under a numeric version dir (00001) for TF Serving.
git clone https://github.com/b0noI/TFTools.git
# Guard the cd: if the clone failed, running the converter from the wrong
# directory would produce a confusing "file not found" much later.
cd TFTools || exit 1
python ./convert_to_rt.py \
  --input_model_dir="$HOME/resnet_v2_fp32_savedmodel_NCHW/1538687196" \
  --output_model_dir="$HOME/resnet_v2_int8_NCHW/00001" \
  --batch_size=128 \
  --precision_mode="INT8"
# Bring up a CPU-only Deep Learning VM (TensorFlow image family) used for
# preparing the model for inference. A larger boot disk is requested because
# the default disk is too small for the image plus model artifacts.
export IMAGE_FAMILY="tf-latest-cpu"
export ZONE="us-west1-b"
export INSTANCE_NAME="inferance-model-prep"
export INSTANCE_TYPE="n1-standard-8"
gcloud compute instances create "$INSTANCE_NAME" \
  --image-project=deeplearning-platform-release \
  --image-family="$IMAGE_FAMILY" \
  --machine-type="$INSTANCE_TYPE" \
  --zone="$ZONE" \
  --boot-disk-size=120GB
import PIL
import tempfile
import numpy as np
import tensorflow.contrib.tensorrt as trt
import tensorflow as tf
# Decode raw JPEG bytes into a 224x224 PIL image by round-tripping through a
# temp file (PIL.Image.open wants a path/file, not raw bytes).
# NOTE(review): this function is truncated in this view, and the scrape lost
# the Python indentation of its body — restore indentation and the missing
# tail from the original gist before use.
def encode(request_data):
with tempfile.NamedTemporaryFile(mode="wb", suffix=".jpg") as f:
f.write(request_data)
img = PIL.Image.open(f.name).resize((224, 224))
#!/bin/bash
# GPU Agent
# Install the GCP GPU-utilization reporting agent and register it as a
# systemd service so GPU metrics are reported to Cloud Monitoring.
git clone https://github.com/b0noI/gcp-gpu-utilization-metrics.git
cd gcp-gpu-utilization-metrics
# Pin to a known-good commit so the agent's behavior is reproducible.
git checkout 6e62ea324bf097817474b51119786e8222dd9fdf
# NOTE(review): "requirenments.txt" (sic) appears to be the file's actual
# name in that repo — confirm before "fixing" the spelling.
pip install -r ./requirenments.txt
cp ./report_gpu_metrics.py /root/report_gpu_metrics.py
# Write the systemd unit; the here-doc body is truncated in this view
# (<<- strips leading tabs, so the unit file may be indented with tabs).
cat <<-EOH > /lib/systemd/system/gpu_utilization_agent.service