From your terminal, ssh to the following IP:

To be able to log in to Bridges from your VM, you first need to generate an XSEDE certificate using the following command:

myproxy-logon -s myproxy.xsede.org -l user_name -t 72

Here -s names the MyProxy server, -l is your XSEDE username, and -t 72 requests a credential valid for 72 hours.
import os
import os.path
import shutil
import argparse

from mpi4py import MPI
from queue import Queue

import iwp_divideimg as divide

# Command-line arguments: where the input images live and where workers write
parser = argparse.ArgumentParser()
parser.add_argument('imgs_path', help='Path of the dataset')
parser.add_argument('worker_root', help='Main work dir')
args = parser.parse_args()
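Since the divide step runs under MPI (mpi4py above), a script like this is launched with one process per worker, e.g. mpirun -np 4 python divide_driver.py imgs/ work/ (the script name and rank count here are illustrative); each MPI rank can then be assigned its own subset of the images.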
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// CUDA kernel. Each thread takes care of one element of c
__global__ void vecAdd(double *a, double *b, double *c, int n)
{
    // Get our global thread ID
    int id = blockIdx.x*blockDim.x+threadIdx.x;

    // Make sure we do not go out of bounds
    if (id < n)
        c[id] = a[id] + b[id];
}
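A note on launching: the kernel needs enough thread blocks to cover all n elements, so the host side typically computes the grid size as ceil(n / blockSize) and calls, e.g., vecAdd<<<gridSize, 1024>>>(d_a, d_b, d_c, n) after allocating and copying d_a, d_b, and d_c with cudaMalloc/cudaMemcpy (the block size of 1024 is an illustrative choice).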
from myproxy.client import MyProxyClient

# username/password are your XSEDE (MyProxy) credentials
myproxy_clnt = MyProxyClient(hostname="myproxy.somewhere.ac.uk")
cert, private_key = myproxy_clnt.logon(username, password, bootstrap=True)
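Here bootstrap=True tells the client to fetch the MyProxy server's CA trust roots on first connection (a general MyProxyClient behaviour, not something specific to this workflow); the returned certificate and key can then be written to disk for other tools to use.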
import os

import radical.pilot as rp
from radical.entk import Pipeline, Stage, Task, AppManager

# Workflow configuration (TODO: convert these to command-line arguments)
crop_size    = 360
worker_root  = "/pylon5/mc3bggp/aymen/local_dir/datasets/polygon/"
weights_path = "/pylon5/mc3bggp/aymen/local_dir/datasets/logs/ice_wedge_polygon20180823T1403/mask_rcnn_ice_wedge_polygon_0008.h5"
imgs_path    = "/pylon5/mc3bggp/aymen/local_dir/datasets/polygon/input_img/"
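For orientation, here is a minimal sketch of how these settings could feed into an EnTK workflow: one pipeline with one stage containing one GPU task, handed to an AppManager. It assumes a RabbitMQ broker reachable at localhost:5672, and the task arguments and resource description are placeholders rather than the project's actual values.

p = Pipeline()
s = Stage()

t = Task()
t.executable = 'python'
# Hypothetical driver script; the real workflow wires in its own tasks
t.arguments  = ['iwp_divideimg.py', imgs_path, worker_root]
t.gpu_reqs   = {'processes': 1, 'process_type': None,
                'threads_per_process': 1, 'thread_type': 'CUDA'}

s.add_tasks(t)
p.add_stages(s)

# The AppManager talks to RabbitMQ for task and state messaging
appman = AppManager(hostname='localhost', port=5672)
appman.resource_desc = {'resource': 'xsede.bridges',  # placeholder values
                        'walltime': 60,
                        'cpus'    : 28,
                        'gpus'    : 2}
appman.workflow = [p]
appman.run()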
cnn_time | img_name
---------|---------
10       | Image1
12       | Image2
14       | Image3
23       | Image4
30       | Image5
#!/bin/bash
NGPUS=2 # Number of GPUs with compute capability 3.5 per server

# Start an MPS control daemon for each GPU
for ((i=0; i<$NGPUS; i++))
do
    mkdir -p /home/aymen/mps_$i
    mkdir -p /home/aymen/mps_log_$i
    export CUDA_VISIBLE_DEVICES=$i
    export CUDA_MPS_PIPE_DIRECTORY=/home/aymen/mps_$i
    export CUDA_MPS_LOG_DIRECTORY=/home/aymen/mps_log_$i
    nvidia-cuda-mps-control -d # launch the daemon in background mode
done
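Each pass through the loop binds one MPS control daemon to one GPU via CUDA_VISIBLE_DEVICES and gives it its own pipe and log directories. A client process later selects a daemon, and therefore a GPU, simply by exporting the matching CUDA_MPS_PIPE_DIRECTORY, which is exactly what the per-rank wrapper below does.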
#!/bin/bash
# Per-rank wrapper: route each local MPI rank to its own MPS daemon (and GPU)
export CUDA_VISIBLE_DEVICES=0
lrank=$OMPI_COMM_WORLD_LOCAL_RANK
case ${lrank} in
[0])
    export CUDA_MPS_PIPE_DIRECTORY=/home/aymen/mps_0; ./vector_add
    ;;
[1])
    export CUDA_MPS_PIPE_DIRECTORY=/home/aymen/mps_1; ./vector_add
    ;;
esac
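Under OpenMPI, which sets $OMPI_COMM_WORLD_LOCAL_RANK for each process, this wrapper would be launched with one rank per GPU, e.g. mpirun -np 2 ./run_vector_add.sh (the wrapper filename here is illustrative).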
#!/bin/bash
# Stop the MPS control daemon for each GPU and clean up its pipe/log directories
NGPUS=2 # Number of GPUs with compute capability 3.5 per server
for ((i=0; i<$NGPUS; i++))
do
    echo $i
    export CUDA_MPS_PIPE_DIRECTORY=/home/aymen/mps_$i
    echo "quit" | nvidia-cuda-mps-control # tell this daemon to shut down
    rm -rf /home/aymen/mps_$i
    rm -rf /home/aymen/mps_log_$i
done
from __future__ import print_function
'''
Basic multi-GPU computation example using the TensorFlow library.
Single/multi-GPU non-MPI author: Aymeric Damien
Multi-GPU large-scale/multi-node MPI author: Aymen Alsaadi
Project: https://github.com/aymericdamien/TensorFlow-Examples/
'''
'''
This tutorial requires your machine to have 2 GPUs.
'''
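To make the example concrete, here is a minimal sketch of the kind of two-GPU computation the original multigpu_basics example performs, written against the TensorFlow 1.x API that the from __future__ import suggests; the matrix sizes and device list are illustrative choices, not values from the original code.

import numpy as np
import tensorflow as tf

# Two random input matrices (sizes are illustrative)
A = np.random.rand(1000, 1000).astype('float32')
B = np.random.rand(1000, 1000).astype('float32')

# Run one matmul on each GPU
partials = []
for dev in ['/gpu:0', '/gpu:1']:
    with tf.device(dev):
        a = tf.constant(A)
        b = tf.constant(B)
        partials.append(tf.matmul(a, b))

# Combine the per-GPU results on the CPU
with tf.device('/cpu:0'):
    total = tf.add_n(partials)

# log_device_placement=True prints which device ran each op
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
    print(sess.run(total))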