- Copy the caffe2 folder from /mnt/nfs/work1/elm/hzjiang/Share/caffe2 to your own work/toolboxes folder (see the example below). For me this was /home/arunirc/work1/Tools/caffe2/build.
- Warning: make sure to remove any Anaconda paths from your .bashrc, if present.
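For example, a sketch of this step (the destination /home/<your-user>/work1/Tools is a placeholder; use your own folder):
mkdir -p /home/<your-user>/work1/Tools
cp -r /mnt/nfs/work1/elm/hzjiang/Share/caffe2 /home/<your-user>/work1/Tools/caffe2
# quick check for Anaconda entries in .bashrc that could shadow the cluster Python
grep -in conda ~/.bashrc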
- Set up the environment for Caffe2 (the paths below point at my copy; adjust them if you use your own):
export PYTHONPATH=/mnt/nfs/work1/elm/arunirc/Tools/caffe2/build:$PYTHONPATH
export LD_LIBRARY_PATH=/mnt/nfs/work1/elm/arunirc/Tools/caffe2/build/lib:$LD_LIBRARY_PATH
module unload python/2.7.13
module load cuda80/toolkit/8.0.44 cudnn/6.0
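These exports and module commands only last for the current shell; one option (just a suggestion, not part of the original setup) is to paste the four lines above into a small script, e.g. env_caffe2.sh, and source it in each new session:
source env_caffe2.sh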
- Install python "future" module in user space:
pip install future --user
- Run the following to check that everything is working (and pray that it does):
python2 -c 'from caffe2.python import core' 2>/dev/null && echo "Success" || echo "Failure"
- Install Cython in user space (needed to build the Detectron and COCO API extensions):
pip install cython --user
- Check that my Detectron folder /mnt/nfs/work1/elm/arunirc/Tools/detectron/ is visible to you.
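For example:
ls /mnt/nfs/work1/elm/arunirc/Tools/detectron/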
- Set up the environment for Detectron:
export PYTHONPATH=/mnt/nfs/work1/elm/arunirc/Tools/detectron/lib:$PYTHONPATH
export LD_LIBRARY_PATH=/mnt/nfs/work1/elm/arunirc/Tools/caffe2/build/lib:$LD_LIBRARY_PATH
module unload python/2.7.13
module load cuda80/toolkit/8.0.44 cudnn/6.0
- Check that it works (DETECTRON should point at the Detectron folder above):
DETECTRON=/mnt/nfs/work1/elm/arunirc/Tools/detectron
srun -p m40-short --gres=gpu:1 python2 $DETECTRON/tests/test_spatial_narrow_as_op.py
- Install the MS-COCO Python API from here: https://github.com/cocodataset/cocoapi
COCO_API=<YOUR_LOCAL_FOLDER>/cocoapi/PythonAPI
export PYTHONPATH=${COCO_API}:$PYTHONPATH
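One way to get and build it (a sketch; <YOUR_LOCAL_FOLDER> is the same placeholder as above, and the build uses the Cython installed earlier):
cd <YOUR_LOCAL_FOLDER>
git clone https://github.com/cocodataset/cocoapi.git
cd cocoapi/PythonAPI
make
After that, the two lines above put pycocotools on your PYTHONPATH.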
- The sbatch script below (scripts/run_video_cluster.sbatch) runs RetinaNet detection on a single video on the cluster:
#!/bin/bash
#
#SBATCH --job-name=retina_video
##SBATCH -n 1 # Number of cores
#SBATCH -N 1 # Ensure that all cores are on one machine
#SBATCH -p m40-short # Partition to submit to
#SBATCH --mem=50000 # Memory pool for all cores (see also --mem-per-cpu)
#SBATCH -o logs/log_%j.out # File to which STDOUT will be written
#SBATCH -e logs/error_%j.err # File to which STDERR will be written
#SBATCH --gres=gpu:1
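# NOTE: the logs/ directory used by -o/-e above must exist before submitting;
# Slurm will not create it for you.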
## Usage (from the project root):
## sbatch scripts/run_video_cluster.sbatch ${VIDEO_PATH} ${DATASET_NAME}
##### Experiment settings #####
echo $1
VIDEO_PATH=$1 # first argument: path to the input video
OUTPUT_NAME=$2 # second argument: dataset/experiment name; change this for your expts
MIN_SCORE=0.6
# Outputs are saved at: output/retina_video-${OUTPUT_NAME}
# set paths for caffe2, detectron and coco-api
export PYTHONPATH=/mnt/nfs/work1/elm/arunirc/Tools/caffe2/build:$PYTHONPATH
export LD_LIBRARY_PATH=/mnt/nfs/work1/elm/arunirc/Tools/caffe2/build/lib:$LD_LIBRARY_PATH
module unload python/2.7.13
module load cuda80/toolkit/8.0.44 cudnn/6.0
export PYTHONPATH=/mnt/nfs/work1/elm/arunirc/Tools/detectron/lib:$PYTHONPATH
COCO_API=/mnt/nfs/work1/elm/arunirc/Tools/cocoapi/PythonAPI
export PYTHONPATH=${COCO_API}:$PYTHONPATH
DETECTRON=/home/arunirc/work1/Tools/detectron
WT_PATH=/home/arunirc/work1/Research/RetinaNet-COCO/data/models/retinanet_R-50-FPN_2x_train-coco_model_final.pkl
# install required python packages (at user level)
pip install pyyaml --user
pip install matplotlib --user
pip install opencv-python --user
pip install sk-video --user
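# (these installs are no-ops if the packages are already present under ~/.local)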
# Run detectron on a single sample video
# VIDEO_PATH=/mnt/nfs/scratch1/aprasad/VLOG_dataset/chunk_0/_7MHVeRsVrA.mkv
python2 tools/detect_video_cluster.py \
--cfg ${DETECTRON}/configs/12_2017_baselines/retinanet_R-50-FPN_2x.yaml \
--wts ${WT_PATH} \
--output-dir output/retina_video-${OUTPUT_NAME} \
--thresh ${MIN_SCORE} \
${VIDEO_PATH}
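- An example submission from the project root (the video path is the sample from the commented line above; the dataset name vlog_sample is just a placeholder):
mkdir -p logs
sbatch scripts/run_video_cluster.sbatch /mnt/nfs/scratch1/aprasad/VLOG_dataset/chunk_0/_7MHVeRsVrA.mkv vlog_sample
squeue -u $USER   # check that the job is running
Detections are written under output/retina_video-vlog_sample, and the job log to logs/log_<jobid>.out.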