# qstat -F gpus,gputype,mem_total -q gpuqlogin -pe gpu-titanx 1
#   (SGE query fused in by extraction: lists GPU count/type/memory for the
#    gpuqlogin queue with the gpu-titanx parallel environment.)

# Unit table for formatting a number of seconds as human-readable text.
# Ordered largest-to-smallest so a formatter can greedily divide down.
TIME_DURATION_UNITS = (
    ('week', 60 * 60 * 24 * 7),
    ('day', 60 * 60 * 24),
    ('hour', 60 * 60),
    ('min', 60),
    ('sec', 1),
)
| def human_time_duration(seconds): |
#!/usr/bin/env python3
# pip install coloredlogs verboselogs
import os
import logging

import coloredlogs
import verboselogs

# logger = logging.getLogger(__name__)
# VerboseLogger extends the stdlib logger with extra levels (adds
# logger.success among others).
logger = verboselogs.VerboseLogger(__name__)
log_path = 'train.log'
#!/usr/bin/env python3
# For command line usage, include the token and chat ids in advance in the script.
# If you want to use only the functions, there's no need to include this.
telegram_token = ""
# Get chat id by opening the URL: https://api.telegram.org/bot{token}/getUpdates
telegram_chat_ids = [""]
# Build FFmpeg with NVIDIA hardware codec support inside a conda environment.

# 1) Install the NVENC/NVDEC API headers into the conda prefix.
git clone https://git.videolan.org/git/ffmpeg/nv-codec-headers.git
cd nv-codec-headers
vi Makefile  # change the first line to PREFIX = ${CONDA_PREFIX}
make install
cd ..

# 2) Fetch FFmpeg at a pinned release tag, plus the assembler its build needs.
git clone https://git.ffmpeg.org/ffmpeg.git
cd ffmpeg
git checkout n4.2.2
conda install nasm
# NVIDIA DALI video-pipeline imports and command-line setup.
from nvidia.dali.pipeline import Pipeline
from nvidia.dali.plugin import pytorch
import nvidia.dali.ops as ops
import nvidia.dali.types as types

import argparse

parser = argparse.ArgumentParser()
# file_list format: one "<path> <label>" pair per line, consumed by
# DALI's VideoReader operator.
parser.add_argument('--file_list', type=str, default='file_list.txt',
                    help='DALI file_list for VideoReader')
| import argparse | |
| from rich_argparse import ArgumentDefaultsRichHelpFormatter | |
| def get_parser(): | |
| parser = argparse.ArgumentParser( | |
| description="Example of argparse usage, with default values printed in help.", | |
| formatter_class=ArgumentDefaultsRichHelpFormatter, | |
| ) |
#!/bin/bash
# Ten-crop video augmentation: crop the 4 corners and the centre, then do
# the same on the horizontally flipped video (10 outputs per input video).

# Require at least input and output directories; resolution is optional.
if [ $# -lt 2 ]
then
	echo "usage: $0 [input_dir] [output_dir] [output_resolution=224x224]"
	echo "Crops the video into 4 corners and 1 centre, and flips the video horizontally to do the same, resulting in 10 augmentations per video."
	exit 1
fi

input_dir="$1"
# docker-compose.yml : Make container as described here.
# Author : Hyeonsu Lyu, hslyu@unist.ac.kr, +82 10-5117-9780
#          Kiyoon Kim (kiyoon.kim@ed.ac.uk)
# First version, Jan. 30, 2019
version: '2'
services:
  aislab-docker:
    container_name: base_env
    image: kiyoon/docker-for-ML:cuda10.1-cudnn7
| ffmpeg -hwaccel cuvid -c:v h264_cuvid -i <input.MP4> -c:v h264_nvenc -rc:v vbr_hq -cq:v 19 -b:v 10000k -maxrate:v 20000k -profile:v high -color_range pc -colorspace bt709 -color_trc bt709 -color_primaries bt709 -c:a copy <output.mp4> |