Created
October 14, 2021 11:20
-
-
Save jeanpat/5315c468b4e3738eae3a48154d177684 to your computer and use it in GitHub Desktop.
A Jupyter notebook to check the lightning-flash installation.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
{ | |
"cells": [ | |
{ | |
"cell_type": "code", | |
"execution_count": 1, | |
"id": "989656fc", | |
"metadata": {}, | |
"outputs": [], | |
"source": [ | |
"import numpy as np\n", | |
"import sys\n", | |
"import torch\n", | |
"import fastai\n", | |
"import flash" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 2, | |
"id": "e499bfa8", | |
"metadata": {}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"python: /mnt/stockage/Developp/EnvPLFlash/bin/python3.9\n", | |
"numpy 1.21.2\n", | |
"PyTorch 1.8.2\n", | |
"Fastai: 2.3.1\n", | |
"CUDA GPU True\n", | |
"fastai: 2.3.1\n", | |
"flash 0.5.0\n" | |
] | |
} | |
], | |
"source": [ | |
"print('python:',sys.executable)\n", | |
"print('numpy', np.__version__)\n", | |
"print(\"PyTorch\", torch.__version__)\n", | |
"#print(\"Detectron2 \",detectron2.__version__)\n", | |
"print(\"Fastai:\",fastai.__version__)\n", | |
"print(\"CUDA GPU\",torch.cuda.is_available())\n", | |
"print('fastai:', fastai.__version__)\n", | |
"print('flash', flash.__version__)" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 3, | |
"id": "2185e320", | |
"metadata": {}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"---------------------- --------------------------------------------------------------------------------\r\n", | |
"sys.platform linux\r\n", | |
"Python 3.9.7 | packaged by conda-forge | (default, Sep 29 2021, 19:23:11) [GCC 9.4.0]\r\n", | |
"numpy 1.21.2\r\n", | |
"detectron2 failed to import\r\n", | |
"detectron2._C not built correctly: No module named 'detectron2'\r\n", | |
"Compiler ($CXX) c++ (Ubuntu 9.3.0-17ubuntu1~20.04) 9.3.0\r\n", | |
"CUDA compiler Build cuda_11.4.r11.4/compiler.30300941_0\r\n", | |
"DETECTRON2_ENV_MODULE <not set>\r\n", | |
"PyTorch 1.8.2 @/mnt/stockage/Developp/EnvPLFlash/lib/python3.9/site-packages/torch\r\n", | |
"PyTorch debug build False\r\n", | |
"GPU available True\r\n", | |
"GPU 0 NVIDIA GeForce GTX 960 (arch=5.2)\r\n", | |
"CUDA_HOME /usr/local/cuda-11.4\r\n", | |
"Pillow 8.3.1\r\n", | |
"torchvision 0.9.2 @/mnt/stockage/Developp/EnvPLFlash/lib/python3.9/site-packages/torchvision\r\n", | |
"torchvision arch flags 3.5, 5.0, 6.0, 7.0, 7.5\r\n", | |
"fvcore 0.1.5.post20210924\r\n", | |
"iopath 0.1.9\r\n", | |
"cv2 4.5.3\r\n", | |
"---------------------- --------------------------------------------------------------------------------\r\n", | |
"PyTorch built with:\r\n", | |
" - GCC 7.3\r\n", | |
" - C++ Version: 201402\r\n", | |
" - Intel(R) oneAPI Math Kernel Library Version 2021.4-Product Build 20210904 for Intel(R) 64 architecture applications\r\n", | |
" - Intel(R) MKL-DNN v1.7.0 (Git Hash 7aed236906b1f7a05c0917e5257a1af05e9ff683)\r\n", | |
" - OpenMP 201511 (a.k.a. OpenMP 4.5)\r\n", | |
" - NNPACK is enabled\r\n", | |
" - CPU capability usage: NO AVX\r\n", | |
" - CUDA Runtime 10.2\r\n", | |
" - NVCC architecture flags: -gencode;arch=compute_37,code=sm_37;-gencode;arch=compute_50,code=sm_50;-gencode;arch=compute_60,code=sm_60;-gencode;arch=compute_61,code=sm_61;-gencode;arch=compute_70,code=sm_70;-gencode;arch=compute_75,code=sm_75;-gencode;arch=compute_37,code=compute_37\r\n", | |
" - CuDNN 7.6.5\r\n", | |
" - Magma 2.5.2\r\n", | |
" - Build settings: BLAS_INFO=mkl, BUILD_TYPE=Release, CUDA_VERSION=10.2, CUDNN_VERSION=7.6.5, CXX_COMPILER=/opt/rh/devtoolset-7/root/usr/bin/c++, CXX_FLAGS= -Wno-deprecated -fvisibility-inlines-hidden -DUSE_PTHREADPOOL -fopenmp -DNDEBUG -DUSE_KINETO -DUSE_FBGEMM -DUSE_QNNPACK -DUSE_PYTORCH_QNNPACK -DUSE_XNNPACK -O2 -fPIC -Wno-narrowing -Wall -Wextra -Werror=return-type -Wno-missing-field-initializers -Wno-type-limits -Wno-array-bounds -Wno-unknown-pragmas -Wno-sign-compare -Wno-unused-parameter -Wno-unused-variable -Wno-unused-function -Wno-unused-result -Wno-unused-local-typedefs -Wno-strict-overflow -Wno-strict-aliasing -Wno-error=deprecated-declarations -Wno-stringop-overflow -Wno-psabi -Wno-error=pedantic -Wno-error=redundant-decls -Wno-error=old-style-cast -fdiagnostics-color=always -faligned-new -Wno-unused-but-set-variable -Wno-maybe-uninitialized -fno-math-errno -fno-trapping-math -Werror=format -Wno-stringop-overflow, LAPACK_INFO=mkl, PERF_WITH_AVX=1, PERF_WITH_AVX2=1, PERF_WITH_AVX512=1, TORCH_VERSION=1.8.2, USE_CUDA=ON, USE_CUDNN=ON, USE_EXCEPTION_PTR=1, USE_GFLAGS=OFF, USE_GLOG=OFF, USE_MKL=ON, USE_MKLDNN=ON, USE_MPI=OFF, USE_NCCL=ON, USE_NNPACK=ON, USE_OPENMP=ON, \r\n", | |
"\r\n" | |
] | |
} | |
], | |
"source": [ | |
"!wget -nc -q https://github.com/facebookresearch/detectron2/raw/master/detectron2/utils/collect_env.py && python collect_env.py" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 4, | |
"id": "337e8ac4", | |
"metadata": {}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"# packages in environment at /mnt/stockage/Developp/EnvPLFlash:\r\n", | |
"#\r\n", | |
"# Name Version Build Channel\r\n", | |
"_libgcc_mutex 0.1 conda_forge conda-forge\r\n", | |
"_openmp_mutex 4.5 1_llvm conda-forge\r\n", | |
"absl-py 0.14.1 pypi_0 pypi\r\n", | |
"aiohttp 3.7.4.post0 pypi_0 pypi\r\n", | |
"albumentations 0.5.2 pypi_0 pypi\r\n", | |
"antlr4-python3-runtime 4.8 pypi_0 pypi\r\n", | |
"anyio 3.3.3 pypi_0 pypi\r\n", | |
"argon2-cffi 21.1.0 pypi_0 pypi\r\n", | |
"async-timeout 3.0.1 pypi_0 pypi\r\n", | |
"attrs 21.2.0 pypi_0 pypi\r\n", | |
"backcall 0.2.0 pypi_0 pypi\r\n", | |
"blas 1.0 mkl \r\n", | |
"bleach 4.1.0 pypi_0 pypi\r\n", | |
"blis 0.7.4 pypi_0 pypi\r\n", | |
"bzip2 1.0.8 h7f98852_4 conda-forge\r\n", | |
"ca-certificates 2021.10.8 ha878542_0 conda-forge\r\n", | |
"cachetools 4.2.4 pypi_0 pypi\r\n", | |
"catalogue 2.0.6 pypi_0 pypi\r\n", | |
"certifi 2021.10.8 pypi_0 pypi\r\n", | |
"cffi 1.15.0 pypi_0 pypi\r\n", | |
"chardet 4.0.0 pypi_0 pypi\r\n", | |
"charset-normalizer 2.0.7 pypi_0 pypi\r\n", | |
"click 8.0.3 pypi_0 pypi\r\n", | |
"configparser 5.0.2 pypi_0 pypi\r\n", | |
"cudatoolkit 10.2.89 h8f6ccaa_9 conda-forge\r\n", | |
"cycler 0.10.0 pypi_0 pypi\r\n", | |
"cymem 2.0.5 pypi_0 pypi\r\n", | |
"cython 0.29.24 pypi_0 pypi\r\n", | |
"dataclasses 0.6 pypi_0 pypi\r\n", | |
"decorator 5.1.0 pypi_0 pypi\r\n", | |
"defusedxml 0.7.1 pypi_0 pypi\r\n", | |
"docker-pycreds 0.4.0 pypi_0 pypi\r\n", | |
"docstring-parser 0.11 pypi_0 pypi\r\n", | |
"effdet 0.2.4 pypi_0 pypi\r\n", | |
"efficientnet-pytorch 0.6.3 pypi_0 pypi\r\n", | |
"entrypoints 0.3 pypi_0 pypi\r\n", | |
"fastai 2.3.1 pypi_0 pypi\r\n", | |
"fastcore 1.3.26 pypi_0 pypi\r\n", | |
"fastprogress 1.0.0 pypi_0 pypi\r\n", | |
"ffmpeg 4.4.0 hca11adc_0 conda-forge\r\n", | |
"fire 0.4.0 pypi_0 pypi\r\n", | |
"freetype 2.10.4 h0708190_1 conda-forge\r\n", | |
"fsspec 2021.10.0 pypi_0 pypi\r\n", | |
"future 0.18.2 pypi_0 pypi\r\n", | |
"fvcore 0.1.5.post20210924 pypi_0 pypi\r\n", | |
"gettext 0.21.0 hf68c758_0 \r\n", | |
"gitdb 4.0.7 pypi_0 pypi\r\n", | |
"gitpython 3.1.24 pypi_0 pypi\r\n", | |
"gmp 6.2.1 h58526e2_0 conda-forge\r\n", | |
"gnutls 3.6.15 he1e5248_0 \r\n", | |
"google-auth 2.3.0 pypi_0 pypi\r\n", | |
"google-auth-oauthlib 0.4.6 pypi_0 pypi\r\n", | |
"grpcio 1.41.0 pypi_0 pypi\r\n", | |
"icedata 0.4.0 pypi_0 pypi\r\n", | |
"icevision 0.8.1 pypi_0 pypi\r\n", | |
"icu 68.1 h58526e2_0 conda-forge\r\n", | |
"idna 3.3 pypi_0 pypi\r\n", | |
"imageio 2.9.0 pypi_0 pypi\r\n", | |
"imgaug 0.4.0 pypi_0 pypi\r\n", | |
"iopath 0.1.9 pypi_0 pypi\r\n", | |
"ipykernel 5.5.6 pypi_0 pypi\r\n", | |
"ipython 7.28.0 pypi_0 pypi\r\n", | |
"ipython-genutils 0.2.0 pypi_0 pypi\r\n", | |
"jedi 0.18.0 pypi_0 pypi\r\n", | |
"jinja2 3.0.2 pypi_0 pypi\r\n", | |
"joblib 1.1.0 pypi_0 pypi\r\n", | |
"jpeg 9b h024ee3a_2 \r\n", | |
"jsonargparse 3.19.4 pypi_0 pypi\r\n", | |
"jsonschema 4.1.0 pypi_0 pypi\r\n", | |
"jupyter-client 6.1.12 pypi_0 pypi\r\n", | |
"jupyter-core 4.8.1 pypi_0 pypi\r\n", | |
"jupyter-server 1.11.1 pypi_0 pypi\r\n", | |
"jupyterlab-pygments 0.1.2 pypi_0 pypi\r\n", | |
"kiwisolver 1.3.2 pypi_0 pypi\r\n", | |
"kornia 0.5.3 pypi_0 pypi\r\n", | |
"lame 3.100 h7f98852_1001 conda-forge\r\n", | |
"lcms2 2.12 h3be6417_0 \r\n", | |
"ld_impl_linux-64 2.36.1 hea4e1c9_2 conda-forge\r\n", | |
"libffi 3.4.2 h9c3ff4c_4 conda-forge\r\n", | |
"libgcc-ng 11.2.0 h1d223b6_11 conda-forge\r\n", | |
"libiconv 1.16 h516909a_0 conda-forge\r\n", | |
"libidn2 2.3.2 h7f98852_0 conda-forge\r\n", | |
"libpng 1.6.37 h21135ba_2 conda-forge\r\n", | |
"libstdcxx-ng 11.2.0 he4da1e4_11 conda-forge\r\n", | |
"libtasn1 4.17.0 h7f98852_0 conda-forge\r\n", | |
"libtiff 4.2.0 h85742a9_0 \r\n", | |
"libunistring 0.9.10 h7f98852_0 conda-forge\r\n", | |
"libuv 1.42.0 h7f98852_0 conda-forge\r\n", | |
"libwebp-base 1.2.1 h7f98852_0 conda-forge\r\n", | |
"libxml2 2.9.12 h72842e0_0 conda-forge\r\n", | |
"libzlib 1.2.11 h36c2ea0_1013 conda-forge\r\n", | |
"lightning-bolts 0.4.0 pypi_0 pypi\r\n", | |
"lightning-flash 0.5.0 pypi_0 pypi\r\n", | |
"llvm-openmp 12.0.1 h4bd325d_1 conda-forge\r\n", | |
"loguru 0.5.3 pypi_0 pypi\r\n", | |
"lz4-c 1.9.3 h9c3ff4c_1 conda-forge\r\n", | |
"markdown 3.3.4 pypi_0 pypi\r\n", | |
"markupsafe 2.0.1 pypi_0 pypi\r\n", | |
"matplotlib 3.4.3 pypi_0 pypi\r\n", | |
"matplotlib-inline 0.1.3 pypi_0 pypi\r\n", | |
"mistune 0.8.4 pypi_0 pypi\r\n", | |
"mkl 2021.4.0 h8d4b97c_729 conda-forge\r\n", | |
"mkl-service 2.4.0 py39h3811e60_0 conda-forge\r\n", | |
"mkl_fft 1.3.0 py39h42c9631_2 \r\n", | |
"mkl_random 1.2.2 py39hde0f152_0 conda-forge\r\n", | |
"multidict 5.2.0 pypi_0 pypi\r\n", | |
"munch 2.5.0 pypi_0 pypi\r\n", | |
"murmurhash 1.0.5 pypi_0 pypi\r\n", | |
"nbclient 0.5.4 pypi_0 pypi\r\n", | |
"nbconvert 6.2.0 pypi_0 pypi\r\n", | |
"nbformat 5.1.3 pypi_0 pypi\r\n", | |
"ncurses 6.2 h58526e2_4 conda-forge\r\n", | |
"nest-asyncio 1.5.1 pypi_0 pypi\r\n", | |
"nettle 3.7.3 hbbd107a_1 \r\n", | |
"networkx 2.6.3 pypi_0 pypi\r\n", | |
"ninja 1.10.2 h4bd325d_1 conda-forge\r\n", | |
"nose 1.3.7 pypi_0 pypi\r\n", | |
"notebook 6.4.4 pypi_0 pypi\r\n", | |
"numpy 1.21.2 py39h20f2e39_0 \r\n", | |
"numpy-base 1.21.2 py39h79a1101_0 \r\n", | |
"oauthlib 3.1.1 pypi_0 pypi\r\n", | |
"olefile 0.46 pyh9f0ad1d_1 conda-forge\r\n", | |
"omegaconf 2.1.1 pypi_0 pypi\r\n", | |
"opencv-python 4.5.3.56 pypi_0 pypi\r\n", | |
"opencv-python-headless 4.5.3.56 pypi_0 pypi\r\n", | |
"openh264 2.1.1 h780b84a_0 conda-forge\r\n", | |
"openjpeg 2.4.0 hb52868f_1 conda-forge\r\n", | |
"openssl 3.0.0 h7f98852_1 conda-forge\r\n", | |
"packaging 21.0 pypi_0 pypi\r\n", | |
"pandas 1.2.5 pypi_0 pypi\r\n", | |
"pandocfilters 1.5.0 pypi_0 pypi\r\n", | |
"parso 0.8.2 pypi_0 pypi\r\n", | |
"pathtools 0.1.2 pypi_0 pypi\r\n", | |
"pathy 0.6.0 pypi_0 pypi\r\n", | |
"pexpect 4.8.0 pypi_0 pypi\r\n", | |
"pickleshare 0.7.5 pypi_0 pypi\r\n", | |
"pillow 8.3.1 py39h2c7a002_0 \r\n", | |
"pip 21.3 pyhd8ed1ab_0 conda-forge\r\n", | |
"portalocker 2.3.2 pypi_0 pypi\r\n", | |
"preshed 3.0.5 pypi_0 pypi\r\n", | |
"pretrainedmodels 0.7.4 pypi_0 pypi\r\n", | |
"prometheus-client 0.11.0 pypi_0 pypi\r\n", | |
"promise 2.3 pypi_0 pypi\r\n", | |
"prompt-toolkit 3.0.20 pypi_0 pypi\r\n", | |
"protobuf 3.18.1 pypi_0 pypi\r\n", | |
"psutil 5.8.0 pypi_0 pypi\r\n", | |
"ptyprocess 0.7.0 pypi_0 pypi\r\n", | |
"pyasn1 0.4.8 pypi_0 pypi\r\n", | |
"pyasn1-modules 0.2.8 pypi_0 pypi\r\n", | |
"pycocotools 2.0.2 pypi_0 pypi\r\n", | |
"pycparser 2.20 pypi_0 pypi\r\n", | |
"pydantic 1.8.2 pypi_0 pypi\r\n", | |
"pydeprecate 0.3.1 pypi_0 pypi\r\n", | |
"pygments 2.10.0 pypi_0 pypi\r\n", | |
"pyparsing 2.4.7 pypi_0 pypi\r\n", | |
"pyrsistent 0.18.0 pypi_0 pypi\r\n", | |
"pystiche 1.0.1 pypi_0 pypi\r\n", | |
"python 3.9.7 hf930737_3_cpython conda-forge\r\n", | |
"python-dateutil 2.8.2 pypi_0 pypi\r\n", | |
"python_abi 3.9 2_cp39 conda-forge\r\n", | |
"pytorch 1.8.2 py3.9_cuda10.2_cudnn7.6.5_0 pytorch-lts\r\n", | |
"pytorch-lightning 1.4.9 pypi_0 pypi\r\n", | |
"pytz 2021.3 pypi_0 pypi\r\n", | |
"pywavelets 1.1.1 pypi_0 pypi\r\n", | |
"pyyaml 6.0 pypi_0 pypi\r\n", | |
"pyzmq 22.3.0 pypi_0 pypi\r\n", | |
"readline 8.1 h46c0cb4_0 conda-forge\r\n", | |
"requests 2.26.0 pypi_0 pypi\r\n", | |
"requests-oauthlib 1.3.0 pypi_0 pypi\r\n", | |
"requests-unixsocket 0.2.0 pypi_0 pypi\r\n", | |
"resnest 0.0.6b20211013 pypi_0 pypi\r\n", | |
"rsa 4.7.2 pypi_0 pypi\r\n", | |
"scikit-image 0.18.3 pypi_0 pypi\r\n", | |
"scikit-learn 1.0 pypi_0 pypi\r\n", | |
"scipy 1.7.1 pypi_0 pypi\r\n", | |
"segmentation-models-pytorch 0.2.0 pypi_0 pypi\r\n", | |
"send2trash 1.8.0 pypi_0 pypi\r\n", | |
"sentry-sdk 1.4.3 pypi_0 pypi\r\n", | |
"setuptools 58.2.0 py39hf3d152e_0 conda-forge\r\n", | |
"shapely 1.7.1 pypi_0 pypi\r\n", | |
"shortuuid 1.0.1 pypi_0 pypi\r\n", | |
"six 1.16.0 pyh6c4a22f_0 conda-forge\r\n", | |
"smart-open 5.2.1 pypi_0 pypi\r\n", | |
"smmap 4.0.0 pypi_0 pypi\r\n", | |
"sniffio 1.2.0 pypi_0 pypi\r\n", | |
"spacy 3.1.3 pypi_0 pypi\r\n", | |
"spacy-legacy 3.0.8 pypi_0 pypi\r\n", | |
"sqlite 3.36.0 h9cd32fc_2 conda-forge\r\n", | |
"srsly 2.4.1 pypi_0 pypi\r\n", | |
"subprocess32 3.5.4 pypi_0 pypi\r\n", | |
"tabulate 0.8.9 pypi_0 pypi\r\n", | |
"tbb 2021.3.0 h4bd325d_0 conda-forge\r\n", | |
"tensorboard 2.7.0 pypi_0 pypi\r\n", | |
"tensorboard-data-server 0.6.1 pypi_0 pypi\r\n", | |
"tensorboard-plugin-wit 1.8.0 pypi_0 pypi\r\n", | |
"termcolor 1.1.0 pypi_0 pypi\r\n", | |
"terminado 0.12.1 pypi_0 pypi\r\n", | |
"testpath 0.5.0 pypi_0 pypi\r\n", | |
"thinc 8.0.10 pypi_0 pypi\r\n", | |
"threadpoolctl 3.0.0 pypi_0 pypi\r\n", | |
"tifffile 2021.10.12 pypi_0 pypi\r\n", | |
"timm 0.4.12 pypi_0 pypi\r\n", | |
"tk 8.6.11 h27826a3_1 conda-forge\r\n", | |
"torchaudio 0.8.2 py39 pytorch-lts\r\n", | |
"torchmetrics 0.5.0 pypi_0 pypi\r\n", | |
"torchvision 0.9.2 py39_cu102 pytorch-lts\r\n", | |
"tornado 6.1 pypi_0 pypi\r\n", | |
"tqdm 4.62.3 pypi_0 pypi\r\n", | |
"traitlets 5.1.0 pypi_0 pypi\r\n", | |
"typer 0.4.0 pypi_0 pypi\r\n", | |
"typing_extensions 3.10.0.2 pyha770c72_0 conda-forge\r\n", | |
"tzdata 2021c he74cb21_0 conda-forge\r\n", | |
"urllib3 1.26.7 pypi_0 pypi\r\n", | |
"voila 0.2.16 pypi_0 pypi\r\n", | |
"wandb 0.12.4 pypi_0 pypi\r\n", | |
"wasabi 0.8.2 pypi_0 pypi\r\n", | |
"wcwidth 0.2.5 pypi_0 pypi\r\n", | |
"webencodings 0.5.1 pypi_0 pypi\r\n", | |
"websocket-client 1.2.1 pypi_0 pypi\r\n", | |
"werkzeug 2.0.2 pypi_0 pypi\r\n", | |
"wheel 0.37.0 pyhd8ed1ab_1 conda-forge\r\n", | |
"x264 1!161.3030 h7f98852_1 conda-forge\r\n", | |
"xz 5.2.5 h516909a_1 conda-forge\r\n", | |
"yacs 0.1.8 pypi_0 pypi\r\n", | |
"yarl 1.7.0 pypi_0 pypi\r\n", | |
"yaspin 2.1.0 pypi_0 pypi\r\n", | |
"zlib 1.2.11 h36c2ea0_1013 conda-forge\r\n", | |
"zstd 1.4.9 ha95c52a_0 conda-forge\r\n" | |
] | |
}, | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"\n", | |
"Note: you may need to restart the kernel to use updated packages.\n" | |
] | |
} | |
], | |
"source": [ | |
"%conda list" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": 5, | |
"id": "d8ccf9e7", | |
"metadata": {}, | |
"outputs": [ | |
{ | |
"name": "stdout", | |
"output_type": "stream", | |
"text": [ | |
"usage: instance_segmentation [-h] [--config CONFIG]\r\n", | |
" [--print_config [={comments,skip_null}+]]\r\n", | |
" [--seed_everything SEED_EVERYTHING]\r\n", | |
" [--trainer.serve_sanity_check {true,false}]\r\n", | |
" [--trainer.logger LOGGER]\r\n", | |
" [--trainer.checkpoint_callback {true,false}]\r\n", | |
" [--trainer.callbacks CALLBACKS]\r\n", | |
" [--trainer.default_root_dir DEFAULT_ROOT_DIR]\r\n", | |
" [--trainer.gradient_clip_val GRADIENT_CLIP_VAL]\r\n", | |
" [--trainer.gradient_clip_algorithm GRADIENT_CLIP_ALGORITHM]\r\n", | |
" [--trainer.process_position PROCESS_POSITION]\r\n", | |
" [--trainer.num_nodes NUM_NODES]\r\n", | |
" [--trainer.num_processes NUM_PROCESSES]\r\n", | |
" [--trainer.devices DEVICES] [--trainer.gpus GPUS]\r\n", | |
" [--trainer.auto_select_gpus {true,false}]\r\n", | |
" [--trainer.tpu_cores TPU_CORES]\r\n", | |
" [--trainer.ipus IPUS]\r\n", | |
" [--trainer.log_gpu_memory LOG_GPU_MEMORY]\r\n", | |
" [--trainer.progress_bar_refresh_rate PROGRESS_BAR_REFRESH_RATE]\r\n", | |
" [--trainer.overfit_batches OVERFIT_BATCHES]\r\n", | |
" [--trainer.track_grad_norm TRACK_GRAD_NORM]\r\n", | |
" [--trainer.check_val_every_n_epoch CHECK_VAL_EVERY_N_EPOCH]\r\n", | |
" [--trainer.fast_dev_run FAST_DEV_RUN]\r\n", | |
" [--trainer.accumulate_grad_batches ACCUMULATE_GRAD_BATCHES]\r\n", | |
" [--trainer.max_epochs MAX_EPOCHS]\r\n", | |
" [--trainer.min_epochs MIN_EPOCHS]\r\n", | |
" [--trainer.max_steps MAX_STEPS]\r\n", | |
" [--trainer.min_steps MIN_STEPS]\r\n", | |
" [--trainer.max_time MAX_TIME]\r\n", | |
" [--trainer.limit_train_batches LIMIT_TRAIN_BATCHES]\r\n", | |
" [--trainer.limit_val_batches LIMIT_VAL_BATCHES]\r\n", | |
" [--trainer.limit_test_batches LIMIT_TEST_BATCHES]\r\n", | |
" [--trainer.limit_predict_batches LIMIT_PREDICT_BATCHES]\r\n", | |
" [--trainer.val_check_interval VAL_CHECK_INTERVAL]\r\n", | |
" [--trainer.flush_logs_every_n_steps FLUSH_LOGS_EVERY_N_STEPS]\r\n", | |
" [--trainer.log_every_n_steps LOG_EVERY_N_STEPS]\r\n", | |
" [--trainer.accelerator ACCELERATOR]\r\n", | |
" [--trainer.sync_batchnorm {true,false}]\r\n", | |
" [--trainer.precision PRECISION]\r\n", | |
" [--trainer.weights_summary WEIGHTS_SUMMARY]\r\n", | |
" [--trainer.weights_save_path WEIGHTS_SAVE_PATH]\r\n", | |
" [--trainer.num_sanity_val_steps NUM_SANITY_VAL_STEPS]\r\n", | |
" [--trainer.truncated_bptt_steps TRUNCATED_BPTT_STEPS]\r\n", | |
" [--trainer.resume_from_checkpoint RESUME_FROM_CHECKPOINT]\r\n", | |
" [--trainer.profiler PROFILER]\r\n", | |
" [--trainer.benchmark {true,false}]\r\n", | |
" [--trainer.deterministic {true,false}]\r\n", | |
" [--trainer.reload_dataloaders_every_n_epochs RELOAD_DATALOADERS_EVERY_N_EPOCHS]\r\n", | |
" [--trainer.reload_dataloaders_every_epoch {true,false}]\r\n", | |
" [--trainer.auto_lr_find AUTO_LR_FIND]\r\n", | |
" [--trainer.replace_sampler_ddp {true,false}]\r\n", | |
" [--trainer.terminate_on_nan {true,false}]\r\n", | |
" [--trainer.auto_scale_batch_size AUTO_SCALE_BATCH_SIZE]\r\n", | |
" [--trainer.prepare_data_per_node {true,false}]\r\n", | |
" [--trainer.plugins PLUGINS]\r\n", | |
" [--trainer.amp_backend AMP_BACKEND]\r\n", | |
" [--trainer.amp_level AMP_LEVEL]\r\n", | |
" [--trainer.distributed_backend DISTRIBUTED_BACKEND]\r\n", | |
" [--trainer.move_metrics_to_cpu {true,false}]\r\n", | |
" [--trainer.multiple_trainloader_mode MULTIPLE_TRAINLOADER_MODE]\r\n", | |
" [--trainer.stochastic_weight_avg {true,false}]\r\n", | |
" [--model.backbone BACKBONE] [--model.head HEAD]\r\n", | |
" [--model.pretrained {true,false}]\r\n", | |
" [--model.optimizer OPTIMIZER]\r\n", | |
" [--model.optimizer_kwargs OPTIMIZER_KWARGS]\r\n", | |
" [--model.scheduler SCHEDULER]\r\n", | |
" [--model.scheduler_kwargs SCHEDULER_KWARGS]\r\n", | |
" [--model.learning_rate LEARNING_RATE]\r\n", | |
" [--model.serializer SERIALIZER]\r\n", | |
" [--model.num_classes NUM_CLASSES]\r\n", | |
" {from_coco,from_voc,from_files,from_folders,from_datasets,from_pets}\r\n", | |
" ...\r\n", | |
"\r\n", | |
"pytorch-lightning trainer command line tool\r\n", | |
"\r\n", | |
"optional arguments:\r\n", | |
" -h, --help Show this help message and exit.\r\n", | |
" --config CONFIG Path to a configuration file in json or yaml format.\r\n", | |
" --print_config [={comments,skip_null}+]\r\n", | |
" Print configuration and exit.\r\n", | |
" --seed_everything SEED_EVERYTHING\r\n", | |
" Set to an int to run seed_everything with this value\r\n", | |
" before classes instantiation (type: Optional[int],\r\n", | |
" default: null)\r\n", | |
"\r\n", | |
"Customize every aspect of training via flags:\r\n", | |
" --trainer.serve_sanity_check {true,false}\r\n", | |
" (type: bool, default: False)\r\n", | |
" --trainer.logger LOGGER\r\n", | |
" Logger (or iterable collection of loggers) for\r\n", | |
" experiment tracking. A ``True`` value uses the default\r\n", | |
" ``TensorBoardLogger``. ``False`` will disable logging.\r\n", | |
" If multiple loggers are provided and the `save_dir`\r\n", | |
" property of that logger is not set, local files\r\n", | |
" (checkpoints, profiler traces, etc.) are saved in\r\n", | |
" ``default_root_dir`` rather than in the ``log_dir`` of\r\n", | |
" any of the individual loggers. (type:\r\n", | |
" Union[LightningLoggerBase,\r\n", | |
" Iterable[LightningLoggerBase], bool], default: True)\r\n", | |
" --trainer.checkpoint_callback {true,false}\r\n", | |
" If ``True``, enable checkpointing. It will configure a\r\n", | |
" default ModelCheckpoint callback if there is no user-\r\n", | |
" defined ModelCheckpoint in :paramref:`~pytorch_lightni\r\n", | |
" ng.trainer.trainer.Trainer.callbacks`. (type: bool,\r\n", | |
" default: True)\r\n", | |
" --trainer.callbacks CALLBACKS\r\n", | |
" Add a callback or list of callbacks. (type:\r\n", | |
" Union[List[Callback], Callback, null], default: null)\r\n", | |
" --trainer.default_root_dir DEFAULT_ROOT_DIR\r\n", | |
" Default path for logs and weights when no\r\n", | |
" logger/ckpt_callback passed. Default: ``os.getcwd()``.\r\n", | |
" Can be remote file paths such as `s3://mybucket/path`\r\n", | |
" or 'hdfs://path/' (type: Optional[str], default: null)\r\n", | |
" --trainer.gradient_clip_val GRADIENT_CLIP_VAL\r\n", | |
" 0 means don't clip. (type: Union[int, float], default:\r\n", | |
" 0.0)\r\n", | |
" --trainer.gradient_clip_algorithm GRADIENT_CLIP_ALGORITHM\r\n", | |
" 'value' means clip_by_value, 'norm' means\r\n", | |
" clip_by_norm. Default: 'norm' (type: str, default:\r\n", | |
" norm)\r\n", | |
" --trainer.process_position PROCESS_POSITION\r\n", | |
" orders the progress bar when running multiple models\r\n", | |
" on same machine. (type: int, default: 0)\r\n", | |
" --trainer.num_nodes NUM_NODES\r\n", | |
" number of GPU nodes for distributed training. (type:\r\n", | |
" int, default: 1)\r\n", | |
" --trainer.num_processes NUM_PROCESSES\r\n", | |
" number of processes for distributed training with\r\n", | |
" distributed_backend=\"ddp_cpu\" (type: int, default: 1)\r\n", | |
" --trainer.devices DEVICES\r\n", | |
" Will be mapped to either `gpus`, `tpu_cores`,\r\n", | |
" `num_processes` or `ipus`, based on the accelerator\r\n", | |
" type. (type: Union[int, str, List[int], null],\r\n", | |
" default: null)\r\n", | |
" --trainer.gpus GPUS number of gpus to train on (int) or which GPUs to\r\n", | |
" train on (list or str) applied per node (type:\r\n", | |
" Union[int, str, List[int], null], default: null)\r\n", | |
" --trainer.auto_select_gpus {true,false}\r\n", | |
" If enabled and `gpus` is an integer, pick available\r\n", | |
" gpus automatically. This is especially useful when\r\n", | |
" GPUs are configured to be in \"exclusive mode\", such\r\n", | |
" that only one process at a time can access them.\r\n", | |
" (type: bool, default: False)\r\n", | |
" --trainer.tpu_cores TPU_CORES\r\n", | |
" How many TPU cores to train on (1 or 8) / Single TPU\r\n", | |
" to train on [1] (type: Union[int, str, List[int],\r\n", | |
" null], default: null)\r\n", | |
" --trainer.ipus IPUS How many IPUs to train on. (type: Optional[int],\r\n", | |
" default: null)\r\n", | |
" --trainer.log_gpu_memory LOG_GPU_MEMORY\r\n", | |
" None, 'min_max', 'all'. Might slow performance (type:\r\n", | |
" Optional[str], default: null)\r\n", | |
" --trainer.progress_bar_refresh_rate PROGRESS_BAR_REFRESH_RATE\r\n", | |
" How often to refresh progress bar (in steps). Value\r\n", | |
" ``0`` disables progress bar. Ignored when a custom\r\n", | |
" progress bar is passed to\r\n", | |
" :paramref:`~Trainer.callbacks`. Default: None, means a\r\n", | |
" suitable value will be chosen based on the environment\r\n", | |
" (terminal, Google COLAB, etc.). (type: Optional[int],\r\n", | |
" default: null)\r\n", | |
" --trainer.overfit_batches OVERFIT_BATCHES\r\n", | |
" Overfit a fraction of training data (float) or a set\r\n", | |
" number of batches (int). (type: Union[int, float],\r\n", | |
" default: 0.0)\r\n", | |
" --trainer.track_grad_norm TRACK_GRAD_NORM\r\n", | |
" -1 no tracking. Otherwise tracks that p-norm. May be\r\n", | |
" set to 'inf' infinity-norm. (type: Union[int, float,\r\n", | |
" str], default: -1)\r\n", | |
" --trainer.check_val_every_n_epoch CHECK_VAL_EVERY_N_EPOCH\r\n", | |
" Check val every n train epochs. (type: int, default:\r\n", | |
" 1)\r\n", | |
" --trainer.fast_dev_run FAST_DEV_RUN\r\n", | |
" runs n if set to ``n`` (int) else 1 if set to ``True``\r\n", | |
" batch(es) of train, val and test to find any bugs (ie:\r\n", | |
" a sort of unit test). (type: Union[int, bool],\r\n", | |
" default: False)\r\n", | |
" --trainer.accumulate_grad_batches ACCUMULATE_GRAD_BATCHES\r\n", | |
" Accumulates grads every k batches or as set up in the\r\n", | |
" dict. (type: Union[int, Dict[int, int], List[list]],\r\n", | |
" default: 1)\r\n", | |
" --trainer.max_epochs MAX_EPOCHS\r\n", | |
" Stop training once this number of epochs is reached.\r\n", | |
" Disabled by default (None). If both max_epochs and\r\n", | |
" max_steps are not specified, defaults to\r\n", | |
" ``max_epochs`` = 1000. (type: Optional[int], default:\r\n", | |
" 3)\r\n", | |
" --trainer.min_epochs MIN_EPOCHS\r\n", | |
" Force training for at least these many epochs.\r\n", | |
" Disabled by default (None). If both min_epochs and\r\n", | |
" min_steps are not specified, defaults to\r\n", | |
" ``min_epochs`` = 1. (type: Optional[int], default:\r\n", | |
" null)\r\n", | |
" --trainer.max_steps MAX_STEPS\r\n", | |
" Stop training after this number of steps. Disabled by\r\n", | |
" default (None). (type: Optional[int], default: null)\r\n", | |
" --trainer.min_steps MIN_STEPS\r\n", | |
" Force training for at least these number of steps.\r\n", | |
" Disabled by default (None). (type: Optional[int],\r\n", | |
" default: null)\r\n", | |
" --trainer.max_time MAX_TIME\r\n", | |
" Stop training after this amount of time has passed.\r\n", | |
" Disabled by default (None). The time duration can be\r\n", | |
" specified in the format DD:HH:MM:SS (days, hours,\r\n", | |
" minutes seconds), as a :class:`datetime.timedelta`, or\r\n", | |
" a dictionary with keys that will be passed to\r\n", | |
" :class:`datetime.timedelta`. (type: Union[str,\r\n", | |
" timedelta, Dict[str, int], null], default: null)\r\n", | |
" --trainer.limit_train_batches LIMIT_TRAIN_BATCHES\r\n", | |
" How much of training dataset to check (float =\r\n", | |
" fraction, int = num_batches) (type: Union[int, float],\r\n", | |
" default: 1.0)\r\n", | |
" --trainer.limit_val_batches LIMIT_VAL_BATCHES\r\n", | |
" How much of validation dataset to check (float =\r\n", | |
" fraction, int = num_batches) (type: Union[int, float],\r\n", | |
" default: 1.0)\r\n", | |
" --trainer.limit_test_batches LIMIT_TEST_BATCHES\r\n", | |
" How much of test dataset to check (float = fraction,\r\n", | |
" int = num_batches) (type: Union[int, float], default:\r\n", | |
" 1.0)\r\n", | |
" --trainer.limit_predict_batches LIMIT_PREDICT_BATCHES\r\n", | |
" How much of prediction dataset to check (float =\r\n", | |
" fraction, int = num_batches) (type: Union[int, float],\r\n", | |
" default: 1.0)\r\n", | |
" --trainer.val_check_interval VAL_CHECK_INTERVAL\r\n", | |
" How often to check the validation set. Use float to\r\n", | |
" check within a training epoch, use int to check every\r\n", | |
" n steps (batches). (type: Union[int, float], default:\r\n", | |
" 1.0)\r\n", | |
" --trainer.flush_logs_every_n_steps FLUSH_LOGS_EVERY_N_STEPS\r\n", | |
" How often to flush logs to disk (defaults to every 100\r\n", | |
" steps). (type: int, default: 100)\r\n", | |
" --trainer.log_every_n_steps LOG_EVERY_N_STEPS\r\n", | |
" How often to log within steps (defaults to every 50\r\n", | |
" steps). (type: int, default: 50)\r\n", | |
" --trainer.accelerator ACCELERATOR\r\n", | |
" Previously known as distributed_backend (dp, ddp,\r\n", | |
" ddp2, etc...). Can also take in an accelerator object\r\n", | |
" for custom hardware. (type: Union[str, Accelerator,\r\n", | |
" null], default: null)\r\n", | |
" --trainer.sync_batchnorm {true,false}\r\n", | |
" Synchronize batch norm layers between process\r\n", | |
" groups/whole world. (type: bool, default: False)\r\n", | |
" --trainer.precision PRECISION\r\n", | |
" Double precision (64), full precision (32) or half\r\n", | |
" precision (16). Can be used on CPU, GPU or TPUs.\r\n", | |
" (type: int, default: 32)\r\n", | |
" --trainer.weights_summary WEIGHTS_SUMMARY\r\n", | |
" Prints a summary of the weights when training begins.\r\n", | |
" (type: Optional[str], default: top)\r\n", | |
" --trainer.weights_save_path WEIGHTS_SAVE_PATH\r\n", | |
" Where to save weights if specified. Will override\r\n", | |
" default_root_dir for checkpoints only. Use this if for\r\n", | |
" whatever reason you need the checkpoints stored in a\r\n", | |
" different place than the logs written in\r\n", | |
" `default_root_dir`. Can be remote file paths such as\r\n", | |
" `s3://mybucket/path` or 'hdfs://path/' Defaults to\r\n", | |
" `default_root_dir`. (type: Optional[str], default:\r\n", | |
" null)\r\n", | |
" --trainer.num_sanity_val_steps NUM_SANITY_VAL_STEPS\r\n", | |
" Sanity check runs n validation batches before starting\r\n", | |
" the training routine. Set it to `-1` to run all\r\n", | |
" batches in all validation dataloaders. (type: int,\r\n", | |
" default: 2)\r\n", | |
" --trainer.truncated_bptt_steps TRUNCATED_BPTT_STEPS\r\n", | |
" Deprecated in v1.3 to be removed in 1.5. Please use :p\r\n", | |
" aramref:`~pytorch_lightning.core.lightning.LightningMo\r\n", | |
" dule.truncated_bptt_steps` instead. (type:\r\n", | |
" Optional[int], default: null)\r\n", | |
" --trainer.resume_from_checkpoint RESUME_FROM_CHECKPOINT\r\n", | |
" Path/URL of the checkpoint from which training is\r\n", | |
" resumed. If there is no checkpoint file at the path,\r\n", | |
" start from scratch. If resuming from mid-epoch\r\n", | |
" checkpoint, training will start from the beginning of\r\n", | |
" the next epoch. (type: Union[str, Path, null],\r\n", | |
" default: null)\r\n", | |
" --trainer.profiler PROFILER\r\n", | |
" To profile individual steps during training and assist\r\n", | |
" in identifying bottlenecks. (type: Union[BaseProfiler,\r\n", | |
" str, null], default: null)\r\n", | |
" --trainer.benchmark {true,false}\r\n", | |
" If true enables cudnn.benchmark. (type: bool, default:\r\n", | |
" False)\r\n", | |
" --trainer.deterministic {true,false}\r\n", | |
" If true enables cudnn.deterministic. (type: bool,\r\n", | |
" default: False)\r\n", | |
" --trainer.reload_dataloaders_every_n_epochs RELOAD_DATALOADERS_EVERY_N_EPOCHS\r\n", | |
" Set to a non-negative integer to reload dataloaders\r\n", | |
" every n epochs. Default: 0 (type: int, default: 0)\r\n", | |
" --trainer.reload_dataloaders_every_epoch {true,false}\r\n", | |
" Set to True to reload dataloaders every epoch. ..\r\n", | |
" deprecated:: v1.4 ``reload_dataloaders_every_epoch``\r\n", | |
" has been deprecated in v1.4 and will be removed in\r\n", | |
" v1.6. Please use\r\n", | |
" ``reload_dataloaders_every_n_epochs``. (type: bool,\r\n", | |
" default: False)\r\n", | |
" --trainer.auto_lr_find AUTO_LR_FIND\r\n", | |
" If set to True, will make trainer.tune() run a\r\n", | |
" learning rate finder, trying to optimize initial\r\n", | |
" learning for faster convergence. trainer.tune() method\r\n", | |
" will set the suggested learning rate in self.lr or\r\n", | |
" self.learning_rate in the LightningModule. To use a\r\n", | |
" different key set a string instead of True with the\r\n", | |
" key name. (type: Union[bool, str], default: False)\r\n", | |
" --trainer.replace_sampler_ddp {true,false}\r\n", | |
" Explicitly enables or disables sampler replacement. If\r\n", | |
" not specified this will toggled automatically when DDP\r\n", | |
" is used. By default it will add ``shuffle=True`` for\r\n", | |
" train sampler and ``shuffle=False`` for val/test\r\n", | |
" sampler. If you want to customize it, you can set\r\n", | |
" ``replace_sampler_ddp=False`` and add your own\r\n", | |
" distributed sampler. (type: bool, default: True)\r\n", | |
" --trainer.terminate_on_nan {true,false}\r\n", | |
" If set to True, will terminate training (by raising a\r\n", | |
" `ValueError`) at the end of each training batch, if\r\n", | |
" any of the parameters or the loss are NaN or +/-inf.\r\n", | |
" (type: bool, default: False)\r\n", | |
" --trainer.auto_scale_batch_size AUTO_SCALE_BATCH_SIZE\r\n", | |
" If set to True, will `initially` run a batch size\r\n", | |
" finder trying to find the largest batch size that fits\r\n", | |
" into memory. The result will be stored in\r\n", | |
" self.batch_size in the LightningModule. Additionally,\r\n", | |
" can be set to either `power` that estimates the batch\r\n", | |
" size through a power search or `binsearch` that\r\n", | |
" estimates the batch size through a binary search.\r\n", | |
" (type: Union[str, bool], default: False)\r\n", | |
" --trainer.prepare_data_per_node {true,false}\r\n", | |
" If True, each LOCAL_RANK=0 will call prepare data.\r\n", | |
" Otherwise only NODE_RANK=0, LOCAL_RANK=0 will prepare\r\n", | |
" data (type: bool, default: True)\r\n", | |
" --trainer.plugins PLUGINS\r\n", | |
" Plugins allow modification of core behavior like ddp\r\n", | |
" and amp, and enable custom lightning plugins. (type:\r\n", | |
" Union[List[Union[Plugin, ClusterEnvironment, str]],\r\n", | |
" Plugin, ClusterEnvironment, str, null], default: null)\r\n", | |
" --trainer.amp_backend AMP_BACKEND\r\n", | |
" The mixed precision backend to use (\"native\" or\r\n", | |
" \"apex\") (type: str, default: native)\r\n", | |
" --trainer.amp_level AMP_LEVEL\r\n", | |
" The optimization level to use (O1, O2, etc...). (type:\r\n", | |
" str, default: O2)\r\n", | |
" --trainer.distributed_backend DISTRIBUTED_BACKEND\r\n", | |
" deprecated. Please use 'accelerator' (type:\r\n", | |
" Optional[str], default: null)\r\n", | |
" --trainer.move_metrics_to_cpu {true,false}\r\n", | |
" Whether to force internal logged metrics to be moved\r\n", | |
" to cpu. This can save some gpu memory, but can make\r\n", | |
" training slower. Use with attention. (type: bool,\r\n", | |
" default: False)\r\n", | |
" --trainer.multiple_trainloader_mode MULTIPLE_TRAINLOADER_MODE\r\n", | |
" How to loop over the datasets when there are multiple\r\n", | |
" train loaders. In 'max_size_cycle' mode, the trainer\r\n", | |
" ends one epoch when the largest dataset is traversed,\r\n", | |
" and smaller datasets reload when running out of their\r\n", | |
" data. In 'min_size' mode, all the datasets reload when\r\n", | |
" reaching the minimum length of datasets. (type: str,\r\n", | |
" default: max_size_cycle)\r\n", | |
" --trainer.stochastic_weight_avg {true,false}\r\n", | |
" Whether to use `Stochastic Weight Averaging (SWA)\r\n", | |
" <https://pytorch.org/blog/pytorch-1.6-now-includes-\r\n", | |
" stochastic-weight-averaging/>_` (type: bool, default:\r\n", | |
" False)\r\n", | |
"\r\n", | |
"The ``InstanceSegmentation`` is a :class:`~flash.Task` for detecting objects in images. For more details, see:\r\n", | |
" --model.backbone BACKBONE\r\n", | |
"                        Pretrained backbone CNN architecture. Constructs a\r\n", | |
" model with a ResNet-50-FPN backbone when no backbone\r\n", | |
" is specified. (type: Optional[str], default:\r\n", | |
" resnet18_fpn)\r\n", | |
" --model.head HEAD (type: Optional[str], default: mask_rcnn)\r\n", | |
" --model.pretrained {true,false}\r\n", | |
" if true, returns a model pre-trained on COCO train2017\r\n", | |
" (type: bool, default: True)\r\n", | |
" --model.optimizer OPTIMIZER\r\n", | |
" The optimizer to use for training. Can either be the\r\n", | |
" actual class or the class name. (type:\r\n", | |
" Type[Optimizer], default: Adam)\r\n", | |
" --model.optimizer_kwargs OPTIMIZER_KWARGS\r\n", | |
" Additional kwargs to use when creating the optimizer\r\n", | |
" (if not passed as an instance). (type:\r\n", | |
" Optional[Dict[str, Any]], default: null)\r\n", | |
" --model.scheduler SCHEDULER\r\n", | |
" The scheduler or scheduler class to use. (type:\r\n", | |
" Union[Type[_LRScheduler], str, _LRScheduler, null],\r\n", | |
" default: null)\r\n", | |
" --model.scheduler_kwargs SCHEDULER_KWARGS\r\n", | |
" Additional kwargs to use when creating the scheduler\r\n", | |
" (if not passed as an instance). (type:\r\n", | |
" Optional[Dict[str, Any]], default: null)\r\n", | |
" --model.learning_rate LEARNING_RATE\r\n", | |
" The learning rate to use for training (type: float,\r\n", | |
" default: 0.0005)\r\n", | |
" --model.serializer SERIALIZER\r\n", | |
" (type: Union[Serializer, Mapping[str, Serializer],\r\n", | |
" null], default: null)\r\n", | |
" --model.num_classes NUM_CLASSES\r\n", | |
" the number of classes for detection, including\r\n", | |
" background (type: Optional[int], default: null)\r\n", | |
"\r\n", | |
"subcommands:\r\n", | |
" For more details of each subcommand add it as argument followed by --help.\r\n", | |
"\r\n", | |
" {from_coco,from_voc,from_files,from_folders,from_datasets,from_pets}\r\n" | |
] | |
} | |
], | |
"source": [ | |
"!flash instance_segmentation --help" | |
] | |
}, | |
{ | |
"cell_type": "code", | |
"execution_count": null, | |
"id": "4756df52", | |
"metadata": {}, | |
"outputs": [], | |
"source": [] | |
} | |
], | |
"metadata": { | |
"kernelspec": { | |
"display_name": "Python 3", | |
"language": "python", | |
"name": "python3" | |
}, | |
"language_info": { | |
"codemirror_mode": { | |
"name": "ipython", | |
"version": 3 | |
}, | |
"file_extension": ".py", | |
"mimetype": "text/x-python", | |
"name": "python", | |
"nbconvert_exporter": "python", | |
"pygments_lexer": "ipython3", | |
"version": "3.9.7" | |
} | |
}, | |
"nbformat": 4, | |
"nbformat_minor": 5 | |
} |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment