conda create -n openvino-env python=3.9 -y
conda activate openvino-env
sudo apt-get install python3.9-dev libpython3.9
pip install "openvino-dev[caffe,tensorflow,onnx]"
# or, without the framework-specific extras:
pip install openvino-dev
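Optional: verify the install from Python. A minimal sketch, assuming the OpenVINO 2022.x Python API (openvino.runtime); older releases expose openvino.inference_engine instead. The file name is just illustrative.

# check_install.py (hypothetical file name)
from openvino.runtime import Core

core = Core()
# lists the inference devices OpenVINO can see, e.g. ['CPU'] or ['CPU', 'GPU']
print("Available devices:", core.available_devices)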
Step 3: List all models available in the Open Model Zoo
omz_info_dumper --print_all
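The dumper prints a machine-readable (JSON) description of every model, so it can be filtered from Python. A minimal sketch; the "yolo" filter, the "name" key, and the script name are assumptions based on recent openvino-dev releases.

# list_yolo_models.py (hypothetical file name)
import json
import subprocess

out = subprocess.run(["omz_info_dumper", "--print_all"],
                     capture_output=True, text=True, check=True).stdout
for model in json.loads(out):
    if "yolo" in model["name"]:
        print(model["name"])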
4.1: Download the model
# quote the wildcard so the shell does not expand it against local files
omz_downloader --name "yolo*" --output_dir models
4.2: Optimize the model (convert it to OpenVINO IR)
# yolo.model is a placeholder for the downloaded model file; --data_type FP16 produces a half-precision IR
mo --input_model yolo.model --data_type FP16 --output_dir ir
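Once mo has produced the IR (an .xml/.bin pair under ir/), it can be loaded with the OpenVINO runtime. A minimal sketch, assuming the 2022.x openvino.runtime API; ir/yolo.xml and the script name follow from the placeholder command above and are hypothetical.

# run_ir.py (hypothetical file name)
import numpy as np
from openvino.runtime import Core

core = Core()
model = core.read_model("ir/yolo.xml")       # the matching .bin is found automatically
compiled = core.compile_model(model, "CPU")  # or "GPU", "AUTO", ...

input_layer = compiled.input(0)
output_layer = compiled.output(0)

# feed a zero tensor with the model's static input shape, just to prove the pipeline runs
dummy = np.zeros(list(input_layer.shape), dtype=np.float32)
result = compiled([dummy])[output_layer]
print("Output shape:", result.shape)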
git clone https://github.com/ultralytics/yolov5
cd yolov5
pip install -r requirements.txt
# run the detection demo
python3 detect.py --weights yolov5n.pt

# export the model: this single command converts PyTorch --> ONNX --> OpenVINO IR
# and creates a yolov5n_openvino_model/ directory next to the weights
python3 export.py --weights yolov5n.pt --include openvino
cd ..
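The exporter writes an IR pair under yolov5n_openvino_model/ that the OpenVINO runtime can load directly. A minimal sketch, assuming the 2022.x openvino.runtime API; the exact .xml file name may differ between yolov5 releases, and the script name is illustrative.

# check_export.py (hypothetical file name)
from openvino.runtime import Core

core = Core()
model = core.read_model("yolov5/yolov5n_openvino_model/yolov5n.xml")
compiled = core.compile_model(model, "CPU")
print("Inputs: ", [inp.any_name for inp in compiled.inputs])
print("Outputs:", [out.any_name for out in compiled.outputs])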
wget -O face-demographics-walking.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/face-demographics-walking.mp4
wget -O bottle-detection.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/bottle-detection.mp4
wget -O head-pose-face-detection-female.mp4 https://github.com/intel-iot-devkit/sample-videos/raw/master/head-pose-face-detection-female.mp4
wget https://raw.githubusercontent.com/bharath5673/OpenVINO/main/Openvino-yolo.py
wget https://raw.githubusercontent.com/bharath5673/OpenVINO/main/yolo_80classes.txt
python3 Openvino-yolo.py  # uses the webcam as input by default