Skip to content

Instantly share code, notes, and snippets.

@schelleg
Created December 2, 2019 21:21
Show Gist options
  • Select an option

  • Save schelleg/40fb06238709ece43c52cfc770860e20 to your computer and use it in GitHub Desktop.

Select an option

Save schelleg/40fb06238709ece43c52cfc770860e20 to your computer and use it in GitHub Desktop.
A (No-Cross) Compile of PyTorch and OpenCV using AWS A1 instances

This gist is a collection of bash scripts and a Jupyter notebook to support the blog entitled:

"A (No-Cross) Compile of PyTorch and OpenCV using AWS A1 instances"

#!/bin/bash
# Native (no-cross) build of OpenCV for ARM targets.
#
# Usage: $0 <a1|u96|install>
#   a1      - build on an AWS A1 instance (16 build threads)
#   u96     - build on an Ultra96 (4 build threads)
#   install - install a previously completed build, then exit
#
# Reference:
# https://docs.opencv.org/master/d2/de6/tutorial_py_setup_in_ubuntu.html
set -x
set -e

# Needed before the branch below: the install path references the build dir.
opencv_version=3.4.3

if [ "${1:-}" == "a1" ]; then
  threads=16
elif [ "${1:-}" == "u96" ]; then
  threads=4
elif [ "${1:-}" == "install" ]; then
  # Install from the build tree produced by a prior a1/u96 run. The Makefile
  # lives in the build dir, and we must exit afterwards — falling through
  # (as the original did) would re-download and rebuild everything.
  cd "opencv-${opencv_version}/build"
  make install
  ldconfig
  exit 0
else
  # Double quotes so $0 expands to the script name (single quotes printed
  # the literal string '$0'); a usage error is a failure, so exit non-zero.
  echo "$0 <a1|u96|install>"
  exit 1
fi

# if needed
# sudo apt-get update
# sudo apt-get -y install build-essential cmake zip python3-opencv python3-dev python3-numpy

# Host fixed from 'github.com' (a mangled proxy hostname) to github.com,
# where the release archives actually live.
wget -O opencv.zip "https://github.com/opencv/opencv/archive/${opencv_version}.zip"
wget -O opencv_contrib.zip "https://github.com/opencv/opencv_contrib/archive/${opencv_version}.zip"
unzip opencv.zip
unzip opencv_contrib.zip
# -p so a re-run after a partial attempt does not abort on the existing dir.
mkdir -p "opencv-${opencv_version}/build"
cd "opencv-${opencv_version}/build"
cmake -D CMAKE_BUILD_TYPE=RELEASE \
-D CMAKE_INSTALL_PREFIX=/usr/local \
-D BUILD_WITH_DEBUG_INFO=OFF \
-D BUILD_DOCS=OFF \
-D BUILD_EXAMPLES=OFF \
-D BUILD_TESTS=ON \
-D BUILD_opencv_ts=OFF \
-D BUILD_PERF_TESTS=OFF \
-D INSTALL_C_EXAMPLES=OFF \
-D INSTALL_PYTHON_EXAMPLES=OFF \
-D ENABLE_NEON=ON \
-D WITH_LIBV4L=ON \
-D WITH_GSTREAMER=ON \
-D BUILD_opencv_dnn=OFF \
-D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib-${opencv_version}/modules \
../
make -j"$threads"
cd ../..
#!/bin/bash
# Native (no-cross) build of a PyTorch wheel for ARM targets.
#
# Usage: $0 <a1|u96>
#   a1  - AWS A1 instance (16 build threads, full build-dep install)
#   u96 - Ultra96 (4 build threads)
#
# Following a nice blog here:
# https://medium.com/hardware-interfacing/how-to-install-pytorch-v4-0-on-raspberry-pi-3b-odroids-and-other-arm-based-devices-91d62f2933c7
set -x
set -e

if [ "${1:-}" == "a1" ]; then
  threads=16
  sudo apt-get update
  sudo apt-get -y install build-essential libopenblas-dev libblas-dev m4 cmake cython python3-dev python3-yaml python3-setuptools python3-pip
  pip3 install wheel
elif [ "${1:-}" == "u96" ]; then
  threads=4
  sudo apt-get update
  # -y added for consistency with the a1 branch; without it the script
  # stalls on an interactive prompt during unattended runs.
  sudo apt-get -y install libopenblas-dev
else
  echo "$0 <a1|u96>"
  exit 1
fi

# -p so a re-run after a partial attempt does not abort on the existing dir.
mkdir -p pytorch_install && cd pytorch_install
# Host fixed from 'github.com' (a mangled proxy hostname) to github.com.
git clone --recursive https://github.com/pytorch/pytorch
cd pytorch

# Disable features unavailable/unneeded on these ARM boards; cap the
# parallelism of the setup.py-driven build at the per-target thread count.
export NO_CUDA=1
export NO_DISTRIBUTED=1
export NO_MKLDNN=1
export NO_NNPACK=1
export NO_QNNPACK=1
export MAX_JOBS=$threads

python3 setup.py bdist_wheel
Display the source blob
Display the rendered blob
Raw
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## PyTorch on PYNQ v2.5 and the Ultra96v1\n",
"\n",
"This notebook is a simple smoke test for a PyTorch installation running on the PYNQ v2.5 image. PyTorch cannot build on the off-the-shelf PYNQ image due to the small core/memory count on the Ultra96. Therefore I used a AWS A1 instance for building the pytorch bdist_wheel. \n",
"\n",
"This notebook is part of a PYNQ blog describing how to use the A1's to help build big-softare for embedded platforms like the Ultra96. I am only running two simple examples below to show the installation working on the board. \n",
"\n",
"- example #1 from [here](https://medium.com/hardware-interfacing/how-to-install-pytorch-v4-0-on-raspberry-pi-3b-odroids-and-other-arm-based-devices-91d62f2933c7)\n",
"- examples #2 from [here](https://dev.to/nestedsoftware/pytorch-hello-world-37mo)"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"torch version: 1.4.0a0+b2b1601\n",
"\n"
]
}
],
"source": [
"# sanity check on import from the A1 built, Ultra96 installed torch package\n",
"# I built from the HEAD of the pytorch repo, hence the git hash.\n",
"import torch\n",
"print(f'torch version: {torch.__version__}\\n')"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[0.1611, 0.4185, 0.2366],\n",
" [0.0886, 0.0956, 0.1774],\n",
" [0.7228, 0.3696, 0.9871],\n",
" [0.9236, 0.4115, 0.4540],\n",
" [0.7930, 0.3345, 0.5961]])\n"
]
}
],
"source": [
"# example 1 - simple tensor build \n",
"a = torch.rand(5,3)\n",
"print (a)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"network topology: Net(\n",
" (hidden_layer): Linear(in_features=1, out_features=1, bias=True)\n",
" (output_layer): Linear(in_features=1, out_features=1, bias=True)\n",
")\n",
"w_l1 = 1.58\n",
"b_l1 = -0.14\n",
"w_l2 = 2.45\n",
"b_l2 = -0.11\n",
"a_l2 = 0.8506\n",
"updated_w_l1 = 1.5814\n",
"updated_b_l1 = -0.1383\n",
"updated_w_l2 = 2.4529\n",
"updated_b_l2 = -0.1062\n",
"updated_a_l2 = 0.8515\n"
]
}
],
"source": [
"# example 2 - taking an optimization step\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"\n",
"\n",
"class Net(nn.Module):\n",
" def __init__(self):\n",
" super(Net, self).__init__()\n",
" self.hidden_layer = nn.Linear(1, 1)\n",
" self.hidden_layer.weight = torch.nn.Parameter(torch.tensor([[1.58]]))\n",
" self.hidden_layer.bias = torch.nn.Parameter(torch.tensor([-0.14]))\n",
"\n",
" self.output_layer = nn.Linear(1, 1)\n",
" self.output_layer.weight = torch.nn.Parameter(torch.tensor([[2.45]]))\n",
" self.output_layer.bias = torch.nn.Parameter(torch.tensor([-0.11]))\n",
"\n",
" def forward(self, x):\n",
" x = torch.sigmoid(self.hidden_layer(x))\n",
" x = torch.sigmoid(self.output_layer(x))\n",
" return x\n",
"\n",
"\n",
"net = Net()\n",
"print(f\"network topology: {net}\")\n",
"\n",
"print(f\"w_l1 = {round(net.hidden_layer.weight.item(), 4)}\")\n",
"print(f\"b_l1 = {round(net.hidden_layer.bias.item(), 4)}\")\n",
"print(f\"w_l2 = {round(net.output_layer.weight.item(), 4)}\")\n",
"print(f\"b_l2 = {round(net.output_layer.bias.item(), 4)}\")\n",
"\n",
"# run input data forward through network\n",
"input_data = torch.tensor([0.8])\n",
"output = net(input_data)\n",
"print(f\"a_l2 = {round(output.item(), 4)}\")\n",
"\n",
"# backpropagate gradient\n",
"target = torch.tensor([1.])\n",
"criterion = nn.MSELoss()\n",
"loss = criterion(output, target)\n",
"net.zero_grad()\n",
"loss.backward()\n",
"\n",
"# update weights and biases\n",
"optimizer = optim.SGD(net.parameters(), lr=0.1)\n",
"optimizer.step()\n",
"\n",
"print(f\"updated_w_l1 = {round(net.hidden_layer.weight.item(), 4)}\")\n",
"print(f\"updated_b_l1 = {round(net.hidden_layer.bias.item(), 4)}\")\n",
"print(f\"updated_w_l2 = {round(net.output_layer.weight.item(), 4)}\")\n",
"print(f\"updated_b_l2 = {round(net.output_layer.bias.item(), 4)}\")\n",
"\n",
"output = net(input_data)\n",
"print(f\"updated_a_l2 = {round(output.item(), 4)}\")"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.5"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment