git clone https://github.com/UCLA-VAST/Stream-HLS
cd Stream-HLS
sudo add-apt-repository ppa:deadsnakes/ppa
sudo apt update
sudo apt install python3.11 python3.11-venv
python3.11 -m venv env
. env/bin/activate
sudo apt install clang lld cmake ninja-build  # clang/clang++, lld, CMake, and Ninja are needed for the LLVM/MLIR build below
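# optional sanity check (assumes the venv created above is active): the interpreter should report Python 3.11.x
python --version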
cd extern
git clone -b llvmorg-18.1.2 https://github.com/llvm/llvm-project.git
mkdir -p llvm-project/build
cd llvm-project/build
cmake -G Ninja ../llvm \
-DLLVM_ENABLE_PROJECTS=mlir \
-DCMAKE_BUILD_TYPE=Debug \
-DLLVM_ENABLE_ASSERTIONS=ON \
-DCMAKE_C_COMPILER=clang \
-DLLVM_TARGETS_TO_BUILD=host \
-DCMAKE_CXX_COMPILER=clang++ \
-DLLVM_ENABLE_LLD=ON
cmake --build . --target check-mlir
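# optional sanity check (assumes the build above succeeded and you are still in extern/llvm-project/build):
# the freshly built MLIR tools should now be under build/bin
./bin/mlir-opt --version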
# obtain an AMPL licence from https://portal.ampl.com/account/ampl/
python -m pip install amplpy --upgrade
python -m amplpy.modules install highs gurobi
python -m amplpy.modules activate <your-ampl-licence-uuid>  # use the licence UUID from your own AMPL portal account
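# optional check: list the solver modules amplpy reports as installed
# (a minimal sketch; assumes the amplpy.modules Python API is importable in this venv)
python -c "from amplpy import modules; print(modules.installed())"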
cd ../../..  # back to the Stream-HLS repo root
./build-streamhls.sh $(readlink -f extern/llvm-project)
cp env/lib/python3.11/site-packages/ampl_module_base/bin/* env/bin  # put the AMPL base binaries on the venv's PATH
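# optional check (assumes the venv is still active): the ampl binary copied above should now resolve from env/bin
which ampl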
# test.py — trace a tiny PyTorch model and lower it to linalg-on-tensors MLIR
import torch
import torch.nn as nn
import torch_mlir

class Model(nn.Module):
    def forward(self, a):
        return a * a ** a

model = Model()
model.eval()  # inference mode; tracing below assumes no training-only behaviour

inputs = torch.asarray([1.0, 2.0])

# lower the traced model to the linalg-on-tensors dialect
module = torch_mlir.compile(
    model,
    inputs,
    output_type="linalg-on-tensors",
    use_tracing=True)

path = 'test.mlir'
with open(path, 'w') as f:
    print(module.operation.get_asm(), file=f)
#!/bin/bash
# run script: regenerate test.mlir and drive the Stream-HLS host/kernel pipelines
# (run from a work directory one level below the repo root, hence the ../env path)
set -ex
. ../env/bin/activate
python3 test.py
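# sanity check: the kernel pipeline below uses top-func=forward, so the traced module
# should contain a func.func named @forward (the symbol name is assumed from tracing nn.Module.forward)
grep -m1 "func.func @forward" test.mlir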
streamhls-opt ./test.mlir \
-streamhls-host-pipeline \
> host.mlir
streamhls-translate ./host.mlir \
-emit-vivado-hls \
-vitis-hls-weights-dir=data \
-vitis-hls-is-host=true \
-o host_tb.cpp
streamhls-opt test.mlir \
-streamhls-kernel-pipeline="top-func=forward \
graph-file=graph \
report-file=report \
optimize-schedule=1 \
parallelize-nodes=1 \
combined-optimization=0 \
board-dsps=1024 \
tiling-limit=16 \
time-limit-minutes=10 \
bufferize-func-args=0 \
optimize-conv-reuse=0 \
minimize-on-chip-buffers=0 \
debug-point=14" > kernel.mlir
streamhls-translate \
kernel.mlir \
-emit-vivado-hls \
-o kernel.cpp
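# at this point kernel.cpp (the generated accelerator code) and host_tb.cpp (the host-side
# testbench emitted with -vitis-hls-is-host=true) are presumably the sources to hand to Vitis HLS;
# a quick look at everything the flow produced:
ls -lh test.mlir host.mlir kernel.mlir host_tb.cpp kernel.cpp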