@Curt-Park
Last active May 10, 2023 15:48
#include <iostream>
#include <vector>
#include <array>
#include <opencv2/highgui.hpp>
#include <opencv2/opencv.hpp>
#include <onnxruntime/onnxruntime_cxx_api.h>

int main(int argc, const char* argv[]) {
  std::cout << "Optimized? " << cv::useOptimized() << std::endl;
  cv::setUseOptimized(true);

  // A 1024x1024 single-channel test image filled with 200.
  // Note: cv::Mat takes (rows, cols), i.e. (H, W).
  auto H = 1024, W = 1024;
  cv::Mat image(H, W, CV_8UC1, cv::Scalar(200));

  // Matrix dot product (the result is discarded; only the call is timed).
  auto e1 = cv::getTickCount();
  image.dot(image);
  auto e2 = cv::getTickCount();
  auto duration = (e2 - e1) / cv::getTickFrequency();
  std::cout << "Matrix Dot Product " << "Elapsed(sec): " << duration << std::endl;
  // Zero out a 50-pixel border via cv::Range-based ROI assignments.
  e1 = cv::getTickCount();
  image(cv::Range(0, 50), cv::Range(0, W)) = 0;
  image(cv::Range(0, H), cv::Range(0, 50)) = 0;
  image(cv::Range(H - 50, H), cv::Range(0, W)) = 0;
  image(cv::Range(0, H), cv::Range(W - 50, W)) = 0;
  e2 = cv::getTickCount();
  duration = (e2 - e1) / cv::getTickFrequency();
  std::cout << "Matrix Value Assignment " << "Elapsed(sec): " << duration << std::endl;
  // cv::imshow("Value assignments", image);
  // cv::waitKey(0);
  // Element-wise comparison: a mask that is 255 where image == 0 (the border) and 0 elsewhere.
  // Assigning to cv::Mat forces the lazy MatExpr to be evaluated inside the timed region.
  e1 = cv::getTickCount();
  cv::Mat invert = (image == 0);
  e2 = cv::getTickCount();
  duration = (e2 - e1) / cv::getTickFrequency();
  std::cout << "Matrix Inversion " << "Elapsed(sec): " << duration << std::endl;
  // cv::imshow("Invert", invert);
  // cv::waitKey(0);
  // Bitwise OR of the bordered image and the border mask.
  e1 = cv::getTickCount();
  cv::Mat bitwiseOr(H, W, CV_8UC1, cv::Scalar(0));
  cv::bitwise_or(image, invert, bitwiseOr);
  e2 = cv::getTickCount();
  duration = (e2 - e1) / cv::getTickFrequency();
  std::cout << "Bitwise or " << "Elapsed(sec): " << duration << std::endl;
  // cv::imshow("Bitwise or", bitwiseOr);
  // cv::waitKey(0);
  // Inference with the SAM decoder model.
  Ort::Env env;
  // Ort::SessionOptions{nullptr} passes a null options handle, i.e. default session options.
  Ort::Session session{env, "sam_decoder_uint8.onnx", Ort::SessionOptions{nullptr}};
  auto memoryInfo = Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU);
  const char* const inputNames[6] = {
      "image_embeddings", "point_coords", "point_labels",
      "mask_input", "has_mask_input", "orig_im_size"
  };
  const char* const outputNames[3] = {"masks", "iou_predictions", "low_res_masks"};
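  // For reference (not in the original gist): the shapes below follow the
  // official SAM ONNX decoder export --
  //   image_embeddings: float[1, 256, 64, 64]  (output of the SAM image encoder)
  //   point_coords:     float[1, N, 2]         (prompt points in the 1024x1024 frame)
  //   point_labels:     float[1, N]            (1 = foreground, 0 = background)
  //   mask_input:       float[1, 1, 256, 256]  (optional previous low-res mask)
  //   has_mask_input:   float[1]               (0 or 1)
  //   orig_im_size:     float[2]               (original image height, width)
  // and the outputs are masks, iou_predictions, and low_res_masks.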
  std::vector<Ort::Value> inputs;

  // image_embeddings: zero-initialized dummy embedding (only timing matters here).
  std::array<float, 256 * 64 * 64> imageEmbedding{};
  std::array<int64_t, 4> imageEmbeddingShape{1, 256, 64, 64};
  inputs.push_back(Ort::Value::CreateTensor<float>(
      memoryInfo,
      imageEmbedding.data(), imageEmbedding.size(),
      imageEmbeddingShape.data(), imageEmbeddingShape.size()
  ));

  // point_coords: a single prompt point at the image center.
  std::array<float, 1 * 1 * 2> pointCoords{W / 2.f, H / 2.f};
  std::array<int64_t, 3> pointCoordsShape{1, 1, 2};
  inputs.push_back(Ort::Value::CreateTensor<float>(
      memoryInfo,
      pointCoords.data(), pointCoords.size(),
      pointCoordsShape.data(), pointCoordsShape.size()
  ));

  // point_labels: 1 marks the point as a foreground prompt.
  std::array<float, 1 * 1> pointLabels{1};
  std::array<int64_t, 2> pointLabelsShape{1, 1};
  inputs.push_back(Ort::Value::CreateTensor<float>(
      memoryInfo,
      pointLabels.data(), pointLabels.size(),
      pointLabelsShape.data(), pointLabelsShape.size()
  ));

  // mask_input: unused here (has_mask_input is 0), but the input must still be provided.
  std::array<float, 1 * 1 * 256 * 256> maskInput{};
  std::array<int64_t, 4> maskInputShape{1, 1, 256, 256};
  inputs.push_back(Ort::Value::CreateTensor<float>(
      memoryInfo,
      maskInput.data(), maskInput.size(),
      maskInputShape.data(), maskInputShape.size()
  ));

  // has_mask_input: 0 = no previous mask.
  std::array<float, 1> hasMaskInput{0};
  std::array<int64_t, 1> hasMaskInputShape{1};
  inputs.push_back(Ort::Value::CreateTensor<float>(
      memoryInfo,
      hasMaskInput.data(), hasMaskInput.size(),
      hasMaskInputShape.data(), hasMaskInputShape.size()
  ));

  // orig_im_size: original image size as (height, width).
  std::array<float, 2> origImSize{1024, 1024};
  std::array<int64_t, 1> origImSizeShape{2};
  inputs.push_back(Ort::Value::CreateTensor<float>(
      memoryInfo,
      origImSize.data(), origImSize.size(),
      origImSizeShape.data(), origImSizeShape.size()
  ));
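
  // session.Run returns the outputs as a std::vector<Ort::Value>,
  // in the same order as outputNames (masks, iou_predictions, low_res_masks).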

  // Single inference (includes first-run warm-up costs).
  e1 = cv::getTickCount();
  auto outputs = session.Run(
      Ort::RunOptions{},
      inputNames, inputs.data(),
      inputs.size(), outputNames, 3
  );
  e2 = cv::getTickCount();
  duration = (e2 - e1) / cv::getTickFrequency();
  std::cout << "Inference " << "Elapsed(sec): " << duration << std::endl;

  // 100 consecutive inferences.
  e1 = cv::getTickCount();
  for (auto i = 0; i < 100; i++) {
    outputs = session.Run(
        Ort::RunOptions{},
        inputNames, inputs.data(),
        inputs.size(), outputNames, 3
    );
  }
  e2 = cv::getTickCount();
  duration = (e2 - e1) / cv::getTickFrequency();
  std::cout << "Inference x100 " << "Elapsed(sec): " << duration << std::endl;
  // Uncomment to inspect the first output tensor (masks).
  // auto data = outputs[0].GetTensorMutableData<float>();
  // auto typeInfo = outputs[0].GetTensorTypeAndShapeInfo();
  // std::cout << outputs.size() << std::endl;
  // for (auto i = 0; i < typeInfo.GetDimensionsCount(); i++) {
  //   std::cout << typeInfo.GetShape()[i] << " ";
  // }
  // std::cout << std::endl;
  // for (auto i = 0; i < typeInfo.GetElementCount(); i++) {
  //   std::cout << data[i] << " ";
  // }
  // std::cout << std::endl;
  return 0;
}
Curt-Park commented May 10, 2023

How-to-setup:

  1. brew install opencv pkg-config
  2. sudo ln -s /opt/homebrew/include/opencv4/opencv2/ /usr/local/include/opencv2
  3. Download ONNX Runtime 1.14.1 from https://github.com/microsoft/onnxruntime/releases/tag/v1.14.1
  4. Unzip onnxruntime-osx-arm64-1.14.1.tgz and move it to /usr/local/onnxruntime
  5. sudo ln -s /usr/local/onnxruntime/include /usr/local/include/onnxruntime
  6. sudo ln -s /usr/local/onnxruntime/lib/libonnxruntime.1.14.1.dylib /usr/local/lib/libonnxruntime.1.14.1.dylib
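
To sanity-check the setup before building the gist itself, a minimal snippet like the one below (not part of the original gist, and assuming the same include/lib layout as the steps above) should compile with the build command in the next section and print both library versions:

#include <iostream>
#include <opencv2/opencv.hpp>
#include <onnxruntime/onnxruntime_cxx_api.h>

int main() {
  // OpenCV version string, e.g. "4.7.0".
  std::cout << "OpenCV: " << cv::getVersionString() << std::endl;
  // ONNX Runtime version via the C API base, e.g. "1.14.1".
  std::cout << "ONNX Runtime: " << OrtGetApiBase()->GetVersionString() << std::endl;
  return 0;
}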

How-to-build:

g++ main.cpp -o main --std=c++17 `pkg-config --cflags --libs opencv4` -I/usr/local/onnxruntime/include -L/usr/local/onnxruntime/lib -lonnxruntime

How-to-run:

./main
Optimized? 1
Matrix Dot Product Elapsed(sec): 0.000177042
Matrix Value Assignment Elapsed(sec): 1.5083e-05
Matrix Inversion Elapsed(sec): 2.375e-06
Bitwise or Elapsed(sec): 0.000256583
Inference Elapsed(sec): 0.0303222
Inference x100 Elapsed(sec): 2.88551
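
If you want to look at the predicted mask rather than just time the decoder, the commented-out block at the end of main can be extended along these lines. This is a rough sketch with illustrative variable names; it assumes the first output ("masks") comes back as a float tensor of shape 1 x 1 x H x W, as in the official SAM ONNX decoder export:

// Wrap the "masks" output in a cv::Mat view and threshold the logits at 0
// (the mask threshold used by SAM).
auto& masks = outputs[0];
auto info = masks.GetTensorTypeAndShapeInfo();
std::vector<int64_t> shape = info.GetShape();        // expected: {1, 1, H, W}
int maskH = static_cast<int>(shape[2]);
int maskW = static_cast<int>(shape[3]);
float* logits = masks.GetTensorMutableData<float>();
cv::Mat maskLogits(maskH, maskW, CV_32FC1, logits);  // no copy, just a view
cv::Mat binaryMask = maskLogits > 0;                 // 8-bit mask: 255 inside, 0 outside
cv::imshow("mask", binaryMask);
cv::waitKey(0);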
