From 22474e9ad5f87be10be6563834ff55260c083cfa Mon Sep 17 00:00:00 2001
From: Onuralp SEZER
Date: Thu, 10 Aug 2023 18:51:19 +0300
Subject: [PATCH] Improve YOLOv8 ONNX Runtime C++ example for all OS with
 `CMakeLists.txt` support (#4274)

Signed-off-by: Onuralp SEZER
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Glenn Jocher
---
 .../YOLOv8-ONNXRuntime-CPP/CMakeLists.txt     | 71 ++++++++++++++++
 examples/YOLOv8-ONNXRuntime-CPP/README.md     | 23 +++--
 examples/YOLOv8-ONNXRuntime-CPP/inference.cpp | 27 +++---
 examples/YOLOv8-ONNXRuntime-CPP/inference.h   | 25 +++---
 examples/YOLOv8-ONNXRuntime-CPP/main.cpp      | 84 +++++++++++++++----
 5 files changed, 178 insertions(+), 52 deletions(-)
 create mode 100644 examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt

diff --git a/examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt b/examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt
new file mode 100644
index 0000000..97a9d19
--- /dev/null
+++ b/examples/YOLOv8-ONNXRuntime-CPP/CMakeLists.txt
@@ -0,0 +1,71 @@
+cmake_minimum_required(VERSION 3.5)
+
+set(PROJECT_NAME Yolov8OnnxRuntimeCPPInference)
+project(${PROJECT_NAME} VERSION 0.0.1 LANGUAGES CXX)
+
+
+# -------------- Support C++17 for using filesystem ------------------#
+set(CMAKE_CXX_STANDARD 17)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+set(CMAKE_CXX_EXTENSIONS ON)
+set(CMAKE_INCLUDE_CURRENT_DIR ON)
+
+
+# OpenCV
+find_package(OpenCV REQUIRED)
+include_directories(${OpenCV_INCLUDE_DIRS})
+
+
+# ONNXRUNTIME
+
+# Set ONNXRUNTIME_VERSION
+set(ONNXRUNTIME_VERSION 1.15.1)
+
+if(WIN32)
+    # CPU
+    # set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-win-x64-${ONNXRUNTIME_VERSION}")
+    # GPU
+    set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-win-x64-gpu-${ONNXRUNTIME_VERSION}")
+elseif(LINUX)
+    # CPU
+    # set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-linux-x64-${ONNXRUNTIME_VERSION}")
+    # GPU
+    set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-linux-x64-gpu-${ONNXRUNTIME_VERSION}")
+elseif(APPLE)
+    set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-osx-arm64-${ONNXRUNTIME_VERSION}")
+    # Apple X64 binary
+    # set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-osx-x64-${ONNXRUNTIME_VERSION}")
+    # Apple Universal binary
+    # set(ONNXRUNTIME_ROOT "${CMAKE_CURRENT_SOURCE_DIR}/onnxruntime-osx-universal2-${ONNXRUNTIME_VERSION}")
+endif()
+
+include_directories(${PROJECT_NAME} ${ONNXRUNTIME_ROOT}/include)
+
+set(PROJECT_SOURCES
+    main.cpp
+    inference.h
+    inference.cpp
+)
+
+add_executable(${PROJECT_NAME} ${PROJECT_SOURCES})
+
+if(WIN32)
+    target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${ONNXRUNTIME_ROOT}/lib/onnxruntime.lib)
+elseif(LINUX)
+    target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${ONNXRUNTIME_ROOT}/lib/libonnxruntime.so)
+elseif(APPLE)
+    target_link_libraries(${PROJECT_NAME} ${OpenCV_LIBS} ${ONNXRUNTIME_ROOT}/lib/libonnxruntime.dylib)
+endif()
+
+# For Windows systems, copy onnxruntime.dll to the same folder as the executable
+if(WIN32)
+    add_custom_command(TARGET ${PROJECT_NAME} POST_BUILD
+        COMMAND ${CMAKE_COMMAND} -E copy_if_different
+        "${ONNXRUNTIME_ROOT}/lib/onnxruntime.dll"
+        $<TARGET_FILE_DIR:${PROJECT_NAME}>)
+endif()
+
+# Download https://raw.githubusercontent.com/ultralytics/ultralytics/main/ultralytics/cfg/datasets/coco.yaml
+# and put it in the same folder as the executable
+configure_file(coco.yaml ${CMAKE_CURRENT_BINARY_DIR}/coco.yaml COPYONLY)
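The CMake script above picks either a CPU-only or a GPU build of ONNX Runtime at configure time, while `CudaEnable` is only requested at runtime. A quick way to confirm that the linked library actually ships the CUDA execution provider is `Ort::GetAvailableProviders()` from the public C++ API; a minimal standalone sketch (not part of the patch):

```c++
// Sketch: list the execution providers compiled into the linked ONNX Runtime.
// If "CUDAExecutionProvider" is absent, construct DCSP_INIT_PARAM with
// CudaEnable = false instead of true.
#include <iostream>
#include "onnxruntime_cxx_api.h"

int main()
{
    for (const std::string& provider : Ort::GetAvailableProviders())
    {
        std::cout << provider << std::endl;  // e.g. CUDAExecutionProvider, CPUExecutionProvider
    }
    return 0;
}
```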
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/README.md b/examples/YOLOv8-ONNXRuntime-CPP/README.md
index c498664..b5e02e0 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/README.md
+++ b/examples/YOLOv8-ONNXRuntime-CPP/README.md
@@ -2,8 +2,6 @@

 This example demonstrates how to perform inference using YOLOv8 in C++ with ONNX Runtime and OpenCV's API.

-We recommend using Visual Studio to build the project.
-
 ## Benefits

 - Friendly for deployment in the industrial sector.
@@ -25,13 +23,20 @@ model = YOLO("yolov8n.pt")
 model.export(format="onnx", opset=12, simplify=True, dynamic=False, imgsz=640)
 ```

+Alternatively, you can export the model from the terminal with the following command:
+
+```bash
+yolo export model=yolov8n.pt opset=12 simplify=True dynamic=False format=onnx imgsz=640,640
+```
+
 ## Dependencies

-| Dependency              | Version  |
-| ----------------------- | -------- |
-| Onnxruntime-win-x64-gpu | >=1.14.1 |
-| OpenCV                  | >=4.0.0  |
-| C++                     | >=17     |
+| Dependency                           | Version  |
+| ------------------------------------ | -------- |
+| ONNX Runtime (Linux, Windows, macOS) | >=1.14.1 |
+| OpenCV                               | >=4.0.0  |
+| C++                                  | >=17     |
+| CMake                                | >=3.5    |

 Note: The dependency on C++17 is due to the usage of the C++17 filesystem feature.

@@ -39,9 +44,9 @@ Note: The dependency on C++17 is due to the usage of the C++17 filesystem featur

 ```c++
 // CPU inference
-DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {imgsz_w, imgsz_h}, class_num, 0.1, 0.5, false};
+DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {imgsz_w, imgsz_h}, 0.1, 0.5, false};
 // GPU inference
-DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {imgsz_w, imgsz_h}, class_num, 0.1, 0.5, true};
+DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {imgsz_w, imgsz_h}, 0.1, 0.5, true};

 // Load your image
 cv::Mat img = cv::imread(img_path);
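The README's usage snippet stops after loading the image. Assembled from the `DCSP_CORE` interface this patch defines, the complete flow looks roughly like this (a sketch; the helper name `detect` is illustrative, and the `classes` assignment stands in for the YAML parsing added to `main.cpp` later in the patch):

```c++
#include <opencv2/opencv.hpp>
#include "inference.h"

int detect(const std::string& model_path, const std::string& img_path)
{
    DCSP_CORE* yoloDetector = new DCSP_CORE;
    yoloDetector->classes = { /* the model's class names, in index order */ };

    // CPU inference; flip the last field to true for CUDA
    DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 0.1, 0.5, false };
    yoloDetector->CreateSession(params);

    cv::Mat img = cv::imread(img_path);
    std::vector<DCSP_RESULT> res;
    yoloDetector->RunSession(img, res);

    for (auto& re : res)
    {
        cv::rectangle(img, re.box, cv::Scalar(0, 0, 255), 3);
        cv::putText(img, yoloDetector->classes[re.classId], cv::Point(re.box.x, re.box.y - 5),
                    cv::FONT_HERSHEY_SIMPLEX, 0.75, cv::Scalar(255, 255, 0), 2);
    }
    cv::imwrite("result.jpg", img);

    delete yoloDetector;
    return 0;
}
```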
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
index 5af395d..953fa70 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
+++ b/examples/YOLOv8-ONNXRuntime-CPP/inference.cpp
@@ -2,7 +2,6 @@
 #include <regex>

 #define benchmark
-#define ELOG

 DCSP_CORE::DCSP_CORE()
 {
@@ -29,7 +28,7 @@ char* BlobFromImage(cv::Mat& iImg, T& iBlob)
     {
        for (int w = 0; w < imgWidth; w++)
        {
-           iBlob[c * imgWidth * imgHeight + h * imgWidth + w] = (std::remove_pointer<T>::type)((iImg.at<cv::Vec3b>(h, w)[c]) / 255.0f);
+           iBlob[c * imgWidth * imgHeight + h * imgWidth + w] = typename std::remove_pointer<T>::type((iImg.at<cv::Vec3b>(h, w)[c]) / 255.0f);
        }
     }
 }
@@ -40,8 +39,8 @@ char* BlobFromImage(cv::Mat& iImg, T& iBlob)
 char* PostProcess(cv::Mat& iImg, std::vector<int> iImgSize, cv::Mat& oImg)
 {
     cv::Mat img = iImg.clone();
-    cv::resize(iImg, oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));
-    if (img.channels() == 1)
+    cv::resize(iImg, oImg, cv::Size(iImgSize.at(0), iImgSize.at(1)));
+    if (img.channels() == 1)
     {
         cv::cvtColor(oImg, oImg, cv::COLOR_GRAY2BGR);
     }
@@ -75,17 +74,21 @@ char* DCSP_CORE::CreateSession(DCSP_INIT_PARAM &iParams)
         OrtCUDAProviderOptions cudaOption;
         cudaOption.device_id = 0;
         sessionOption.AppendExecutionProvider_CUDA(cudaOption);
-        //OrtOpenVINOProviderOptions ovOption;
-        //sessionOption.AppendExecutionProvider_OpenVINO(ovOption);
     }
     sessionOption.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_ALL);
     sessionOption.SetIntraOpNumThreads(iParams.IntraOpNumThreads);
     sessionOption.SetLogSeverityLevel(iParams.LogSeverityLevel);
+
+#ifdef _WIN32
     int ModelPathSize = MultiByteToWideChar(CP_UTF8, 0, iParams.ModelPath.c_str(), static_cast<int>(iParams.ModelPath.length()), nullptr, 0);
     wchar_t* wide_cstr = new wchar_t[ModelPathSize + 1];
     MultiByteToWideChar(CP_UTF8, 0, iParams.ModelPath.c_str(), static_cast<int>(iParams.ModelPath.length()), wide_cstr, ModelPathSize);
     wide_cstr[ModelPathSize] = L'\0';
     const wchar_t* modelPath = wide_cstr;
+#else
+    const char* modelPath = iParams.ModelPath.c_str();
+#endif // _WIN32
+
     session = new Ort::Session(env, modelPath, sessionOption);
     Ort::AllocatorWithDefaultOptions allocator;
     size_t inputNodesNum = session->GetInputCount();
@@ -96,7 +99,6 @@ char* DCSP_CORE::CreateSession(DCSP_INIT_PARAM &iParams)
         strcpy(temp_buf, input_node_name.get());
         inputNodeNames.push_back(temp_buf);
     }
-
     size_t OutputNodesNum = session->GetOutputCount();
     for (size_t i = 0; i < OutputNodesNum; i++)
     {
@@ -151,7 +153,7 @@ char* DCSP_CORE::RunSession(cv::Mat &iImg, std::vector<DCSP_RESULT>& oResult)
 template<typename N>
 char* DCSP_CORE::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::vector<int64_t>& inputNodeDims, std::vector<DCSP_RESULT>& oResult)
 {
-    Ort::Value inputTensor = Ort::Value::CreateTensor<std::remove_pointer<N>::type>(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1), inputNodeDims.data(), inputNodeDims.size());
+    Ort::Value inputTensor = Ort::Value::CreateTensor<typename std::remove_pointer<N>::type>(Ort::MemoryInfo::CreateCpu(OrtDeviceAllocator, OrtMemTypeCPU), blob, 3 * imgSize.at(0) * imgSize.at(1), inputNodeDims.data(), inputNodeDims.size());
 #ifdef benchmark
     clock_t starttime_2 = clock();
 #endif // benchmark
@@ -159,10 +161,11 @@ char* DCSP_CORE::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std
 #ifdef benchmark
     clock_t starttime_3 = clock();
 #endif // benchmark
+
     Ort::TypeInfo typeInfo = outputTensor.front().GetTypeInfo();
     auto tensor_info = typeInfo.GetTensorTypeAndShapeInfo();
     std::vector<int64_t> outputNodeDims = tensor_info.GetShape();
-    std::remove_pointer<N>::type* output = outputTensor.front().GetTensorMutableData<typename std::remove_pointer<N>::type>();
+    auto output = outputTensor.front().GetTensorMutableData<typename std::remove_pointer<N>::type>();
     delete blob;
     switch (modelType)
     {
@@ -183,7 +186,7 @@ char* DCSP_CORE::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std
         for (int i = 0; i < strideNum; ++i)
         {
             float* classesScores = data + 4;
-            cv::Mat scores(1, classesNum, CV_32FC1, classesScores);
+            cv::Mat scores(1, this->classes.size(), CV_32FC1, classesScores);
             cv::Point class_id;
             double maxClassScore;
             cv::minMaxLoc(scores, 0, &maxClassScore, 0, &class_id);
@@ -203,13 +206,14 @@ char* DCSP_CORE::TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std
                 int width = int(w * x_factor);
                 int height = int(h * y_factor);

-                boxes.push_back(cv::Rect(left, top, width, height));
+                boxes.emplace_back(left, top, width, height);
             }
             data += signalResultNum;
         }
         std::vector<int> nmsResult;
         cv::dnn::NMSBoxes(boxes, confidences, rectConfidenceThreshold, iouThreshold, nmsResult);
+
         for (int i = 0; i < nmsResult.size(); ++i)
         {
             int idx = nmsResult[i];
@@ -266,6 +270,5 @@ char* DCSP_CORE::WarmUpSession()
             std::cout << "[DCSP_ONNX(CUDA)]: " << "Cuda warm-up cost " << post_process_time << " ms. " << std::endl;
         }
     }
-
     return Ret;
 }
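For orientation, the box decode touched in the `TensorProcess` hunks above converts raw YOLOv8 outputs, stored as (center-x, center-y, width, height) at network resolution, into image-space rectangles via `x_factor`/`y_factor`, which the unchanged part of the file computes from the original image size. A sketch of that conversion; the center-to-corner step follows the usual YOLO convention and is assumed here rather than quoted from the unchanged lines:

```c++
#include <opencv2/core.hpp>

// Sketch: decode one raw YOLOv8 box. x_factor/y_factor are original image size
// divided by network input size (e.g. img.cols / 640.0f).
cv::Rect DecodeBox(float x, float y, float w, float h, float x_factor, float y_factor)
{
    int left   = int((x - 0.5f * w) * x_factor);  // center -> top-left corner (assumed convention)
    int top    = int((y - 0.5f * h) * y_factor);
    int width  = int(w * x_factor);               // matches the context lines in the hunk above
    int height = int(h * y_factor);
    return { left, top, width, height };
}
```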
" << std::endl; } } - return Ret; } diff --git a/examples/YOLOv8-ONNXRuntime-CPP/inference.h b/examples/YOLOv8-ONNXRuntime-CPP/inference.h index d00fecd..b30f9f0 100644 --- a/examples/YOLOv8-ONNXRuntime-CPP/inference.h +++ b/examples/YOLOv8-ONNXRuntime-CPP/inference.h @@ -1,15 +1,17 @@ #pragma once -#define _CRT_SECURE_NO_WARNINGS #define RET_OK nullptr +#ifdef _WIN32 +#include +#include +#include +#endif + #include #include -#include -#include "io.h" -#include "direct.h" -#include "opencv.hpp" -#include +#include +#include #include "onnxruntime_cxx_api.h" @@ -23,13 +25,12 @@ enum MODEL_TYPE }; + typedef struct _DCSP_INIT_PARAM { std::string ModelPath; MODEL_TYPE ModelType = YOLO_ORIGIN_V8; std::vector imgSize={640, 640}; - - int classesNum=80; float RectConfidenceThreshold = 0.6; float iouThreshold = 0.5; bool CudaEnable = false; @@ -55,16 +56,14 @@ public: public: char* CreateSession(DCSP_INIT_PARAM &iParams); - char* RunSession(cv::Mat &iImg, std::vector& oResult); - char* WarmUpSession(); - template char* TensorProcess(clock_t& starttime_1, cv::Mat& iImg, N& blob, std::vector& inputNodeDims, std::vector& oResult); + std::vector classes{}; private: Ort::Env env; @@ -74,9 +73,7 @@ private: std::vector inputNodeNames; std::vector outputNodeNames; - - int classesNum; - MODEL_TYPE modelType; + MODEL_TYPE modelType; std::vector imgSize; float rectConfidenceThreshold; float iouThreshold; diff --git a/examples/YOLOv8-ONNXRuntime-CPP/main.cpp b/examples/YOLOv8-ONNXRuntime-CPP/main.cpp index f13d782..f4ba03e 100644 --- a/examples/YOLOv8-ONNXRuntime-CPP/main.cpp +++ b/examples/YOLOv8-ONNXRuntime-CPP/main.cpp @@ -1,44 +1,94 @@ #include -#include #include "inference.h" #include - - +#include void file_iterator(DCSP_CORE*& p) { - std::filesystem::path img_path = R"(E:\project\Project_C++\DCPS_ONNX\TEST_ORIGIN)"; - int k = 0; - for (auto& i : std::filesystem::directory_iterator(img_path)) + std::filesystem::path current_path = std::filesystem::current_path(); + std::filesystem::path imgs_path = current_path/"images"; + for (auto& i : std::filesystem::directory_iterator(imgs_path)) { - if (i.path().extension() == ".jpg") + if (i.path().extension() == ".jpg" || i.path().extension() == ".png") { std::string img_path = i.path().string(); - //std::cout << img_path << std::endl; cv::Mat img = cv::imread(img_path); std::vector res; - char* ret = p->RunSession(img, res); - for (int i = 0; i < res.size(); i++) + p->RunSession(img, res); + + for (auto & re : res) { - cv::rectangle(img, res.at(i).box, cv::Scalar(125, 123, 0), 3); + cv::rectangle(img, re.box, cv::Scalar(0, 0 , 255), 3); + std::string label = p->classes[re.classId]; + cv::putText( + img, + label, + cv::Point(re.box.x, re.box.y - 5), + cv::FONT_HERSHEY_SIMPLEX, + 0.75, + cv::Scalar(255, 255, 0), + 2 + ); } - - k++; - cv::imshow("TEST_ORIGIN", img); + cv::imshow("Result", img); cv::waitKey(0); cv::destroyAllWindows(); - //cv::imwrite("E:\\output\\" + std::to_string(k) + ".png", img); } } } +int read_coco_yaml(DCSP_CORE*& p) +{ + // Open the YAML file + std::ifstream file("coco.yaml"); + if (!file.is_open()) { + std::cerr << "Failed to open file" << std::endl; + return 1; + } + + // Read the file line by line + std::string line; + std::vector lines; + while (std::getline(file, line)) { + lines.push_back(line); + } + + // Find the start and end of the names section + std::size_t start = 0; + std::size_t end = 0; + for (std::size_t i = 0; i < lines.size(); i++) { + if (lines[i].find("names:") != std::string::npos) { + start = i + 1; + } else if (start 
diff --git a/examples/YOLOv8-ONNXRuntime-CPP/main.cpp b/examples/YOLOv8-ONNXRuntime-CPP/main.cpp
index f13d782..f4ba03e 100644
--- a/examples/YOLOv8-ONNXRuntime-CPP/main.cpp
+++ b/examples/YOLOv8-ONNXRuntime-CPP/main.cpp
@@ -1,44 +1,94 @@
 #include <iostream>
-#include <stdio.h>
 #include "inference.h"
 #include <filesystem>
-
-
+#include <fstream>

 void file_iterator(DCSP_CORE*& p)
 {
-    std::filesystem::path img_path = R"(E:\project\Project_C++\DCPS_ONNX\TEST_ORIGIN)";
-    int k = 0;
-    for (auto& i : std::filesystem::directory_iterator(img_path))
+    std::filesystem::path current_path = std::filesystem::current_path();
+    std::filesystem::path imgs_path = current_path / "images";
+    for (auto& i : std::filesystem::directory_iterator(imgs_path))
     {
-        if (i.path().extension() == ".jpg")
+        if (i.path().extension() == ".jpg" || i.path().extension() == ".png")
         {
             std::string img_path = i.path().string();
-            //std::cout << img_path << std::endl;
             cv::Mat img = cv::imread(img_path);
             std::vector<DCSP_RESULT> res;
-            char* ret = p->RunSession(img, res);
-            for (int i = 0; i < res.size(); i++)
+            p->RunSession(img, res);
+
+            for (auto& re : res)
             {
-                cv::rectangle(img, res.at(i).box, cv::Scalar(125, 123, 0), 3);
+                cv::rectangle(img, re.box, cv::Scalar(0, 0, 255), 3);
+                std::string label = p->classes[re.classId];
+                cv::putText(
+                    img,
+                    label,
+                    cv::Point(re.box.x, re.box.y - 5),
+                    cv::FONT_HERSHEY_SIMPLEX,
+                    0.75,
+                    cv::Scalar(255, 255, 0),
+                    2
+                );
             }
-
-            k++;
-            cv::imshow("TEST_ORIGIN", img);
+            cv::imshow("Result", img);
             cv::waitKey(0);
             cv::destroyAllWindows();
-            //cv::imwrite("E:\\output\\" + std::to_string(k) + ".png", img);
         }
     }
 }

+int read_coco_yaml(DCSP_CORE*& p)
+{
+    // Open the YAML file
+    std::ifstream file("coco.yaml");
+    if (!file.is_open()) {
+        std::cerr << "Failed to open file" << std::endl;
+        return 1;
+    }
+
+    // Read the file line by line
+    std::string line;
+    std::vector<std::string> lines;
+    while (std::getline(file, line)) {
+        lines.push_back(line);
+    }
+
+    // Find the start and end of the names section
+    std::size_t start = 0;
+    std::size_t end = 0;
+    for (std::size_t i = 0; i < lines.size(); i++) {
+        if (lines[i].find("names:") != std::string::npos) {
+            start = i + 1;
+        } else if (start > 0 && lines[i].find(':') == std::string::npos) {
+            end = i;
+            break;
+        }
+    }
+
+    // Extract the names
+    std::vector<std::string> names;
+    for (std::size_t i = start; i < end; i++) {
+        std::stringstream ss(lines[i]);
+        std::string name;
+        std::getline(ss, name, ':'); // Extract the number before the delimiter
+        std::getline(ss, name);      // Extract the string after the delimiter
+        names.push_back(name);
+    }
+
+    p->classes = names;
+    return 0;
+}

 int main()
 {
     DCSP_CORE* p1 = new DCSP_CORE;
     std::string model_path = "yolov8n.onnx";
-    DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 80, 0.1, 0.5, false };
-    char* ret = p1->CreateSession(params);
+    read_coco_yaml(p1);
+    // GPU inference
+    DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 0.1, 0.5, true };
+    // CPU inference
+    // DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 0.1, 0.5, false };
+    p1->CreateSession(params);
     file_iterator(p1);
 }
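Two small caveats worth noting on `main.cpp` as merged: `read_coco_yaml` uses `std::stringstream` but relies on `<sstream>` arriving transitively (including it explicitly would be more robust), and `p1` is never freed. A sketch of the same `main` with automatic cleanup, assuming nothing beyond the declarations in `inference.h` (whether `DCSP_CORE`'s destructor releases the `Ort::Session` is outside this diff):

```c++
#include <memory>
#include "inference.h"

int main()
{
    auto core = std::make_unique<DCSP_CORE>();
    DCSP_CORE* p1 = core.get();  // raw alias so the DCSP_CORE*& parameters still bind

    std::string model_path = "yolov8n.onnx";
    read_coco_yaml(p1);
    DCSP_INIT_PARAM params{ model_path, YOLO_ORIGIN_V8, {640, 640}, 0.1, 0.5, false };
    p1->CreateSession(params);
    file_iterator(p1);
    return 0;  // core is destroyed here instead of leaking
}
```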