Table of Contents

Code
CMakeLists.txt
Build
Test Run
Results
Download
Code

The demo below loads the YOLOv8s IR model (yolov8s.xml / yolov8s.bin) with the OpenVINO Runtime, runs inference on a test image, and uses OpenCV to decode and draw the detections.
#include <iostream>
#include <string>
#include <vector>
#include <openvino/openvino.hpp>
#include <opencv2/opencv.hpp>
#include <dirent.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
// BGR colors, cycled by class id (modulo 6) when drawing boxes
std::vector<cv::Scalar> colors = { cv::Scalar(0, 0, 255), cv::Scalar(0, 255, 0), cv::Scalar(255, 0, 0),
                                   cv::Scalar(255, 100, 50), cv::Scalar(50, 100, 255), cv::Scalar(255, 50, 100) };
// The 80 COCO class names, in the order the YOLOv8 model outputs them
const std::vector<std::string> class_names = {
"person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
"fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
"elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
"skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
"tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
"sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
"potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
"microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
"hair drier", "toothbrush" };
using namespace cv;
using namespace dnn;

// Pad the image to a square (top-left aligned) so the later 640x640 resize keeps the original aspect ratio
Mat letterbox(const cv::Mat& source)
{
    int col = source.cols;
    int row = source.rows;
    int _max = MAX(col, row);
    Mat result = Mat::zeros(_max, _max, CV_8UC3);
    source.copyTo(result(Rect(0, 0, col, row)));
    return result;
}
int main()
{
    clock_t start, end;
    std::cout << "8 steps in total" << std::endl;
    char buffer[100];
    getcwd(buffer, 100);
    std::cout << "Current working directory: " << buffer << std::endl;

    // -------- Step 1. Initialize OpenVINO Runtime Core --------
    std::cout << "1. Initialize OpenVINO Runtime Core" << std::endl;
    ov::Core core;

    // -------- Step 2. Compile the Model --------
    std::cout << "2. Compile the Model" << std::endl;
    String model_path = String(buffer) + "/yolov8s.xml";
    std::cout << "model_path:\t" << model_path << std::endl;
    ov::CompiledModel compiled_model;
    try {
        compiled_model = core.compile_model(model_path, "CPU");
    }
    catch (std::exception& e) {
        std::cout << "Exception while compiling the model: " << e.what() << std::endl;
        return 0;
    }
    // -------- Step 3. Create an Inference Request --------
    std::cout << "3. Create an Inference Request" << std::endl;
    ov::InferRequest infer_request = compiled_model.create_infer_request();

    // -------- Step 4. Read a picture file and do the preprocess --------
    std::cout << "4. Read a picture file and do the preprocess" << std::endl;
    String img_path = String(buffer) + "/test2.jpg";
    std::cout << "img_path:\t" << img_path << std::endl;
    Mat img = cv::imread(img_path);
    if (img.empty()) {
        std::cout << "Failed to read image: " << img_path << std::endl;
        return 0;
    }

    // Preprocess the image: pad to a square, then build a normalized 640x640 RGB blob (NCHW, values in [0, 1])
    Mat letterbox_img = letterbox(img);
    float scale = letterbox_img.size[0] / 640.0;   // maps 640x640 coordinates back to the original image
    Mat blob = blobFromImage(letterbox_img, 1.0 / 255.0, Size(640, 640), Scalar(), true);

    // -------- Step 5. Feed the blob into the input node of the Model --------
    std::cout << "5. Feed the blob into the input node of the Model" << std::endl;
    // Get input port for model with one input
    auto input_port = compiled_model.input();
    // Create tensor from external memory (the blob's buffer)
    ov::Tensor input_tensor(input_port.get_element_type(), input_port.get_shape(), blob.ptr(0));
    // Set input tensor for model with one input
    infer_request.set_input_tensor(input_tensor);

    start = clock();
    // -------- Step 6. Start inference --------
    std::cout << "6. Start inference" << std::endl;
    infer_request.infer();
    end = clock();
    // clock() reports CPU time in ticks; convert to microseconds portably
    std::cout << "inference time = " << double(end - start) * 1000000.0 / CLOCKS_PER_SEC << " us" << std::endl;
    // -------- Step 7. Get the inference result --------
    std::cout << "7. Get the inference result" << std::endl;
    auto output = infer_request.get_output_tensor(0);
    auto output_shape = output.get_shape();
    std::cout << "The shape of output tensor:\t" << output_shape << std::endl;
    int rows = output_shape[2];        // 8400 candidate boxes
    int dimensions = output_shape[1];  // 84 = box [cx, cy, w, h] + 80 class scores

    // -------- Step 8. Postprocess the result --------
    std::cout << "8. Postprocess the result" << std::endl;
    float* data = output.data<float>();
    Mat output_buffer(output_shape[1], output_shape[2], CV_32F, data);
    transpose(output_buffer, output_buffer);   // [84, 8400] -> [8400, 84]
    float score_threshold = 0.25;
    float nms_threshold = 0.5;
    std::vector<int> class_ids;
    std::vector<float> class_scores;
    std::vector<Rect> boxes;

    // Figure out the bbox, class_id and class_score of every candidate
    for (int i = 0; i < output_buffer.rows; i++) {
        Mat classes_scores = output_buffer.row(i).colRange(4, 84);
        Point class_id;
        double maxClassScore;
        minMaxLoc(classes_scores, 0, &maxClassScore, 0, &class_id);
        if (maxClassScore > score_threshold) {
            class_scores.push_back(maxClassScore);
            class_ids.push_back(class_id.x);
            float cx = output_buffer.at<float>(i, 0);
            float cy = output_buffer.at<float>(i, 1);
            float w = output_buffer.at<float>(i, 2);
            float h = output_buffer.at<float>(i, 3);
            // Convert the center/size box to a top-left rectangle in original-image coordinates
            int left = int((cx - 0.5 * w) * scale);
            int top = int((cy - 0.5 * h) * scale);
            int width = int(w * scale);
            int height = int(h * scale);
            boxes.push_back(Rect(left, top, width, height));
        }
    }
    // Non-maximum suppression
    std::vector<int> indices;
    NMSBoxes(boxes, class_scores, score_threshold, nms_threshold, indices);

    // -------- Visualize the detection results --------
    for (size_t i = 0; i < indices.size(); i++) {
        int index = indices[i];
        int class_id = class_ids[index];
        rectangle(img, boxes[index], colors[class_id % 6], 2, 8);
        std::string label = class_names[class_id] + ":" + std::to_string(class_scores[index]).substr(0, 4);
        Size textSize = cv::getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, 0);
        Rect textBox(boxes[index].tl().x, boxes[index].tl().y - 15, textSize.width, textSize.height + 5);
        cv::rectangle(img, textBox, colors[class_id % 6], FILLED);
        putText(img, label, Point(boxes[index].tl().x, boxes[index].tl().y - 5), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(255, 255, 255));
    }
    cv::imwrite("detection.png", img);
    std::cout << "detect success" << std::endl;
    cv::imshow("window", img);
    cv::waitKey(0);
    return 0;
}
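The demo hard-codes the "CPU" device in Step 2. As a minimal sketch, assuming the same OpenVINO 2.0 C++ API already included above (pick_device is a hypothetical helper, not part of the original demo), the available devices can be queried at runtime and a GPU preferred when one is reported:

#include <openvino/openvino.hpp>
#include <iostream>
#include <string>
#include <vector>

// Sketch: return "GPU" (or "GPU.0", etc.) if OpenVINO reports one, otherwise fall back to "CPU".
std::string pick_device(ov::Core& core)
{
    std::vector<std::string> devices = core.get_available_devices();
    for (const std::string& d : devices) {
        std::cout << "available device: " << d << std::endl;
        if (d.find("GPU") != std::string::npos) {
            return d;   // hypothetical preference for GPU; adjust to your needs
        }
    }
    return "CPU";
}

// Possible usage in Step 2, replacing the hard-coded "CPU":
//   compiled_model = core.compile_model(model_path, pick_device(core));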
CMakeLists.txt
cmake_minimum_required(VERSION 3.0)
project(openvino_test)
find_package(OpenCV REQUIRED)
find_package(OpenVINO REQUIRED)
file(COPY test.jpg DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
file(COPY test2.jpg DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
file(COPY yolov8s.xml DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
file(COPY yolov8s.bin DESTINATION ${CMAKE_CURRENT_BINARY_DIR})
add_executable(openvino_test main.cpp)
target_link_libraries(openvino_test ${OpenCV_LIBS} openvino)
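A note on the link line: this project links the bare openvino target. If that target is not defined by your OpenVINO release (an assumption worth checking, since target names have varied between versions), the imported target documented for find_package(OpenVINO) in recent releases is openvino::runtime:

target_link_libraries(openvino_test ${OpenCV_LIBS} openvino::runtime)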
Build
ll
mkdir build
cd build
cmake ..
make
ll
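If cmake .. fails to locate OpenVINO, the environment script shipped with the OpenVINO package can be sourced first. The path below is an assumption; adjust it to where OpenVINO is actually installed on your machine:

source /opt/intel/openvino/setupvars.sh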
Test Run
./openvino_test
Results

The annotated detections are written to detection.png and shown in an OpenCV preview window.
Download

Demo download: https://www.toymoban.com/news/detail-708459.html (article source)