I want to detect an object with YOLOv4 using OpenCV's DNN module on the ROS platform, but it does not work. I have run the same YOLOv4 + OpenCV DNN code successfully without ROS; the identical snippet only fails when it runs as part of a ROS package.
CMakeLists.txt (this is where I link OpenCV):
cmake_minimum_required(VERSION 3.0.2)
project(test_opencv)
set(CMAKE_CXX_STANDARD_REQUIRED 17)
add_compile_options(--std=c++17 -g3)
set(OpenCV_DIR "/home/usernamepc/opencv_build/opencv/build")
find_package(OpenCV 4.4.0 REQUIRED)
My code is below. It works without ROS, but not when it is built and run as a ROS node.
// Headers and using-directives needed by this snippet.
// NUM_CLASSES, CONFIDENCE_THRESHOLD, NMS_THRESHOLD, NUM_COLORS, colors and
// detected_control are defined earlier in the file.
#include <ros/ros.h>
#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <chrono>
#include <fstream>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

using namespace std;
using namespace cv;
using namespace cv::dnn;

int main(int argc, char **argv)
{
    ros::init(argc, argv, "test_opencv");

    int inpWidth = 608;
    int inpHeight = 608;
    int count = 0;

    std::cout << "OpenCV version : " << CV_VERSION << std::endl;
    std::cout << "Major version : " << CV_MAJOR_VERSION << std::endl;
    std::cout << "Minor version : " << CV_MINOR_VERSION << std::endl;
    std::cout << "Subminor version : " << CV_SUBMINOR_VERSION << std::endl;

    // Load the class names, one per line.
    vector<string> class_names;
    {
        ifstream class_file("/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/data/obj_license.names");
        if (!class_file)
        {
            cerr << "failed to open classes.txt\n";
            return 0;
        }
        string line;
        while (getline(class_file, line))
        {
            class_names.push_back(line);
            cout << "Data: " << line << endl;
        }
    }

    string video = "/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/plate/1.mp4";
    VideoCapture cap(video);

    // Load the YOLOv4 network with the OpenCV DNN Darknet importer.
    auto net = readNetFromDarknet("/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/cfg/yolov4_obj_license.cfg",
                                  "/home/USERNAMEPC/people_detection_ws/src/test_opencv/input/custom_license.weights");
    auto output_names = net.getUnconnectedOutLayersNames();

    double inference_fps = 0;
    double total_fps = 0;
    Mat frame, blob;
    vector<Mat> detections;

    while (waitKey(1) < 1)
    {
        cap >> frame;
        if (frame.empty())
        {
            waitKey();
            break;
        }
        auto total_start = chrono::steady_clock::now();

        imshow("frame", frame);
        waitKey(1);

        // Convert the frame to a 608x608 blob, scaled to [0, 1], RGB order.
        cv::dnn::blobFromImage(frame, blob, 0.00392, Size(inpWidth, inpHeight), Scalar(), true, false, CV_32F);
        net.setInput(blob);
        if (blob.empty())
        {
            std::cout << "blob is empty" << std::endl;
            return -1;
        }

        auto dnn_start = chrono::steady_clock::now();
        cout << "6 " << endl;
        net.forward(detections, output_names);   // <-- this is where it fails under ROS
        cout << "7 " << endl;
        auto dnn_end = chrono::steady_clock::now();

        vector<int> indices[NUM_CLASSES];
        vector<Rect> boxes[NUM_CLASSES];
        vector<float> scores[NUM_CLASSES];

        // Collect the boxes whose per-class confidence exceeds the threshold.
        for (auto &output : detections)
        {
            const auto num_boxes = output.rows;
            for (int i = 0; i < num_boxes; i++)
            {
                auto x = output.at<float>(i, 0) * frame.cols;
                auto y = output.at<float>(i, 1) * frame.rows;
                auto width = output.at<float>(i, 2) * frame.cols;
                auto height = output.at<float>(i, 3) * frame.rows;
                Rect rect(x - width / 2, y - height / 2, width, height);

                for (int c = 0; c < NUM_CLASSES; c++)
                {
                    auto confidence = *output.ptr<float>(i, 5 + c);
                    if (confidence >= CONFIDENCE_THRESHOLD)
                    {
                        boxes[c].push_back(rect);
                        scores[c].push_back(confidence);
                        detected_control = true;
                    }
                }
            }
        }

        // Non-maximum suppression, per class.
        for (int c = 0; c < NUM_CLASSES; c++)
        {
            NMSBoxes(boxes[c], scores[c], 0.0, NMS_THRESHOLD, indices[c]);
        }

        // Draw the surviving boxes and their labels.
        for (int c = 0; c < NUM_CLASSES; c++)
        {
            for (size_t i = 0; i < indices[c].size(); ++i)
            {
                const auto color = colors[c % NUM_COLORS];
                auto idx = indices[c][i];
                auto &rect = boxes[c][idx];
                rectangle(frame, Point(rect.x, rect.y), Point(rect.x + rect.width, rect.y + rect.height), color, 3);

                ostringstream label_ss;
                label_ss << class_names[c] << ": " << fixed << setprecision(2) << scores[c][idx];
                auto label = label_ss.str();

                int baseline;
                auto label_bg_sz = getTextSize(label.c_str(), FONT_HERSHEY_COMPLEX_SMALL, 1, 1, &baseline);
                rectangle(frame, Point(rect.x, rect.y - label_bg_sz.height - baseline - 10), Point(rect.x + label_bg_sz.width, rect.y), color, FILLED);
                putText(frame, label.c_str(), Point(rect.x, rect.y - baseline - 5), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(0, 0, 0));
            }
        }

        auto total_end = chrono::steady_clock::now();
        inference_fps = 1000.0 / chrono::duration_cast<chrono::milliseconds>(dnn_end - dnn_start).count();
        total_fps = 1000.0 / chrono::duration_cast<chrono::milliseconds>(total_end - total_start).count();

        ostringstream stats_ss;
        stats_ss << fixed << setprecision(2);
        stats_ss << "Inference FPS: " << inference_fps << ", Total FPS: " << total_fps;
        auto stats = stats_ss.str();

        int baseline;
        auto stats_bg_sz = getTextSize(stats.c_str(), FONT_HERSHEY_COMPLEX_SMALL, 1, 1, &baseline);
        rectangle(frame, Point(0, 0), Point(stats_bg_sz.width, stats_bg_sz.height + 10), Scalar(0, 0, 0), FILLED);
        putText(frame, stats.c_str(), Point(0, stats_bg_sz.height + 5), FONT_HERSHEY_COMPLEX_SMALL, 1, Scalar(255, 255, 255));

        // namedWindow("output");
        count++;
    }

    cout << "Inference FPS: " << inference_fps << ", Total FPS: " << total_fps << endl;

    ros::spin();
    return 0;
}
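For completeness, the constants used above look roughly like the following. These are placeholder values for illustration only, not necessarily the exact ones in my file:

#include <opencv2/core.hpp>

// Placeholder definitions so the snippet compiles on its own;
// the real values in my file may differ.
constexpr int   NUM_CLASSES          = 1;     // e.g. a single license-plate class
constexpr float CONFIDENCE_THRESHOLD = 0.5f;
constexpr float NMS_THRESHOLD        = 0.4f;
constexpr int   NUM_COLORS           = 4;
const cv::Scalar colors[NUM_COLORS]  = {
    {0, 255, 255}, {255, 255, 0}, {0, 255, 0}, {255, 0, 0}
};
bool detected_control = false;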
The error occurs when the code reaches this line:
net.forward(detections, output_names);
The error message is:
OpenCV Error: Assertion failed (dims
When I run the node under GDB, I get the following backtrace:
(gdb) bt
#0  0x00007ffff61b5e87 in __GI_raise (sig=sig@entry=6) at /sysdeps/unix/sysv/linux/raise.c:51
#1  0x00007ffff61b77f1 in __GI_abort () at abort.c:79
#2  0x00007ffff680c957 in () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#3  0x00007ffff6812ae6 in () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#4  0x00007ffff6812b21 in () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#5  0x00007ffff6812d54 in () from /usr/lib/x86_64-linux-gnu/libstdc++.so.6
#6  0x00007ffff77c38a2 in cv::error(cv::Exception const
#7  0x00007ffff77c39bf in cv::error(int, cv::String const
#8  0x00007ffff7734b1c in cv::Mat::reshape(int, int) const () from /usr/lib/x86_64-linux-gnu/libopencv_core.so.3.2
#9  0x00007ffff6efb344 in cv::dnn::ConvolutionLayerImpl::finalize(cv::_InputArray const
#10 0x00007ffff6eb12d7 in cv::dnn::dnn4_v20200609::Layer::finalize(std::vector
#11 0x00007ffff6ed1ff3 in cv::dnn::dnn4_v20200609::Net::Impl::allocateLayer(int, std::map
#12 0x00007ffff6ed3ff2 in cv::dnn::dnn4_v20200609::Net::Impl::allocateLayers(std::vector
#13 0x00007ffff6ed7675 in cv::dnn::dnn4_v20200609::Net::Impl::setUpNet(std::vector
#14 0x00007ffff6ed8ac3 in cv::dnn::dnn4_v20200609::Net::forward(cv::_OutputArray const
#15 0x0000555555561c0e in main(int, char**) (argc=1, argv=0x7fffffffd918) at /home/usernamepc/people_detection_ws/src/test_opencv/src/test_opencv.cpp:147
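Frame #8 of the backtrace refers to /usr/lib/x86_64-linux-gnu/libopencv_core.so.3.2 even though the program is built against OpenCV 4.4.0, so the node apparently loads ROS's system OpenCV 3.2 at runtime. A minimal way to check which library is actually loaded (a sketch; cv::getBuildInformation() is a standard OpenCV call whose output includes the version of the library the process is really using, while CV_VERSION is fixed at compile time):

#include <opencv2/core.hpp>
#include <iostream>

// CV_VERSION comes from the headers used at compile time (4.4.0 here),
// while cv::getBuildInformation() is resolved from whichever libopencv_core
// the dynamic linker actually loaded, so comparing the two reveals a mismatch.
void printOpenCVVersions()
{
    std::cout << "Compiled against OpenCV " << CV_VERSION << std::endl;
    std::cout << cv::getBuildInformation() << std::endl;
}

If the build information reports 3.2.0 while CV_VERSION prints 4.4.0, two different OpenCV versions are mixed into the same process, which would explain the assertion inside cv::dnn.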
How can I solve this problem?

In the end I solved it: I removed cv_bridge and installed it from source from GitHub, and after that the code runs.