Calling a Trained Caffe Model with OpenCV DNN (Object Detection)

---- Recap ----
What you need to prepare:

  • A Caffe model (the officially released, pre-trained one):
    cloud drive (网盘)
    password: dqvf
  • An OpenCV environment (with the dnn module)

Main code snippets:
Add the required headers:

#include <opencv4/opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <iostream>
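
The excerpts below also rely on the using-directives from the full program at the end of the post; they are repeated here so the snippets read as written:

using namespace std;
using namespace cv;
using namespace cv::dnn;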

Load the model:
The prototxt file describes the network structure (the layers and their attributes). It is plain text, so you can open it and have a look.

    string model = "/home/sms/AI/pyopencv/ssd_caffe/MobileNetSSD_deploy.caffemodel";
    string config = "/home/sms/AI/pyopencv/ssd_caffe/MobileNetSSD_deploy.prototxt";
    // Net net = readNet(model, config, "caffe");
    // Read the network from the Caffe model and config
    Net net = readNetFromCaffe(config, model);
    if (net.empty()) {
        cout << "caffe data error" << endl;
        return -1;
    } else {
        cout << "finish" << endl;
    }
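
As the commented-out line above hints, cv::dnn::readNet can load the same files; it infers the framework from the file extensions, so it is a drop-in alternative to readNetFromCaffe (a minimal sketch, reusing the same model/config strings):

    // readNet() picks the parser from the extensions (.prototxt / .caffemodel),
    // so no explicit framework tag is required.
    Net net = readNet(model, config);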

Set the preferred DNN backend (OpenCV here) and the compute target:

    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);
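
If your OpenCV build includes the CUDA DNN backend (an assumption; the original setup runs on the CPU), the same two calls can route inference to the GPU. Without CUDA support, OpenCV logs a warning and falls back to the CPU path:

    // GPU variant -- only meaningful when OpenCV was built with CUDA support.
    net.setPreferableBackend(DNN_BACKEND_CUDA);
    net.setPreferableTarget(DNN_TARGET_CUDA);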

Write a helper function that reads the class names (and, implicitly, how many there are):

// Read one class name per line from the label file.
vector<string> readLabel(const string &label_path_)
{
    vector<string> className;
    ifstream fp(label_path_);
    string name;
    while (getline(fp, name))
    {
        if (!name.empty())
        {
            className.push_back(name);
        }
    }
    fp.close();
    return className;
}
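
A quick way to sanity-check the helper is to load the PASCAL VOC class list used later in the full program and print what it found:

    vector<string> labels = readLabel("/home/sms/AI/pyopencv/ssd_caffe/object_detection_classes_pascal_voc.txt");
    cout << "loaded " << labels.size() << " classes" << endl;  // PASCAL VOC defines 20 object classes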

Run the inference and read back the output of the detection layer:

    Mat result_img = net.forward("detection_out"); // get the output of the named layer (the last layer if no name is given)
    cout << "forward output: " << result_img.size[0] << " " << result_img.size[1] << " " << result_img.size[2] << " " << result_img.size[3] << endl;
    Mat detection(result_img.size[2], result_img.size[3], CV_32F, result_img.ptr<float>());
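
For reference, each row of that [1 x 1 x N x 7] blob packs seven values in a fixed order. The enum below is not part of the original code, just a reading aid for the literal indices used in the next snippet:

    // Column layout of one row in the SSD "detection_out" output.
    enum DetectionCol {
        COL_IMAGE_ID   = 0, // index of the image in the batch (always 0 here)
        COL_CLASS_ID   = 1, // class id; 0 is this model's background class
        COL_CONFIDENCE = 2, // detection score in [0, 1]
        COL_XMIN       = 3, // box corners, normalized to [0, 1]
        COL_YMIN       = 4,
        COL_XMAX       = 5,
        COL_YMAX       = 6
    };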

Set a confidence threshold, filter the detections, and draw the results on the image.
Because the label file loaded above does not contain the background class, the class id reported by the network has to be shifted down by one (index - 1) before it is used to look up the class name.

    float conf_threshold = 0.2;
    for (int i = 0; i < detection.rows; i++)
    {
        float confidence = detection.at<float>(i, 2);
        if (confidence > conf_threshold) {
            int index_ = detection.at<float>(i, 1);
            float tl_x = detection.at<float>(i, 3) * src_img.cols;
            float tl_y = detection.at<float>(i, 4) * src_img.rows;
            float bt_x = detection.at<float>(i, 5) * src_img.cols;
            float bt_y = detection.at<float>(i, 6) * src_img.rows;
            Rect rect_(tl_x, tl_y, bt_x - tl_x, bt_y - tl_y);
            rectangle(src_img, rect_, Scalar(0, 255, 0), 3, LINE_AA);
            putText(src_img, format("conf:%.2f, class:%s", confidence, layname[index_ - 1].c_str()),
                    rect_.tl(), FONT_HERSHEY_COMPLEX, 0.7, Scalar(0, 0, 255), 2, LINE_AA);
        }
    }
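
One optional safeguard (not in the original post): SSD boxes can reach slightly outside the image, which is harmless for drawing but would throw if you later crop with src_img(rect_). Intersecting the rectangle with the image area first avoids that:

    // Clamp the detection box to the image area; cv::Rect supports
    // intersection via operator&, so the crop below cannot go out of range.
    Rect img_area(0, 0, src_img.cols, src_img.rows);
    Rect box = rect_ & img_area;
    Mat roi = src_img(box);  // e.g. to save or further process the detection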

Full code:
It contains both image and video input; change the USE_IMAGE macro (1 = read an image, 0 = read a video) to switch between the two modes.

#include <opencv4/opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>
#include <fstream>
#include <iostream>

using namespace std;
using namespace cv;
using namespace cv::dnn;


#ifndef USE_IMAGE
#define USE_IMAGE 0
#endif

// Read one class name per line from the label file.
vector<string> readLabel(const string &label_path_)
{
    vector<string> className;
    ifstream fp(label_path_);
    string name;
    while (getline(fp, name))
    {
        if (!name.empty())
        {
            className.push_back(name);
        }
    }
    fp.close();
    return className;
}
int main() {
    // Load the model
    // string module_path = "./ssd_caffe/MobileNetSSD_deploy.caffemodel";
    // string config_path  = " ./ssd_caffe/MobileNetSSD_deploy.prototxt";
    string model = "/home/sms/AI/pyopencv/ssd_caffe/MobileNetSSD_deploy.caffemodel";
    string config = "/home/sms/AI/pyopencv/ssd_caffe/MobileNetSSD_deploy.prototxt";
    // Net net = readNet(model, config, "caffe");
    // Read the network from the Caffe model and config
    Net net = readNetFromCaffe(config, model);
    if (net.empty()) {
        cout << "caffe data error" << endl;
        return -1;
    } else {
        cout << "finish" << endl;
    }
    net.setPreferableBackend(DNN_BACKEND_OPENCV);
    net.setPreferableTarget(DNN_TARGET_CPU);

    // List every layer (id, type, name); handy for finding the output layer name "detection_out".
    vector<string> lay_name = net.getLayerNames();
    for (size_t i = 0; i < lay_name.size(); i++)
    {
        int id = net.getLayerId(lay_name[i]);
        Ptr<Layer> layer = net.getLayer(id);
        cout << "id = " << id << " " << "type:" << layer->type.c_str() << " " << "name:" << layer->name.c_str() << endl;
    }

    
    // Build the input
#if USE_IMAGE == 0
    VideoCapture capture("/home/sms/video/car.mp4");
#endif

    while (true)
    {
        Mat src_img;
        // src_img = imread("/home/sms/tu/ball1.jpeg");
#if USE_IMAGE == 1
        src_img = imread("/home/sms/tu/bycle.jpeg");
#else
        capture >> src_img;
#endif
        if (!src_img.empty())
        {
            Mat input_image = blobFromImage(src_img, 0.0383, Size(300, 300), Scalar(50, 50, 50), false, false);
            net.setInput(input_image);
            cout << "image input finish" << endl;
        }
        else
        {
            return 0;  // no more frames, or the image could not be read
        }
        // Run inference
        Mat result_img = net.forward("detection_out"); // get the output of the named layer (the last layer if no name is given)
        cout << "forward output: " << result_img.size[0] << " " << result_img.size[1] << " " << result_img.size[2] << " " << result_img.size[3] << endl;
        Mat detection(result_img.size[2], result_img.size[3], CV_32F, result_img.ptr<float>());

        vector<string> layname = readLabel("/home/sms/AI/pyopencv/ssd_caffe/object_detection_classes_pascal_voc.txt");
        float conf_threshold = 0.2;
        for (int i = 0; i < detection.rows; i++)
        {
            float confidence = detection.at<float>(i, 2);
            if (confidence > conf_threshold) {
                int index_ = detection.at<float>(i, 1);
                float tl_x = detection.at<float>(i, 3) * src_img.cols;
                float tl_y = detection.at<float>(i, 4) * src_img.rows;
                float bt_x = detection.at<float>(i, 5) * src_img.cols;
                float bt_y = detection.at<float>(i, 6) * src_img.rows;
                Rect rect_(tl_x, tl_y, bt_x - tl_x, bt_y - tl_y);
                rectangle(src_img, rect_, Scalar(0, 255, 0), 3, LINE_AA);
                putText(src_img, format("conf:%.2f, class:%s", confidence, layname[index_ - 1].c_str()),
                        rect_.tl(), FONT_HERSHEY_COMPLEX, 0.7, Scalar(0, 0, 255), 2, LINE_AA);
            }
        }
        imshow("src_out_img",src_img);
#if USE_IMAGE == 1
        waitKey(0);
#else
        waitKey(1);
#endif
    }
    

    return 0;
}
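
A small usability tweak for the video mode (not in the original code): checking the value returned by waitKey lets you stop the loop with the ESC key instead of waiting for the video to end. It would replace the plain waitKey(1) call inside the #else branch:

    // Stop the video loop when ESC (key code 27) is pressed.
    if (waitKey(1) == 27) {
        break;
    }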

There are not many parameters to tune when using the code as is.
The main one is the scale factor applied to the input image in blobFromImage (0.0383 here): the smaller the value, the more objects can be detected, but accuracy also drops accordingly.

  Mat input_image = blobFromImage(src_img, 0.0383, Size(300, 300), Scalar(50, 50, 50), false, false);
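
For comparison, most MobileNetSSD Caffe examples preprocess with a scale of 1/127.5 (about 0.007843) and a per-channel mean of 127.5; the line below uses those commonly published values rather than the hand-tuned 0.0383 / Scalar(50,50,50) above, and is worth trying if detections look off:

  // Commonly used MobileNetSSD preprocessing: scale = 1/127.5, mean = 127.5 per channel.
  Mat input_image = blobFromImage(src_img, 0.007843, Size(300, 300), Scalar(127.5, 127.5, 127.5), false, false);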


Copyright notice: this is an original article by CSDN blogger 「splendid.rain生」, released under the CC 4.0 BY-SA license; please include the original link and this notice when reposting.
Original link: https://blog.csdn.net/Msyusheng/article/details/122736066
