Contents
- 1. Reading a network camera and running an algorithm with two threads
- Method 1
- Method 2
- 2. Reading multiple camera streams and running several algorithms in sync with multiple threads
When reading a network camera and running your own algorithm on the frames, errors like the one below appear whenever decoding runs much faster than the algorithm:
error while decoding MB 148 4, bytestream
So two threads are needed: one thread keeps reading from the camera, and the other processes the images.
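The rough shape of that fix, before the two full implementations below: one thread only grabs and decodes, the other only runs the (slow) algorithm on the most recent frame. This is a minimal sketch; the RTSP URL and window name are placeholders, and error handling is kept to a minimum.
#include <atomic>
#include <mutex>
#include <thread>
#include <opencv2/opencv.hpp>
static std::mutex g_mutex;
static std::atomic_bool g_running(true);
static void grabLoop(cv::VideoCapture& cap, cv::Mat& shared)
{
    cv::Mat frame;
    while (g_running) {
        cap >> frame;                       // keep decoding so the stream never backs up
        if (frame.empty()) continue;
        std::lock_guard<std::mutex> lock(g_mutex);
        frame.copyTo(shared);               // publish the latest frame
    }
}
int main()
{
    cv::VideoCapture cap("rtsp://<user>:<password>@<ip>:554/h264/ch1/main/av_stream"); // placeholder URL
    if (!cap.isOpened()) return -1;
    cv::Mat shared, local;
    std::thread grabber(grabLoop, std::ref(cap), std::ref(shared));
    while (g_running) {
        {
            std::lock_guard<std::mutex> lock(g_mutex);
            shared.copyTo(local);           // take a snapshot for the slow algorithm
        }
        if (local.empty()) continue;
        // run the algorithm on `local` here, then display the result
        cv::imshow("preview", local);
        if (cv::waitKey(1) == 'q') g_running = false;
    }
    grabber.join();
    return 0;
}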
1. Reading a network camera and running an algorithm with two threads
Method 1
#include <iostream>
#include <thread>
#include <mutex>
#include <atomic>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
static std::mutex mutex;
static std::atomic_bool isOpen;
static void cameraThreadFunc(std::string camId, cv::Mat* pFrame)
{
cv::VideoCapture capture(camId);
//capture.set(cv::CAP_PROP_FOURCC, CV_FOURCC('M', 'J', 'P', 'G'));
capture.set(cv::CAP_PROP_FPS, 30);
if (!capture.isOpened()) {
isOpen = false;
std::cout << "Failed to open camera " << camId << std::endl;
return;
}
cv::Mat frame;
while (isOpen) {
capture >> frame;
if (mutex.try_lock()) {
frame.copyTo(*pFrame);
mutex.unlock();
}
cv::waitKey(5);
}
capture.release();
}
int main(int argc, char* argv[])
{
std::string rtsps = "rtsp://admin:bs@192.168.1.64:554/h264/ch1/main/av_stream/1";
isOpen = true;
cv::Mat frame(1440, 2560, CV_8UC3);
//cv::Mat* frame = new cv::Mat(1440, 2560, CV_8UC3);
std::thread thread(cameraThreadFunc, rtsps, &frame); // start the camera thread
//std::thread thread(cameraThreadFunc, rtsps, frame);
//*****************************************************
// run your own algorithm on the frames
void* p_algorithm;
p_algorithm = (void*)(new WindNetDetect());
std::string net_bins = "./models/visdrone_-opt.bin";
std::string net_paras = "./models/visdrone_-opt.param";
int init_res = ((WindNetDetect*)p_algorithm)->init(net_bins, net_paras);
WindNetDetect* tmp = (WindNetDetect*)(p_algorithm);
std::vector<Object> objects;
//*****************************************************
while (isOpen) {
//*********************************
// run the object-detection algorithm on a copy of the shared frame
cv::Mat local;
mutex.lock();
frame.copyTo(local);
mutex.unlock();
tmp->detect(local, objects);
tmp->draw_objects(local, objects);
//*********************************
if (cv::waitKey(1) == 'q') {
break;
}
}
isOpen = false;
thread.join();
frame.release();
return 0;
}
Method 2
// video_test.cpp
#include <iostream>
#include <string>
#include <thread>
#include <mutex>
#include <atomic>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
class VideoCaptureMT {
public:
VideoCaptureMT(int index, int height=480, int width=640);
VideoCaptureMT(std::string filePath, int height=480, int width=640);
~VideoCaptureMT();
bool isOpened() {
return m_IsOpen;
}
void release() {
m_IsOpen = false;
}
bool read(cv::Mat& frame);
private:
void captureInit(int index, std::string filePath, int height, int width);
void captureFrame();
cv::VideoCapture* m_pCapture;
cv::Mat* m_pFrame;
std::mutex* m_pMutex;
std::thread* m_pThread;
std::atomic_bool m_IsOpen;
};
VideoCaptureMT::VideoCaptureMT(int index, int height, int width)
{
captureInit(index, std::string(), height, width);
}
VideoCaptureMT::VideoCaptureMT(std::string filePath, int height, int width)
{
captureInit(0, filePath, height, width);
}
VideoCaptureMT::~VideoCaptureMT()
{
m_IsOpen = false;
m_pThread->join();
if (m_pCapture->isOpened()) {
m_pCapture->release();
}
delete m_pThread;
delete m_pMutex;
delete m_pCapture;
delete m_pFrame;
}
void VideoCaptureMT::captureInit(int index, std::string filePath, int height, int width)
{
if (!filePath.empty()) {
m_pCapture = new cv::VideoCapture(filePath);
}
else {
m_pCapture = new cv::VideoCapture(index);
}
m_pCapture->set(cv::CAP_PROP_FRAME_WIDTH, width);
m_pCapture->set(cv::CAP_PROP_FRAME_HEIGHT, height);
m_pCapture->set(cv::CAP_PROP_FPS, 30);
m_IsOpen = true;
m_pFrame = new cv::Mat(height, width, CV_8UC3);
m_pMutex = new std::mutex();
m_pThread = new std::thread(&VideoCaptureMT::captureFrame, this);
}
void VideoCaptureMT::captureFrame()
{
cv::Mat frameBuff;
while (m_IsOpen) {
(*m_pCapture) >> frameBuff;
if (m_pMutex->try_lock()) {
frameBuff.copyTo(*m_pFrame);
m_pMutex->unlock();
}
cv::waitKey(5);
}
}
bool VideoCaptureMT::read(cv::Mat& frame)
{
if (m_pFrame->empty()) {
m_IsOpen = false;
}
else {
m_pMutex->lock();
m_pFrame->copyTo(frame);
m_pMutex->unlock();
}
return m_IsOpen;
}
int main(int argc, char* argv[])
{
VideoCaptureMT capture(0);
cv::Mat frame, gray;
while (capture.isOpened()) {
if (!capture.read(frame)) {
break;
}
cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
cv::blur(gray, gray, cv::Size(3, 3));
cv::Canny(gray, gray, 13, 38, 3);
cv::imshow("image", gray);
if (cv::waitKey(5) == 'q') {
break;
}
}
capture.release();
return 0;
}
2. Reading multiple camera streams and running several algorithms in sync with multiple threads
Two threads call two functions. get_cam1 uses the VideoCaptureMT class shown above to read the network camera and then calls the detection algorithm; the other function does the same for a second stream. The code is as follows:
static std::mutex mtx;
static WindNetDetect* tmp = nullptr; // set in main() once the model is loaded
extern const char* class_names[]; // the model's label strings, defined alongside WindNetDetect (not shown here)
void get_cam1()
{
clock_t start, end;
std::string rtsps = "rtsp://admin:bs@192.168.1.64:554/h264/ch1/main/av_stream/1";
VideoCaptureMT capture(rtsps);
//VideoCaptureMT capture, captureusb;
cv::namedWindow("外接摄像头检测",);
cv::Mat frame, gray;
while (capture.isOpened()) {
std::lock_guard<std::mutex> mtx_locker(mtx);
start = clock();
if (!capture.read(frame)) {
break;
}
//gray = frame;
end = clock();
std::vector<Object> objects;
tmp->detect(frame, objects);
for (size_t i = 0; i < objects.size(); i++)
{
const Object& obj = objects[i];
fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::rectangle(frame, obj.rect, cv::Scalar(255, 0, 0));
char text[256];
//sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);
sprintf(text, "%s", class_names[obj.label]);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
int x = obj.rect.x;
int y = obj.rect.y - label_size.height - baseLine;
if (y < 0)
y = 0;
if (x + label_size.width > frame.cols)
x = frame.cols - label_size.width;
cv::rectangle(frame, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
cv::Scalar(255, 255, 255), -1);
cv::putText(frame, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
//cv::imwrite("./result.jpg", image);
cv::imshow("外接摄像头检测", frame);
end = clock();
float runtime = (float)(end - start) / CLOCKS_PER_SEC;
std::stringstream buf;
buf.precision(3); // set the output precision
buf.setf(std::ios::fixed); // fixed number of decimal places
buf << runtime;
std::string strtime;
strtime = buf.str();
std::cout << "strtime1111 = " << strtime << std::endl;
//start = end;
if (cv::waitKey(1) == 'q') {
break;
}
}
capture.release();
}
void get_camusb()
{
clock_t start, end;
std::string rtsps = "rtsp://wowzaecdemo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4";
VideoCaptureMT capture(rtsps);
cv::Mat frameusb;
cv::namedWindow("外接摄像头检测", 0);
//void* p_algorithmusb;
//p_algorithmusb = (void*)(new WindNetDetect());
//std::string net_binsusb = "./models/visdrone_-opt.bin";
//std::string net_parasusb = "./models/visdrone_-opt.param";
//int init_res = ((WindNetDetect*)p_algorithmusb)->init(net_binsusb, net_parasusb);
//WindNetDetect* tmp = (WindNetDetect*)(p_algorithmusb);
while (capture.isOpened()) {
std::lock_guard<std::mutex> mtx_locker(mtx);
start = clock();
if (!capture.read(frameusb)) {
break;
}
//gray = frame;
end = clock();
std::vector<Object> objectsusb;
tmp->detect(frameusb, objectsusb);
for (size_t i = 0; i < objectsusb.size(); i++)
{
const Object& obj = objectsusb[i];
fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::rectangle(frameusb, obj.rect, cv::Scalar(255, 0, 0));
char text[256];
//sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);
sprintf(text, "%s", class_names[obj.label]);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
int x = obj.rect.x;
int y = obj.rect.y - label_size.height - baseLine;
if (y < 0)
y = 0;
if (x + label_size.width > frameusb.cols)
x = frameusb.cols - label_size.width;
cv::rectangle(frameusb, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
cv::Scalar(255, 255, 255), -1);
cv::putText(frameusb, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
//cv::imwrite("./result.jpg", image);
cv::imshow("外接摄像头检测", frameusb);
end = clock();
float runtime = (float)(end - start) / CLOCKS_PER_SEC;
std::stringstream buf;
buf.precision(3); // set the output precision
buf.setf(std::ios::fixed); // fixed number of decimal places
buf << runtime;
std::string strtime;
strtime = buf.str();
std::cout << "strtime2222 = " << strtime << std::endl;
//start = end;
if (cv::waitKey(1) == 'q') {
break;
}
}
capture.release();
}
int main() {
void* p_algorithm;
p_algorithm = (void*)(new WindNetDetect());
std::string net_bins = "./models/visdrone_-opt.bin";
std::string net_paras = "./models/visdrone_-opt.param";
int init_res = ((WindNetDetect*)p_algorithm)->init(net_bins, net_paras);
tmp = (WindNetDetect*)(p_algorithm);
//HANDLE hThread = CreateThread(NULL, 0, get_cam1, NULL, 0, NULL);
//CloseHandle(hThread);
//HANDLE hThread = CreateThread(NULL, 0, get_camusb, NULL, 0, NULL);
//CloseHandle(hThread);
std::thread thrd_1(get_cam1);
std::thread thrd_2(get_camusb);
thrd_1.join();
thrd_2.join();
while (true)
{
std::cout << "Main Thread Display!" << std::endl;
Sleep(1000);
}
return 0;
}
When running the code above, the two functions do not execute at the same time. In the console output, strtime1111 is printed by the function running in one thread and strtime2222 by the function running in the other, and it is clear the two are never called together: the std::lock_guard on mtx is held for the whole loop body, so the threads can only take turns.
The code is therefore improved as follows, replacing the blocking lock_guard with a non-blocking try_lock on a shared mutex so that neither thread waits on the other:
static std::mutex mutexk;
static WindNetDetect* tmp = nullptr; // set in main() once the model is loaded
extern const char* class_names[]; // the model's label strings, defined alongside WindNetDetect (not shown here)
void get_cam1()
{
clock_t start, end;
std::string rtsps = "rtsp://admin:bs@192.168.1.64:554/h264/ch1/main/av_stream/1";
VideoCaptureMT capture(rtsps);
cv::Mat frame;
cv::namedWindow("ss",);
while (capture.isOpened()) {
//pthread_rwlock_rdlock(&rwlock);
//std::lock_guard<std::mutex> mtx_locker(mtx);
start = clock();
if (!capture.read(frame)) {
break;
}
if (mutexk.try_lock()) {
//gray = frame;
end = clock();
std::vector<Object> objects;
tmp->detect(frame, objects);
//tmp->draw_objects(frame, objects);
for (size_t i = 0; i < objects.size(); i++)
{
const Object& obj = objects[i];
fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::rectangle(frame, obj.rect, cv::Scalar(255, 0, 0));
char text[256];
//sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);
sprintf(text, "%s", class_names[obj.label]);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
int x = obj.rect.x;
int y = obj.rect.y - label_size.height - baseLine;
if (y < 0)
y = 0;
if (x + label_size.width > frame.cols)
x = frame.cols - label_size.width;
cv::rectangle(frame, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
cv::Scalar(255, 255, 255), -1);
cv::putText(frame, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
//cv::imwrite("./result.jpg", image);
cv::imshow("ss", frame);
end = clock();
float runtime = (float)(end - start) / CLOCKS_PER_SEC;
std::stringstream buf;
buf.precision(3); // set the output precision
buf.setf(std::ios::fixed); // fixed number of decimal places
buf << runtime;
std::string strtime;
strtime = buf.str();
std::cout << "strtime1111 = " << strtime << std::endl;
//start = end;
mutexk.unlock();
if (cv::waitKey(1) == 'q') {
break;
}
}
}
capture.release();
}
void get_camusb()
{
//std::lock_guard<std::mutex> mtx_locker(mtx);
clock_t start, end;
//std::string rtsps = "rtsp://wowzaecdemo.streamlock.net/vod/mp4:BigBuckBunny_115k.mp4";
std::string rtsps = "rtsp://admin:bs@192.168.1.64:554/h264/ch1/main/av_stream/1";
VideoCaptureMT capture(rtsps);
cv::Mat frameusb;
cv::namedWindow("zz", 0);
while (capture.isOpened()) {
//std::lock_guard<std::mutex> mtx_locker(mtx);
start = clock();
if (!capture.read(frameusb)) {
break;
}
if (mutexk.try_lock()) {
//gray = frame;
end = clock();
std::vector<Object> objectsusb;
tmp->detect(frameusb, objectsusb);
//tmp->draw_objects(frameusb, objectsusb);
for (size_t i = 0; i < objectsusb.size(); i++)
{
const Object& obj = objectsusb[i];
fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);
cv::rectangle(frameusb, obj.rect, cv::Scalar(255, 0, 0));
char text[256];
//sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);
sprintf(text, "%s", class_names[obj.label]);
int baseLine = 0;
cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
int x = obj.rect.x;
int y = obj.rect.y - label_size.height - baseLine;
if (y < 0)
y = 0;
if (x + label_size.width > frameusb.cols)
x = frameusb.cols - label_size.width;
cv::rectangle(frameusb, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
cv::Scalar(255, 255, 255), -1);
cv::putText(frameusb, text, cv::Point(x, y + label_size.height),
cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
}
//cv::imwrite("./result.jpg", image);
cv::imshow("zz", frameusb);
end = clock();
float runtime = (float)(end - start) / CLOCKS_PER_SEC;
std::stringstream buf;
buf.precision(3); // set the output precision
buf.setf(std::ios::fixed); // fixed number of decimal places
buf << runtime;
std::string strtime;
strtime = buf.str();
std::cout << "strtime2222 = " << strtime << std::endl;
//start = end;
mutexk.unlock();
if (cv::waitKey(1) == 'q') {
break;
}
}
}
capture.release();
}
int main() {
void* p_algorithm;
p_algorithm = (void*)(new WindNetDetect());
std::string net_bins = "./models/visdrone_-opt.bin";
std::string net_paras = "./models/visdrone_-opt.param";
int init_res = ((WindNetDetect*)p_algorithm)->init(net_bins, net_paras);
tmp = (WindNetDetect*)(p_algorithm);
std::thread thrd_1(get_cam1);
std::thread thrd_2(get_camusb);
thrd_1.join();
thrd_2.join();
while (true)
{
std::cout << "Main Thread Display!" << std::endl;
Sleep(1000);
}
return 0;
}
A template for using multiple threads to call several functions and keep them executing in step:
#include <thread>
#include <mutex>
static std::mutex mutexk;
void func1() {
while (capture.isOpened()) { // or whatever your own loop condition is
if (mutexk.try_lock()) {
// my algorithm
my_func();
mutexk.unlock();
}
}
}
void func2() {
while (capture.isOpened()) { // or whatever your own loop condition is
if (mutexk.try_lock()) {
// my algorithm
my_func();
mutexk.unlock();
}
}
}
int main() {
std::thread thrd_1(func1);
std::thread thrd_2(func2);
thrd_1.join();
thrd_2.join();
return 0;
}
On Ubuntu, displaying images from multiple C++/OpenCV threads can fail with Gdk-WARNING **: xx: Fatal IO error
Solution:
1. sudo apt install libx11-dev
2. Add the header #include <X11/Xlib.h> to the code
3. Call XInitThreads() before creating the threads
4. Add -lX11 when compiling
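Putting those steps together, and assuming the two thread functions get_cam1 and get_camusb from above plus a pkg-config based OpenCV install (the package name opencv4 and the file names are assumptions for this sketch, and the WindNetDetect initialization from the earlier main() is omitted):
// build sketch (adjust for your own OpenCV install):
//   g++ main.cpp -o app $(pkg-config --cflags --libs opencv4) -lX11 -pthread
#include <X11/Xlib.h>
#include <thread>

int main()
{
    XInitThreads();              // must be called before any thread touches HighGUI (GTK/X11)
    // ... load the detection model here, as in the earlier main() ...
    std::thread thrd_1(get_cam1);
    std::thread thrd_2(get_camusb);
    thrd_1.join();
    thrd_2.join();
    return 0;
}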