1. Create one thread per video stream. Without multithreading, all the streams are read one after another in a single queue, and the pictures drift out of sync.
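For contrast, a minimal single-threaded sketch (not part of the original post) shows the problem being avoided: each read() call blocks the loop until that camera delivers a frame, so a stall on one stream holds up the other and the two pictures drift apart.

# naive sequential version, for comparison only
import cv2

cap1 = cv2.VideoCapture('rtsp://admin:qwertyuiop123@192.168.31.103/Streaming/Channels/2')
cap2 = cv2.VideoCapture('rtsp://admin:JGSYS123@192.168.31.123/Streaming/Channels/2')
while True:
    ok1, frame1 = cap1.read()   # blocks here until camera 1 produces a frame...
    ok2, frame2 = cap2.read()   # ...and only then blocks here for camera 2
    if ok1:
        cv2.imshow('webCam1', frame1)
    if ok2:
        cv2.imshow('webCam2', frame2)
    if cv2.waitKey(1) == ord('q'):
        break
cap1.release()
cap2.release()
cv2.destroyAllWindows()

The threaded version below avoids this by giving every camera its own reader thread.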
Code:
from threading import Thread
import cv2

# video-stream grabber: each instance reads its camera in its own thread
class vStream:
    def __init__(self, src):
        self.capture = cv2.VideoCapture(src)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        # keep overwriting self.frame with the newest frame from the stream
        while True:
            _, self.frame = self.capture.read()

    def getFrame(self):
        return self.frame

# my network cameras
cam1 = vStream('rtsp://admin:qwertyuiop123@192.168.31.103/Streaming/Channels/2')
cam2 = vStream('rtsp://admin:JGSYS123@192.168.31.123/Streaming/Channels/2')

while True:
    try:
        # getFrame() raises until the first frame has arrived, hence the try/except
        myframe1 = cam1.getFrame()
        myframe2 = cam2.getFrame()
        cv2.imshow('webCam1', myframe1)
        cv2.imshow('webCam2', myframe2)
    except:
        print('frame not available')
    if cv2.waitKey(1) == ord('q'):
        cam1.capture.release()
        cam2.capture.release()
        cv2.destroyAllWindows()
        break
2. Use numpy to put several pictures into a single frame. An image is just a matrix, and numpy is the library for working with matrices.
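Before the full script, a tiny stand-alone sketch (my own example, using dummy arrays instead of camera frames) of what np.hstack does: the arrays must share the same height and channel count, which is why both streams are resized to the same size first.

import numpy as np

# two dummy "frames" with identical height, width and channels
left = np.zeros((360, 640, 3), dtype=np.uint8)
right = np.full((360, 640, 3), 255, dtype=np.uint8)

combined = np.hstack((left, right))  # concatenate along the width axis
print(combined.shape)                # (360, 1280, 3)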
Code:
import time
from threading import Thread
import cv2
import numpy as np

# video-stream grabber: reads the camera in its own thread and resizes each frame
class vStream:
    def __init__(self, src, width, height):
        # display size of each stream
        self.width = width
        self.height = height
        self.capture = cv2.VideoCapture(src)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        while True:
            _, self.frame = self.capture.read()
            self.frame2 = cv2.resize(self.frame, (self.width, self.height))

    def getFrame(self):
        return self.frame2

ip1 = 'rtsp://admin:qwertyuiop123@192.168.31.103/Streaming/Channels/2'
ip2 = 'rtsp://admin:JGSYS123@192.168.31.123/Streaming/Channels/2'
disW = 640
disH = 360
cam1 = vStream(ip1, disW, disH)
cam2 = vStream(ip2, disW, disH)

# FPS overlay
font = cv2.FONT_HERSHEY_SIMPLEX
startTime = time.time()
dtav = 1  # seed value so dtav is defined on the first pass through the loop

while True:
    try:
        myframe1 = cam1.getFrame()
        myframe2 = cam2.getFrame()
        # put myframe1 and myframe2 side by side in one frame
        frameCombine = np.hstack((myframe1, myframe2))
        # frame time since the previous iteration
        dt = time.time() - startTime
        startTime = time.time()
        # exponential moving average (low-pass filter) so the FPS readout does not jitter
        dtav = .90 * dtav + .1 * dt
        fps = 1 / dtav
        fps = round(fps, 2)
        cv2.rectangle(frameCombine, (0, 0), (100, 40), (0, 0, 255), -1)
        cv2.putText(frameCombine, str(round(fps, 1)) + ' fps', (0, 25), font, .75, (0, 255, 255), 2)
        cv2.imshow('combine frame', frameCombine)
    except:
        print('frame not available')
    keyCode = cv2.waitKey(1) & 0xFF
    if keyCode == 27:  # quit on ESC
        cam1.capture.release()
        cam2.capture.release()
        cv2.destroyAllWindows()
        break
3. Perform face recognition. When the network is poor the frame rate drops badly.
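The script below loads Names and Encodings from train.pkl, but the post does not show how that file was built. A minimal sketch, assuming the file simply contains the two lists pickled in that order (the image paths and person names here are hypothetical placeholders):

import pickle
import face_recognition

# hypothetical training images, one clear face per file
known_faces = [('Alice', 'alice.jpg'), ('Bob', 'bob.jpg')]

Names = []
Encodings = []
for name, path in known_faces:
    image = face_recognition.load_image_file(path)
    encodings = face_recognition.face_encodings(image)
    if encodings:                     # skip images where no face was found
        Names.append(name)
        Encodings.append(encodings[0])

# dump in the same order the recognition script loads them: Names first, then Encodings
with open('train.pkl', 'wb') as f:
    pickle.dump(Names, f)
    pickle.dump(Encodings, f)

The full recognition script: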
import face_recognition
import pickle
import time
from threading import Thread
import cv2
import numpy as np

# load the known names and their face encodings (pickled in this order)
with open('train.pkl', 'rb') as f:
    Names = pickle.load(f)
    Encodings = pickle.load(f)

# video-stream grabber: reads the camera in its own thread and resizes each frame
class vStream:
    def __init__(self, src, width, height):
        # display size of each stream
        self.width = width
        self.height = height
        self.capture = cv2.VideoCapture(src)
        self.thread = Thread(target=self.update, args=())
        self.thread.daemon = True
        self.thread.start()

    def update(self):
        while True:
            _, self.frame = self.capture.read()
            self.frame2 = cv2.resize(self.frame, (self.width, self.height))

    def getFrame(self):
        return self.frame2

ip1 = 'rtsp://admin:qwertyuiop123@192.168.31.103/Streaming/Channels/2'
ip2 = 'rtsp://admin:JGSYS123@192.168.31.123/Streaming/Channels/2'
disW = 640
disH = 360
cam1 = vStream(ip1, disW, disH)
cam2 = vStream(ip2, disW, disH)

# FPS overlay
font = cv2.FONT_HERSHEY_SIMPLEX
startTime = time.time()
dtav = 1           # seed value so dtav is defined on the first pass through the loop
scaleFactor = .25  # detection runs on a frame shrunk to a quarter of the display size

while True:
    try:
        myframe1 = cam1.getFrame()
        myframe2 = cam2.getFrame()
        # put myframe1 and myframe2 side by side in one frame
        frameCombine = np.hstack((myframe1, myframe2))
        # convert to RGB, shrink, then detect and encode faces
        frameRGB = cv2.cvtColor(frameCombine, cv2.COLOR_BGR2RGB)
        frameRGBSmall = cv2.resize(frameRGB, (0, 0), fx=scaleFactor, fy=scaleFactor)
        facePositions = face_recognition.face_locations(frameRGBSmall, model='cnn')
        allEncodings = face_recognition.face_encodings(frameRGBSmall, facePositions)
        for (top, right, bottom, left), face_encoding in zip(facePositions, allEncodings):
            name = 'Unknown Person'
            matches = face_recognition.compare_faces(Encodings, face_encoding)
            if True in matches:
                first_match_index = matches.index(True)
                name = Names[first_match_index]
            # the boxes were found on the shrunken frame, so scale them back up by 1/scaleFactor
            top = int(top / scaleFactor)
            right = int(right / scaleFactor)
            bottom = int(bottom / scaleFactor)
            left = int(left / scaleFactor)
            cv2.rectangle(frameCombine, (left, top), (right, bottom), (0, 0, 255), 2)
            cv2.putText(frameCombine, name, (left, top - 6), font, .75, (0, 0, 255), 2)
        # FPS overlay
        dt = time.time() - startTime
        startTime = time.time()
        # exponential moving average (low-pass filter) so the FPS readout does not jitter
        dtav = .90 * dtav + .1 * dt
        fps = 1 / dtav
        fps = round(fps, 2)
        cv2.rectangle(frameCombine, (0, 0), (100, 40), (0, 0, 255), -1)
        cv2.putText(frameCombine, str(round(fps, 1)) + ' fps', (0, 25), font, .75, (0, 255, 255), 2)
        cv2.imshow('combine frame', frameCombine)
    except:
        print('frame not available')
    keyCode = cv2.waitKey(1) & 0xFF
    if keyCode == 27:  # quit on ESC
        cam1.capture.release()
        cam2.capture.release()
        cv2.destroyAllWindows()
        break
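As the heading notes, the frame rate here is low. Besides network bandwidth, the cnn detector is expensive without a GPU; one possible tweak (my suggestion, not in the original) is to switch the face_locations call in the script above to the library's default HOG detector, which runs much faster on a CPU at some cost in accuracy:

# drop-in replacement for the cnn call in the loop above
facePositions = face_recognition.face_locations(frameRGBSmall, model='hog')

Lowering scaleFactor further trades detection of small or distant faces for speed in the same way.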