【Purpose】
A peripheral under test occasionally flashes a green screen after long playback. The plan is to leave an automated check running overnight and collect the timestamp of every green-screen occurrence plus a screenshot of the offending frame.
【Approach】
Record the screen with ffmpeg, slice the recording into frames, and analyse the frames with OpenCV.
Because the capture commands differ between macOS and Windows, there are two separate scripts.
ffmpeg/ffplay and the OpenCV Python bindings need to be installed. A minimal sketch of the detection step is shown right below.
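Before the full scripts, here is a minimal, self-contained sketch of the detection idea on a single saved frame (not part of the original tool; the file name test_frame.png and the 1% threshold are illustrative). The real scripts additionally dilate the mask and sum contour areas rather than counting pixels, but the principle is the same:

import cv2
import numpy as np

img = cv2.imread("test_frame.png")          # frame exported by ffmpeg (BGR)
hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)  # work in HSV so "green" is a single hue band
# approximate green range in OpenCV's HSV space (H runs 0-180)
mask = cv2.inRange(hsv, np.array([30, 65, 65]), np.array([80, 255, 255]))
green_ratio = cv2.countNonZero(mask) / (img.shape[0] * img.shape[1])
if green_ratio >= 0.01:                     # assumed threshold: green covers >= 1% of the frame
    print(f"possible green screen, green ratio = {green_ratio:.3f}")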
【Code】
macOS script:
import os
from datetime import datetime
import cv2
import numpy as np
import json
import threading
import time
from multiprocessing import Process

# Prepare working directories: raw recordings, extracted frames, and kept evidence (./tmp is
# used by the detection step below, so create it up front as well)
pre_cmd1 = "mkdir recordingToolTmp"
os.popen(pre_cmd1).read()
pre_cmd2 = "mkdir recordingToolPicTmp"
os.popen(pre_cmd2).read()
pre_cmd3 = "mkdir tmp"
os.popen(pre_cmd3).read()
class Job(threading.Thread):
    """Worker thread that keeps launching ffmpeg screen recordings."""

    def __init__(self, ss_num, between_time, *args, **kwargs):
        super(Job, self).__init__(*args, **kwargs)
        self.__running = threading.Event()  # event used as the stop flag for the thread
        self.__running.set()                # start in the running state
        self.ss_num = ss_num
        self.between_time = between_time

    def run(self):
        while True:
            # Record the screen (avfoundation input "3:0" = video device 3, audio device 0)
            start_cmd = 'ffmpeg -f avfoundation -i "3:0" ./recordingToolTmp/Screen' + str(int(self.ss_num / self.between_time)) + '.ts'
            if self.__running.is_set():
                print("recording thread running... ", time.time())
                os.popen(start_cmd).read()
                # Maximum time to wait for the thread to be killed externally (e.g. 60 s for rtmp2)
                time.sleep(self.between_time * 2 + 1)
            else:
                print("recording thread exiting...")
                return
            print("in while True")


class Coo():
    def __init__(self):
        self.tmp_thread = None

    # Start the recording thread; join() keeps the child process alive while it records
    def execute(self, ss_num, between_time):
        t = Job(ss_num, between_time)
        t.daemon = True
        t.start()
        self.tmp_thread = t
        t.join()


class CustErr(Exception):
    pass


def main(ss_num, between_time):
    a = Coo()
    a.execute(ss_num, between_time)
class recordingTool():
    file_name = None
    f_file = None

    def check_device(self):
        # List the avfoundation capture devices available on this machine
        check_cmd = 'ffmpeg -f avfoundation -list_devices true -i ""'
        os.popen(check_cmd).read()

    def test_length(self, url):
        # Read the segment duration with ffprobe and log it
        info_cmd = "ffprobe -v quiet -print_format json -show_format -show_streams " + url
        data_json = os.popen(info_cmd).read()
        d = json.loads(data_json)
        duration = d["format"]["duration"]
        word = "\n" + "video length:" + str(duration) + "\n"
        self.writeWordByHour(word)
        return duration

    def video_to_pic(self, url, i):
        # Extract frames from the recorded segment at 5 fps, scaled to 1280x720
        pic_dir_cmd = "mkdir recordingToolPicTmp/" + str(i)
        os.popen(pic_dir_cmd).read()
        cmd = "ffmpeg -i " + url + " -r 5 -s 1280x720 -ss 00:00:00 ./recordingToolPicTmp/" + str(i) + "/%d.png"
        os.popen(cmd).read()

    def choose_color(self, color):
        # HSV ranges (OpenCV hue runs 0-180) for the colours we may want to detect
        if color == "white":
            lower_orange = [0, 0, 221]
            upper_orange = [180, 30, 255]
        elif color == "gray":
            lower_orange = [0, 0, 100]
            upper_orange = [180, 43, 220]
        elif color == "green":
            # a wider green range would be [35, 43, 46] .. [77, 255, 255]
            lower_orange = [30, 65, 65]
            upper_orange = [80, 255, 255]
        elif color == "blue":
            lower_orange = [100, 43, 46]
            upper_orange = [124, 255, 255]
        elif color == "black":
            lower_orange = [0, 0, 0]
            upper_orange = [180, 255, 46]
        return lower_orange, upper_orange

    def writeWordByHour(self, word):
        # Roll the log file over every hour
        if self.f_file is None:
            self.file_name = datetime.now().strftime("%Y-%m-%d-%H") + ".txt"
            make_file_cmd = "touch " + self.file_name
            os.popen(make_file_cmd).read()
            self.f_file = open('./' + self.file_name, 'w', encoding='utf-8')
        else:
            new_file_name = datetime.now().strftime("%Y-%m-%d-%H") + ".txt"
            if new_file_name != self.file_name:
                self.f_file.close()
                self.file_name = new_file_name
                make_file_cmd = "touch " + self.file_name
                os.popen(make_file_cmd).read()
                self.f_file = open('./' + self.file_name, 'w', encoding='utf-8')
        self.f_file.write(word)

    def closeFile(self):
        self.f_file.close()
    def test_video_opencv(self, url, i, color):
        start_time = time.time()
        self.video_to_pic(url, i)
        pic_len = len(os.listdir("./recordingToolPicTmp/" + str(i) + "/"))
        gray_num = 0
        gray_index = []
        for png_num in range(1, pic_len + 1):
            img = cv2.imread("./recordingToolPicTmp/" + str(i) + "/" + str(png_num) + ".png")
            lower_orange_array, upper_orange_array = self.choose_color(color)
            lower_orange = np.array(lower_orange_array)
            upper_orange = np.array(upper_orange_array)
            # Mask the pixels that fall inside the target HSV range
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower_orange, upper_orange)
            binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
            binary = cv2.dilate(binary, None, iterations=2)
            # findContours returns 3 values on OpenCV 3.x and 2 values on 2.x/4.x
            if int(cv2.__version__[0]) == 3:
                _, contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            else:
                contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # Sum the contour areas and compare against the frame area
            pic_sum = 0
            space = img.shape[0] * img.shape[1]
            for cts in contours:
                pic_sum += cv2.contourArea(cts)
            if color == "white" and pic_sum / space > 0.95:
                gray_num += 1
                gray_index.append(pic_sum)
            if color == "green" and pic_sum / space >= 0.01:
                # Green covers at least 1% of the frame: log it and keep the evidence
                gray_num += 1
                gray_index.append(pic_sum)
                word = color + " screen : ./recordingToolPicTmp/" + str(i) + "/" + str(png_num) + ".png ," + str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + "\n"
                mkdir_cmd = "mkdir -p ./tmp/" + str(i)
                os.popen(mkdir_cmd).read()
                cp_cmd = "cp ./recordingToolPicTmp/" + str(i) + "/" + str(png_num) + ".png ./tmp/" + str(i) + "/"
                os.popen(cp_cmd).read()
                cp_cmd = "cp ./recordingToolTmp/Screen" + str(i) + ".ts ./tmp"
                os.popen(cp_cmd).read()
                self.writeWordByHour(str(pic_sum / space))
                self.writeWordByHour(word)
            if color != "white" and pic_sum / space > 0.95:
                gray_num += 1
                gray_index.append(pic_sum)
        end_time = time.time()
        print(f"test_video_opencv time consumption: {end_time - start_time} seconds")
        return gray_num
if __name__ == '__main__':
    r = recordingTool()
    # Clear leftovers from a previous run
    rm_cmd = "rm -rf ./recordingToolTmp/*"
    os.popen(rm_cmd).read()
    rm_cmd = "rm -rf ./recordingToolPicTmp/*"
    os.popen(rm_cmd).read()
    rm_cmd = "rm -rf ./tmp/*"
    os.popen(rm_cmd).read()
    between_time = 10   # seconds per recorded segment
    num = 32            # total loop iterations (each iteration takes roughly one second)
    for i in range(num):
        print(i)
        url = "./recordingToolTmp/Screen" + str(int(i / between_time - 1)) + ".ts"
        if i == 0:
            # Start the first recording in a child process
            start_p = Process(target=main, args=(0, between_time))
            start_p.start()
        elif i % between_time == 0:
            # Every between_time iterations: stop the running ffmpeg, analyse the finished
            # segment, clean up, then start recording the next segment
            read_cmd = "ps -ef | grep ffmpeg"
            process_info = os.popen(read_cmd).read()
            process_infos = process_info.split("\n")
            for info in process_infos:
                if info.find("Screen") > -1:
                    fields = info.split()  # split on any whitespace; fields[1]=PID, fields[2]=PPID
                    del_cmd = "kill -9 " + fields[1] + " " + fields[2]
                    os.popen(del_cmd).read()
            r.test_length(url)
            r.test_video_opencv(url, str(int(i / between_time - 1)), "green")
            rm_cmd = "rm -rf ./recordingToolPicTmp/" + str(int(i / between_time - 1)) + "/*"
            os.popen(rm_cmd).read()
            rm_cmd = "rm -rf ./recordingToolTmp/Screen" + str(int(i / between_time - 1)) + ".ts"
            os.popen(rm_cmd).read()
            start_p = Process(target=main, args=(i, between_time))
            start_p.start()
        if i == num - 1:
            # Last iteration: close the log and kill any remaining ffmpeg processes
            r.writeWordByHour("last second1")
            r.closeFile()
            time.sleep(1)
            read_cmd = "ps -ef | grep ffmpeg | awk '{print $2,$3}'"
            process_info = os.popen(read_cmd).read()
            del_cmd = "kill -9 " + process_info.replace("\n", " ")
            os.popen(del_cmd).read()
        time.sleep(1)
    print("main thread finished, exiting")
Windows script:
# -*- coding: utf-8 -*-
import os
import subprocess
import time
from datetime import datetime
import cv2
import numpy as np
import json
import threading
from time import sleep

# Print the working directory, then clear leftovers from a previous run
print(os.getcwd())
del_cmd1 = "rmdir /s /q .\\recordingToolPicTmp"
os.popen(del_cmd1).read()
del_cmd2 = "rmdir /s /q .\\recordingToolTmp"
os.popen(del_cmd2).read()
del_cmd = "rmdir /s /q .\\tmp"
os.popen(del_cmd).read()
class Job(threading.Thread):
    """Worker thread that keeps launching ffmpeg screen recordings (DirectShow)."""

    def __init__(self, ss_num, between_time, *args, **kwargs):
        super(Job, self).__init__(*args, **kwargs)
        self.__flag = threading.Event()     # event used to pause the thread
        self.__flag.set()                   # start unpaused
        self.__running = threading.Event()  # event used as the stop flag for the thread
        self.__running.set()                # start in the running state
        self.ss_num = ss_num
        self.between_time = between_time
        self.task = None

    def run(self):
        while self.__running.is_set():
            # Record the screen through the "screen-capture-recorder" DirectShow device
            start_cmd = 'ffmpeg -f dshow -i video="screen-capture-recorder" ./recordingToolTmp/Screen' + str(int(self.ss_num / self.between_time)) + '.mp4'
            print("recording thread running... ", time.time())
            self.task = subprocess.Popen(start_cmd, shell=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
            print(start_cmd)
            # Maximum time to wait for the thread to be killed externally (e.g. 60 s for rtmp2)
            sleep(self.between_time * 2 + 1)
            print("in while True")
            self.__flag.wait()  # returns immediately while set; blocks until set() when cleared

    def pause(self):
        print("thread is pause")
        self.__flag.clear()  # clear to block the thread

    def resume(self):
        print("thread is resume")
        self.__flag.set()    # set to unblock the thread

    def exit(self):
        print("thread is stop")
        # Ask ffmpeg to stop cleanly by sending 'q' on stdin, then make sure it is gone
        self.task.stdin.write('q'.encode("GBK"))
        self.task.communicate()
        self.task.kill()
        self.__flag.set()       # resume the thread if it was paused
        self.__running.clear()  # stop the run() loop


class Coo():
    def __init__(self):
        self.cur_thread = None

    # Start the recording thread
    def execute(self, ss_num, between_time):
        self.cur_thread = Job(ss_num, between_time)
        # daemon=True would kill the thread as soon as the main thread exits;
        # daemon=False makes the interpreter wait for the thread to finish
        self.cur_thread.daemon = False
        self.cur_thread.start()
        # self.cur_thread.join()  # joining here would block the main thread

    # Stop the recording thread
    def exit(self):
        self.cur_thread.exit()
class recordingTool():
    file_name = None
    f_file = None

    def check_device(self):
        # List the DirectShow capture devices available on this machine
        check_cmd = 'ffmpeg -list_devices true -f dshow -i dummy'
        os.popen(check_cmd).read()
        print("CHECKOUT")

    def test_length(self, url):
        # Read the segment duration with ffprobe and log it
        info_cmd = "ffprobe -v quiet -print_format json -show_format -show_streams " + url
        data_json = os.popen(info_cmd).read()
        d = json.loads(data_json)
        duration = d["format"]["duration"]
        word = "\n" + "video length:" + str(duration) + "\n"
        self.writeWordByHour(word)
        return duration

    def video_to_pic(self, url, i):
        # Extract frames from the recorded segment at 1 fps, scaled to 1280x720
        pic_dir_cmd = "mkdir recordingToolPicTmp\\" + str(i)
        os.popen(pic_dir_cmd).read()
        cmd = "ffmpeg -i " + url + " -r 1 -s 1280x720 -ss 00:00:00 ./recordingToolPicTmp/" + str(i) + "/%d.png"
        os.popen(cmd).read()

    def choose_color(self, color):
        # HSV ranges (OpenCV hue runs 0-180) for the colours we may want to detect
        if color == "white":
            lower_orange = [0, 0, 221]
            upper_orange = [180, 30, 255]
        elif color == "gray":
            lower_orange = [0, 0, 100]
            upper_orange = [180, 43, 220]
        elif color == "green":
            # a wider green range would be [35, 43, 46] .. [77, 255, 255]
            lower_orange = [30, 65, 65]
            upper_orange = [80, 255, 255]
        elif color == "blue":
            lower_orange = [100, 43, 46]
            upper_orange = [124, 255, 255]
        elif color == "black":
            lower_orange = [0, 0, 0]
            upper_orange = [180, 255, 46]
        return lower_orange, upper_orange

    def writeWordByHour(self, word):
        # Roll the log file over every minute
        if self.f_file is None:
            self.file_name = datetime.now().strftime("%Y-%m-%d-%H-%M") + ".txt"
            make_file_cmd = "type nul> " + self.file_name
            os.popen(make_file_cmd).read()
            self.f_file = open('./' + self.file_name, 'w', encoding='utf-8')
        else:
            new_file_name = datetime.now().strftime("%Y-%m-%d-%H-%M") + ".txt"
            if new_file_name != self.file_name:
                self.f_file.close()
                self.file_name = new_file_name
                make_file_cmd = "type nul> " + self.file_name
                os.popen(make_file_cmd).read()
                self.f_file = open('./' + self.file_name, 'w', encoding='utf-8')
        self.f_file.write(word)

    def closeFile(self):
        self.f_file.close()

    def closeFfmpeg(self, proc):
        # Unused helper: ask an ffmpeg subprocess to quit by sending 'q' on stdin
        if proc is not None:
            proc.stdin.write(b"q")
            proc.stdin.flush()
    def test_video_opencv(self, url, i, color):
        start_time = time.time()
        self.video_to_pic(url, i)
        pic_len = len(os.listdir("./recordingToolPicTmp/" + str(i) + "/"))
        gray_num = 0
        gray_index = []
        for png_num in range(1, pic_len + 1):
            img = cv2.imread("./recordingToolPicTmp/" + str(i) + "/" + str(png_num) + ".png")
            lower_orange_array, upper_orange_array = self.choose_color(color)
            lower_orange = np.array(lower_orange_array)
            upper_orange = np.array(upper_orange_array)
            # Mask the pixels that fall inside the target HSV range
            hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv, lower_orange, upper_orange)
            binary = cv2.threshold(mask, 127, 255, cv2.THRESH_BINARY)[1]
            binary = cv2.dilate(binary, None, iterations=2)
            # findContours returns 3 values on OpenCV 3.x and 2 values on 2.x/4.x
            if int(cv2.__version__[0]) == 3:
                _, contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            else:
                contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # Sum the contour areas and compare against the frame area
            pic_sum = 0
            space = img.shape[0] * img.shape[1]
            for cts in contours:
                pic_sum += cv2.contourArea(cts)
            if color == "white" and pic_sum / space > 0.95:
                gray_num += 1
                gray_index.append(pic_sum)
            if color == "green" and pic_sum / space >= 0.01:
                # Green covers at least 1% of the frame: log it and keep the evidence
                gray_num += 1
                gray_index.append(pic_sum)
                word = color + " screen : ./recordingToolPicTmp/" + str(i) + "/" + str(png_num) + ".png ," + str(datetime.now().strftime('%Y-%m-%d %H:%M:%S')) + "\n"
                mkdir_cmd = "mkdir .\\tmp\\" + str(i)
                os.popen(mkdir_cmd).read()
                cp_cmd_1 = "copy .\\recordingToolPicTmp\\" + str(i) + "\\" + str(png_num) + ".png .\\tmp\\" + str(i)
                os.popen(cp_cmd_1).read()
                cp_cmd = "copy .\\recordingToolTmp\\Screen" + str(i) + ".mp4 .\\tmp\\"
                os.popen(cp_cmd).read()
                # self.writeWordByHour(str(pic_sum / space))
                self.writeWordByHour(word)
            if color != "white" and pic_sum / space > 0.05:
                gray_num += 1
                gray_index.append(pic_sum)
                word = color + " screen: ./recordingToolPicTmp/" + str(i) + "/" + str(png_num) + ".png" + "\n"
                self.writeWordByHour(word)
        end_time = time.time()
        print(f"test_video_opencv time consumption: {end_time - start_time} seconds")
        return gray_num
if __name__ == '__main__':
    # Prepare working directories: raw recordings, extracted frames, and kept evidence
    pre_cmd1 = "mkdir recordingToolTmp"
    os.popen(pre_cmd1).read()
    pre_cmd2 = "mkdir recordingToolPicTmp"
    os.popen(pre_cmd2).read()
    pre_cmd3 = "mkdir tmp"
    os.popen(pre_cmd3).read()
    r = recordingTool()
    between_time = 5   # seconds per recorded segment
    num = 17           # total loop iterations (each iteration takes roughly one second)
    obj = Coo()
    for i in range(num):
        print(i)
        url = "./recordingToolTmp/Screen" + str(int(i / between_time - 1)) + ".mp4"
        if i == 0:
            # Start the first recording
            obj.execute(0, between_time)
        elif i % between_time == 0:
            # Every between_time iterations: stop the running ffmpeg, analyse the finished
            # segment, clean up, then start recording the next segment
            obj.exit()
            obj = Coo()
            r.test_length(url)
            r.test_video_opencv(url, str(int(i / between_time - 1)), "green")
            del_cmd1 = "rmdir /s /q .\\recordingToolPicTmp\\" + str(int(i / between_time - 1))
            delcmd1 = os.popen(del_cmd1).read()
            del_cmd2 = "del /s /q .\\recordingToolTmp\\Screen" + str(int(i / between_time - 1)) + ".mp4"
            delcmd2 = os.popen(del_cmd2).read()
            obj.execute(i, between_time)
        if i == num - 1:
            # Last iteration: close the log, stop recording and kill the leftover processes
            r.writeWordByHour("last second1")
            obj.exit()
            r.closeFile()
            # kill_all_ffmpeg_cmd = "taskkill /f /im ffmpeg.exe"
            # os.popen(kill_all_ffmpeg_cmd).read()
            # sleep(1)
            kill_all_python_cmd = "taskkill /f /im python.exe"
            os.popen(kill_all_python_cmd).read()
            sleep(1)
            ppid_cmd = os.getppid()
            kill_all_os_cmd = "taskkill /f /pid " + str(ppid_cmd)
            os.popen(kill_all_os_cmd).read()
        sleep(1)
    print("main thread finished, exiting")