opencv.js官网
4.5.0文档
以下内容整理于opencv.js官网。
简介
OpenCV由Gary Bradski于1999年在英特尔创建,第一次发行是在2000年。OpenCV支持C++、Python、Java等多种编程语言,支持Windows、Linux、OS X、Android、iOS等平台。基于CUDA和OpenCL的高速GPU操作接口也在积极开发中。OpenCV.js将OpenCV带到开放的Web平台,并使JavaScript程序员可以使用它。
图片处理
读取图片
readImage.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello OpenCV.js</title>
</head>
<body>
<h2>读取图片</h2>
<p id="status">OpenCV.js is loading...</p>
<div>
<div class="inputoutput">
<img id="imageSrc" alt="No Image" />
<div class="caption">imageSrc <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div class="inputoutput">
<canvas id="canvasOutput" ></canvas>
<div class="caption">canvasOutput</div>
</div>
</div>
<script type="text/javascript">
// Minimal demo: load a user-selected image into <img>, read it into an
// OpenCV Mat and render it on the output canvas.
let imgElement = document.getElementById('imageSrc');
let inputElement = document.getElementById('fileInput');
inputElement.addEventListener('change', (e) => {
  const file = e.target.files[0];
  if (!file) {
    return; // file dialog was cancelled; nothing to load
  }
  // Release the previous blob URL before creating a new one, otherwise
  // every re-selection leaks an object URL for the page's lifetime.
  if (imgElement.src) {
    URL.revokeObjectURL(imgElement.src);
  }
  imgElement.src = URL.createObjectURL(file);
}, false);
imgElement.onload = function() {
  // cv.imread copies the <img> pixels into a Mat on the WASM heap.
  let mat = cv.imread(imgElement);
  cv.imshow('canvasOutput', mat);
  // Mats are not garbage-collected; they must be freed explicitly.
  mat.delete();
};
var Module = {
  // https://emscripten.org/docs/api_reference/module.html#Module.onRuntimeInitialized
  onRuntimeInitialized() {
    document.getElementById('status').innerHTML = 'OpenCV.js is ready.';
  }
};
</script>
<!-- <script async src="https://docs.opencv.org/4.5.0/opencv.js" type="text/javascript"></script> -->
<script async src="opencvjs/opencv.js" type="text/javascript"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
</body>
</html>
浏览器运行。
灰度图
blackAWhite.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Adaptive Threshold Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<h2>IMAGE</h2>
<p></p>
<div>
<!-- "Start" stays disabled until OpenCV.js has loaded (see loadOpenCv below). -->
<div class="control"><button id="tryIt" disabled>Start</button></div>
<!-- Editable code area: the code-snippet below is copied in here and executed on click. -->
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<!-- Not executed by the browser directly; utils.executeCode eval's the editor text. -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let dst = new cv.Mat();
// To distinguish the input and output, we graying the image.
// You can try different conversions.
cv.cvtColor(src, dst, cv.COLOR_RGBA2GRAY);
cv.imshow('canvasOutput', dst);
// Free the WASM-heap Mats explicitly.
src.delete();
dst.delete();
</script>
<script type="text/javascript">
// Utils (from opencvjs/utils.js) wires up the error display, code
// loading/execution and image/file helpers for this demo page.
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
// Enable the button only after OpenCV.js is ready.
utils.loadOpenCv(() => {
tryIt.removeAttribute('disabled');
});
</script>
</body>
</html>
浏览器运行。
阈值
inRangeImage.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello OpenCV.js</title>
</head>
<body>
<h2></h2>
<p id="status">OpenCV.js is loading...</p>
<div>
<div class="inputoutput">
<img id="imageSrc" alt="No Image" />
<div class="caption">imageSrc <input type="file" id="fileInput" name="file" accept="image/*" /></div>
</div>
<div class="inputoutput">
<canvas id="canvasOutput" ></canvas>
<div class="caption">canvasOutput</div>
</div>
</div>
<script type="text/javascript">
// Thresholding demo: cv.inRange keeps only the pixels whose RGB channels
// lie in [0, 150] (alpha in [0, 255]), producing a binary mask.
let imgElement = document.getElementById('imageSrc');
let inputElement = document.getElementById('fileInput');
inputElement.addEventListener('change', (e) => {
  const file = e.target.files[0];
  if (!file) {
    return; // file dialog was cancelled; nothing to load
  }
  // Revoke the previous blob URL so repeated selections don't leak.
  if (imgElement.src) {
    URL.revokeObjectURL(imgElement.src);
  }
  imgElement.src = URL.createObjectURL(file);
}, false);
imgElement.onload = function() {
  let src = cv.imread('imageSrc');
  let dst = new cv.Mat();
  // The lower/upper bounds are Mats of the same size and type as src.
  let low = new cv.Mat(src.rows, src.cols, src.type(), [0, 0, 0, 0]);
  let high = new cv.Mat(src.rows, src.cols, src.type(), [150, 150, 150, 255]);
  // You can try more different parameters
  cv.inRange(src, low, high, dst);
  cv.imshow('canvasOutput', dst);
  // Free all WASM-heap Mats explicitly.
  src.delete(); dst.delete(); low.delete(); high.delete();
};
var Module = {
  // https://emscripten.org/docs/api_reference/module.html#Module.onRuntimeInitialized
  onRuntimeInitialized() {
    document.getElementById('status').innerHTML = 'OpenCV.js is ready.';
  }
};
</script>
<script async src="opencvjs/opencv.js" type="text/javascript"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
</body>
</html>
其它
imageTemplate.html
修改框内的代码即可对图片进行不同的操作,此代码适用于大部分的官网中有关图片操作的代码。
视频处理
均值漂移
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>CamShift Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<!-- Enabled once OpenCV.js has loaded and the video can play. -->
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false"></textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted loop></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<!-- WebRTC adapter shim for cross-browser media/video support. -->
<script src="https://webrtc.github.io/adapter/adapter-5.0.4.js" type="text/javascript"></script>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<!-- CamShift tracking demo: build a hue histogram of a hard-coded ROI in the
     first frame, back-project it on every later frame, and let cv.CamShift
     follow the densest region. Executed via utils.executeCode, not as plain JS. -->
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame);
// hardcode the initial location of window
let trackWindow = new cv.Rect(150, 60, 63, 125);
// set up the ROI for tracking
let roi = frame.roi(trackWindow);
let hsvRoi = new cv.Mat();
cv.cvtColor(roi, hsvRoi, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsvRoi, hsvRoi, cv.COLOR_RGB2HSV);
let mask = new cv.Mat();
let lowScalar = new cv.Scalar(30, 30, 0);
let highScalar = new cv.Scalar(180, 180, 180);
let low = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), lowScalar);
let high = new cv.Mat(hsvRoi.rows, hsvRoi.cols, hsvRoi.type(), highScalar);
// mask out pixels outside [low, high] so they don't pollute the histogram
cv.inRange(hsvRoi, low, high, mask);
let roiHist = new cv.Mat();
let hsvRoiVec = new cv.MatVector();
hsvRoiVec.push_back(hsvRoi);
// histogram over channel 0 (hue), 180 bins, range [0, 180)
cv.calcHist(hsvRoiVec, [0], mask, roiHist, [180], [0, 180]);
cv.normalize(roiHist, roiHist, 0, 255, cv.NORM_MINMAX);
// delete useless mats.
roi.delete(); hsvRoi.delete(); mask.delete(); low.delete(); high.delete(); hsvRoiVec.delete();
// Setup the termination criteria, either 10 iteration or move by atleast 1 pt
let termCrit = new cv.TermCriteria(cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_COUNT, 10, 1);
let hsv = new cv.Mat(video.height, video.width, cv.CV_8UC3);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv);
let dst = new cv.Mat();
let trackBox = null;
const FPS = 30;
// Per-frame loop, rescheduled with setTimeout to approximate FPS.
// `streaming` is defined in the page script below.
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); dst.delete(); hsvVec.delete(); roiHist.delete(); hsv.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
cv.cvtColor(frame, hsv, cv.COLOR_RGBA2RGB);
cv.cvtColor(hsv, hsv, cv.COLOR_RGB2HSV);
cv.calcBackProject(hsvVec, [0], roiHist, dst, [0, 180], 1);
// apply camshift to get the new location
[trackBox, trackWindow] = cv.CamShift(dst, trackWindow, termCrit);
// Draw it on image
let pts = cv.rotatedRectPoints(trackBox);
cv.line(frame, pts[0], pts[1], [255, 0, 0, 255], 3);
cv.line(frame, pts[1], pts[2], [255, 0, 0, 255], 3);
cv.line(frame, pts[2], pts[3], [255, 0, 0, 255], 3);
cv.line(frame, pts[3], pts[0], [255, 0, 0, 255], 3);
cv.imshow('canvasOutput', frame);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
// Read by the snippet above to decide when to stop and free resources.
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
// keep the display aspect ratio of the source video
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'video/cup.mp4';
});
</script>
</body>
</html>
背景差分
backgroundSubtraction.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<!-- Enabled once OpenCV.js has loaded and the video can play. -->
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false"></textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<!-- Background subtraction demo (MOG2): each frame updates the background
     model, which outputs a foreground mask. Executed via utils.executeCode. -->
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
let frame = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let fgmask = new cv.Mat(video.height, video.width, cv.CV_8UC1);
// Arguments presumably map to (history, varThreshold, detectShadows)
// per the OpenCV MOG2 API — confirm against the cv docs if changing them.
let fgbg = new cv.BackgroundSubtractorMOG2(500, 16, true);
const FPS = 30;
// Per-frame loop; `streaming` is defined in the page script below.
function processVideo() {
try {
if (!streaming) {
// clean and stop.
frame.delete(); fgmask.delete(); fgbg.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame);
// Update the background model and write the foreground mask.
fgbg.apply(frame, fgmask);
cv.imshow('canvasOutput', fgmask);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
// Read by the snippet above to decide when to stop and free resources.
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
// keep the display aspect ratio of the source video
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
}
// The video is not looped, so stop cleanly when playback ends.
videoInput.addEventListener('ended', () => {
onVideoStopped();
});
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'video/box.mp4';
});
</script>
</body>
</html>
光流法(注:LucasKanade.html 中实际调用的是 cv.calcOpticalFlowFarneback,即 Farneback 稠密光流算法,并非 Lucas-Kanade 稀疏光流)
LucasKanade.html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<!-- Enabled once OpenCV.js has loaded and the video can play. -->
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="100" id="codeEditor" spellcheck="false"></textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width="320" height="240" muted></video>
</td>
<td>
<canvas id="canvasOutput" width="320" height="240"></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<!-- Dense optical flow demo: despite the page's filename, this uses
     cv.calcOpticalFlowFarneback, visualizing flow direction as hue and
     flow magnitude as value of an HSV image. Run via utils.executeCode. -->
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let cap = new cv.VideoCapture(video);
// take first frame of the video
let frame1 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
cap.read(frame1);
let prvs = new cv.Mat();
cv.cvtColor(frame1, prvs, cv.COLOR_RGBA2GRAY);
frame1.delete();
// hsv0 = hue (flow angle), hsv1 = saturation (constant 255), hsv2 = value (flow magnitude)
let hsv = new cv.Mat();
let hsv0 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsv1 = new cv.Mat(video.height, video.width, cv.CV_8UC1, new cv.Scalar(255));
let hsv2 = new cv.Mat(video.height, video.width, cv.CV_8UC1);
let hsvVec = new cv.MatVector();
hsvVec.push_back(hsv0); hsvVec.push_back(hsv1); hsvVec.push_back(hsv2);
let frame2 = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let next = new cv.Mat(video.height, video.width, cv.CV_8UC1);
// two-channel float Mat holding the per-pixel (dx, dy) displacement
let flow = new cv.Mat(video.height, video.width, cv.CV_32FC2);
let flowVec = new cv.MatVector();
let mag = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let ang = new cv.Mat(video.height, video.width, cv.CV_32FC1);
let rgb = new cv.Mat(video.height, video.width, cv.CV_8UC3);
const FPS = 30;
// Per-frame loop; `streaming` is defined in the page script below.
function processVideo() {
try {
if (!streaming) {
// clean and stop.
prvs.delete(); hsv.delete(); hsv0.delete(); hsv1.delete(); hsv2.delete();
hsvVec.delete(); frame2.delete(); flow.delete(); flowVec.delete(); next.delete();
mag.delete(); ang.delete(); rgb.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(frame2);
cv.cvtColor(frame2, next, cv.COLOR_RGBA2GRAY);
cv.calcOpticalFlowFarneback(prvs, next, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
// split (dx, dy) and convert to polar magnitude/angle
cv.split(flow, flowVec);
let u = flowVec.get(0);
let v = flowVec.get(1);
cv.cartToPolar(u, v, mag, ang);
u.delete(); v.delete();
// map angle in radians to a hue value in [0, 180)
ang.convertTo(hsv0, cv.CV_8UC1, 180/Math.PI/2);
cv.normalize(mag, hsv2, 0, 255, cv.NORM_MINMAX, cv.CV_8UC1);
cv.merge(hsvVec, hsv);
cv.cvtColor(hsv, rgb, cv.COLOR_HSV2RGB);
cv.imshow('canvasOutput', rgb);
// current frame becomes the previous frame for the next iteration
next.copyTo(prvs);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
// Read by the snippet above to decide when to stop and free resources.
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
videoInput.play().then(() => {
onVideoStarted();
});
} else {
videoInput.pause();
videoInput.currentTime = 0;
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
// keep the display aspect ratio of the source video
videoInput.height = videoInput.width * (videoInput.videoHeight / videoInput.videoWidth);
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
startAndStop.innerText = 'Start';
}
// The video is not looped, so stop cleanly when playback ends.
videoInput.addEventListener('ended', () => {
onVideoStopped();
});
utils.loadOpenCv(() => {
videoInput.addEventListener('canplay', () => {
startAndStop.removeAttribute('disabled');
});
videoInput.src = 'video/box.mp4';
});
</script>
</body>
</html>
人脸识别
人脸识别_照片
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div>
<!-- Enabled only after OpenCV.js and both cascade XML files have loaded. -->
<div class="control"><button id="tryIt" disabled>Start</button></div>
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false"></textarea>
<p class="err" id="errorMessage"></p>
</div>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<canvas id="canvasInput"></canvas>
</td>
<td>
<canvas id="canvasOutput"></canvas>
</td>
</tr>
<tr>
<td>
<div class="caption">canvasInput <input type="file" id="fileInput" name="file" /></div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<!-- Haar-cascade face detection on a still image, with eye detection run
     inside each detected face region. Executed via utils.executeCode. -->
<script id="codeSnippet" type="text/code-snippet">
let src = cv.imread('canvasInput');
let gray = new cv.Mat();
// Cascades operate on grayscale input.
cv.cvtColor(src, gray, cv.COLOR_RGBA2GRAY, 0);
let faces = new cv.RectVector();
let eyes = new cv.RectVector();
let faceCascade = new cv.CascadeClassifier();
let eyeCascade = new cv.CascadeClassifier();
// load pre-trained classifiers
faceCascade.load('haarcascade_frontalface_default.xml');
eyeCascade.load('haarcascade_eye.xml');
// detect faces
let msize = new cv.Size(0, 0);
faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, msize, msize);
for (let i = 0; i < faces.size(); ++i) {
// Restrict the eye search to the detected face rectangle.
let roiGray = gray.roi(faces.get(i));
let roiSrc = src.roi(faces.get(i));
let point1 = new cv.Point(faces.get(i).x, faces.get(i).y);
let point2 = new cv.Point(faces.get(i).x + faces.get(i).width,
faces.get(i).y + faces.get(i).height);
// red rectangle for each face
cv.rectangle(src, point1, point2, [255, 0, 0, 255]);
// detect eyes in face ROI
eyeCascade.detectMultiScale(roiGray, eyes);
for (let j = 0; j < eyes.size(); ++j) {
let point1 = new cv.Point(eyes.get(j).x, eyes.get(j).y);
let point2 = new cv.Point(eyes.get(j).x + eyes.get(j).width,
eyes.get(j).y + eyes.get(j).height);
// blue rectangle for each eye, drawn on the ROI view of src
cv.rectangle(roiSrc, point1, point2, [0, 0, 255, 255]);
}
roiGray.delete(); roiSrc.delete();
}
cv.imshow('canvasOutput', src);
// Free all WASM-heap objects explicitly.
src.delete(); gray.delete(); faceCascade.delete();
eyeCascade.delete(); faces.delete(); eyes.delete();
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
utils.loadImageToCanvas('lena.jpg', 'canvasInput');
utils.addFileInputHandler('fileInput', 'canvasInput');
let tryIt = document.getElementById('tryIt');
tryIt.addEventListener('click', () => {
utils.executeCode('codeEditor');
});
utils.loadOpenCv(() => {
// Fetch both cascade XMLs into the Emscripten FS before enabling Start.
let eyeCascadeFile = 'haarcascade_eye.xml';
utils.createFileFromUrl(eyeCascadeFile, eyeCascadeFile, () => {
let faceCascadeFile = 'haarcascade_frontalface_default.xml';
utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
tryIt.removeAttribute('disabled');
});
});
});
</script>
</body>
</html>
以上测试图片来自百度。
可以看到,有的地方不是眼睛也会被标记成眼睛,原因是官方的数据集比较小,数据不够准确,但是眼睛的位置基本能标记出来,已经达到我们的学习目的了,以后用更精确的数据集替换,效果就会好很多。
人脸识别_相机
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Face Detection Camera Example</title>
<link href="opencvjs/style.css" rel="stylesheet" type="text/css" />
</head>
<body>
<!-- <h2>Face Detection Camera Example</h2> -->
<div>
<!-- Disabled until OpenCV.js and the Haar cascade have loaded (bottom script);
     clicking earlier would make classifier.load() fail inside the snippet.
     This matches the other examples on this page. -->
<div class="control"><button id="startAndStop" disabled>Start</button></div>
<textarea class="code" rows="29" cols="80" id="codeEditor" spellcheck="false">
</textarea>
</div>
<p class="err" id="errorMessage"></p>
<div>
<table cellpadding="0" cellspacing="0" width="0" border="0">
<tr>
<td>
<video id="videoInput" width=320 height=240></video>
</td>
<td>
<canvas id="canvasOutput" width=320 height=240></canvas>
</td>
<td></td>
<td></td>
</tr>
<tr>
<td>
<div class="caption">videoInput</div>
</td>
<td>
<div class="caption">canvasOutput</div>
</td>
<td></td>
<td></td>
</tr>
</table>
</div>
<script type="text/javascript" src="opencvjs/opencv.js"></script>
<script src="opencvjs/utils.js" type="text/javascript"></script>
<!-- <script src="opencvjs/adapter-5.0.4.js" type="text/javascript"></script> -->
<!-- <script src="opencvjs/known.js" type="text/javascript"></script> -->
<!-- Per-frame Haar-cascade face detection on the camera stream;
     run via utils.executeCode. -->
<script id="codeSnippet" type="text/code-snippet">
let video = document.getElementById('videoInput');
let src = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let dst = new cv.Mat(video.height, video.width, cv.CV_8UC4);
let gray = new cv.Mat();
let cap = new cv.VideoCapture(video);
let faces = new cv.RectVector();
let classifier = new cv.CascadeClassifier();
// load pre-trained classifiers
classifier.load('haarcascade_frontalface_default.xml');
const FPS = 30;
// Per-frame loop; `streaming` is defined in the page script below.
function processVideo() {
try {
if (!streaming) {
// clean and stop: free every WASM-heap object exactly once.
src.delete();
dst.delete();
gray.delete();
faces.delete();
classifier.delete();
return;
}
let begin = Date.now();
// start processing.
cap.read(src);
src.copyTo(dst);
cv.cvtColor(dst, gray, cv.COLOR_RGBA2GRAY, 0);
// detect faces.
classifier.detectMultiScale(gray, faces, 1.1, 3, 0);
// draw faces.
for (let i = 0; i < faces.size(); ++i) {
let face = faces.get(i);
let point1 = new cv.Point(face.x, face.y);
let point2 = new cv.Point(face.x + face.width, face.y + face.height);
cv.rectangle(dst, point1, point2, [255, 0, 0, 255]);
}
cv.imshow('canvasOutput', dst);
// schedule the next one.
let delay = 1000/FPS - (Date.now() - begin);
setTimeout(processVideo, delay);
} catch (err) {
utils.printError(err);
}
};
// schedule the first one.
setTimeout(processVideo, 0);
</script>
<script type="text/javascript">
let utils = new Utils('errorMessage');
utils.loadCode('codeSnippet', 'codeEditor');
// Read by the snippet above to decide when to stop and free resources.
let streaming = false;
let videoInput = document.getElementById('videoInput');
let startAndStop = document.getElementById('startAndStop');
let canvasOutput = document.getElementById('canvasOutput');
let canvasContext = canvasOutput.getContext('2d');
startAndStop.addEventListener('click', () => {
if (!streaming) {
utils.clearError();
utils.startCamera('qvga', onVideoStarted, 'videoInput');
} else {
utils.stopCamera();
onVideoStopped();
}
});
function onVideoStarted() {
streaming = true;
startAndStop.innerText = 'Stop';
// Match the <video> element size to the actual camera resolution.
videoInput.width = videoInput.videoWidth;
videoInput.height = videoInput.videoHeight;
utils.executeCode('codeEditor');
}
function onVideoStopped() {
streaming = false;
canvasContext.clearRect(0, 0, canvasOutput.width, canvasOutput.height);
startAndStop.innerText = 'Start';
}
utils.loadOpenCv(() => {
let faceCascadeFile = 'haarcascade_frontalface_default.xml';
utils.createFileFromUrl(faceCascadeFile, faceCascadeFile, () => {
// Enable Start only after the cascade file exists in the Emscripten FS.
startAndStop.removeAttribute('disabled');
});
});
</script>
</body>
</html>
4.8.0文档
4.8.0相对于4.5.0新增了深度学习模块。
深度神经网络
选择相应的model,从对应的链接中得到对应的数据集文件,看第一块代码的配置项是否一致。
上传对应的文件,点击运行。
起初认为是时间问题,晚上挂了一段时间,早上起来依旧没有任何数据,打开浏览器审查页面,发现有很多报错,无论如何我都得不到想要的结果,这一块就放弃了。
补充
完整文件
gitee获取
去掉文本代码区域
在实际开发中我们是不需要textarea的,所以必须去掉它。但是我尝试更改了html文件和js文件,没有成功,所以干脆直接让textarea区域:display:none。
<textarea class="code" rows="9" cols="100" id="codeEditor" spellcheck="false" style="display: none;"></textarea>
参考文献
代码哈士,opencv.js人脸识别简单
opencv.js官网的js文件不是真正的js文件,官网文档里需要建立其环境,这篇文章介绍通过爬取得到官方的资源。 文章来源:https://www.toymoban.com/news/detail-632044.html
opencv.js官网 文章来源地址https://www.toymoban.com/news/detail-632044.html
到了这里,关于OpenCv.js(图像处理)学习历程的文章就介绍完了。如果您还想了解更多内容,请在右上角搜索TOY模板网以前的文章或继续浏览下面的相关文章,希望大家以后多多支持TOY模板网!