Preface
I spent the last few days studying a license-plate recognition project, and this post records the learning process. The code is written in Python on top of OpenCV.
The full source code is at the end of the article; interested readers can step through it in a Jupyter notebook and inspect every intermediate result.
The plate-location pipeline in this project consists of the following steps (a condensed sketch of the whole pipeline follows the list):
- Load the image
- Gaussian denoising
- Grayscale conversion
- Edge detection
- Closing, then erosion and dilation
- Median filtering
- Contour detection
- Plate-candidate filtering and image rectification
- Color check to confirm the plate
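A condensed sketch of the location stage is shown below. It is only an overview, not the final code: it assumes the helper functions adjust_image and recognition_color defined in the source-code section at the end, and the sample image path used throughout the article.
import cv2

def locate_plate(path="./image/car16.jpg"):
    raw = cv2.imread(path)
    blur = cv2.GaussianBlur(raw, (3, 3), 0)                         # Gaussian denoising
    gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)                   # grayscale
    edges = cv2.convertScaleAbs(cv2.Sobel(gray, cv2.CV_16S, 1, 0))  # vertical edges
    _, binary = cv2.threshold(edges, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 4))
    closed = cv2.morphologyEx(binary, cv2.MORPH_CLOSE, kernel, iterations=3)
    clean = cv2.medianBlur(closed, 15)                              # drop small specks
    contours, _ = cv2.findContours(clean, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # Aspect-ratio filtering, rectification (adjust_image) and the color check
    # (recognition_color) then pick the actual plate -- see the steps below.
    return raw, contours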
Detailed walkthrough
Image display helpers
# Import required modules
import cv2
from matplotlib import pyplot as plt
import numpy as np
import json

# Show an image in an OpenCV window
def cv_show(name, img):
    cv2.imshow(name, img)
    cv2.waitKey()
    cv2.destroyAllWindows()

# Show a color (BGR) image with matplotlib
def plt_show0(img):
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])
    plt.imshow(img)
    plt.show()

# Show a grayscale image with matplotlib
def plt_show(img):
    plt.imshow(img, cmap='gray')
    plt.show()

# Clamp a point's coordinates so they are never negative
def point_limit(point):
    if point[0] < 0:
        point[0] = 0
    if point[1] < 0:
        point[1] = 0
1. Loading the image
The picture comes from the web; if it infringes any rights, please contact me and I will remove it.
# Load the image
rawImage = cv2.imread("./image/car16.jpg")
pic_hight, pic_width = rawImage.shape[:2]
plt_show0(rawImage)
2. Gaussian denoising
Function reference
cv2.GaussianBlur(src, ksize, sigmaX, sigmaY, borderType)
src: the input image.
ksize: size of the filter kernel, i.e. the height and width of the neighborhood used for smoothing. Both values must be odd.
sigmaX: standard deviation of the kernel in the horizontal (X) direction; it controls how the weights fall off. If it is 0, OpenCV derives it from the kernel size.
sigmaY: standard deviation in the vertical (Y) direction. If it is set to 0, the value of sigmaX is used.
borderType: how pixels at the image border are handled; the default is usually fine.
# Gaussian denoising
image = cv2.GaussianBlur(rawImage, (3, 3), 0)
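For comparison only (not used by the rest of the pipeline), a larger odd kernel with the sigmas derived automatically smooths more aggressively:
# Comparison only: a larger (odd) kernel gives stronger smoothing
blur_strong = cv2.GaussianBlur(rawImage, (5, 5), 0)
plt_show0(blur_strong)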
3. Grayscale conversion
# Grayscale conversion (cv2.imread returns BGR, so COLOR_BGR2GRAY is the matching flag)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
plt_show(gray_image)
4. Edge detection
The two common approaches are the Sobel operator and the Canny detector; this article uses Sobel.
The Sobel operator combines Gaussian smoothing with differentiation, so it is fairly robust to noise and widely used, especially when efficiency matters more than fine texture.
Function reference
cv2.Sobel(src, ddepth, dx, dy[, ksize[, scale[, delta[, borderType]]]])
src: the input image.
ddepth: depth of the output image.
dx: order of the derivative in the x direction.
dy: order of the derivative in the y direction.
ksize: size of the Sobel kernel. If it is -1, the Scharr operator is used instead.
scale: scaling factor applied to the computed derivative values; the default of 1 means no scaling.
delta: optional value added to the result dst; the default is 0.
borderType: how the image border is handled.
# Sobel edge detection: take the first derivative in the x direction (dx=1, dy=0),
# which highlights vertical edges. Plate characters produce many vertical edges,
# so the plate region turns into a dense block that is easy to merge in later steps.
Sobel_x = cv2.Sobel(gray_image, cv2.CV_16S, 1, 0)
absX = cv2.convertScaleAbs(Sobel_x)  # convert back to uint8
image = absX
plt_show(image)
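The pipeline keeps only the x-direction gradient. For comparison, a minimal sketch that also computes the y-direction gradient and blends the two with cv2.addWeighted (not used by the rest of the article):
# Comparison only: combine x- and y-direction gradients with equal weights
Sobel_y = cv2.Sobel(gray_image, cv2.CV_16S, 0, 1)
absY = cv2.convertScaleAbs(Sobel_y)
both = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
plt_show(both)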
Thresholding with Otsu's method, which picks a single global threshold automatically from the image histogram (this is not the same as cv2.adaptiveThreshold, which computes a local threshold per neighborhood):
ret, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
plt_show(image)
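If a truly local threshold is ever needed, cv2.adaptiveThreshold is an option; a minimal sketch with illustrative parameters (the block size 25 and constant 5 are assumptions, not tuned for this project):
# Illustrative only: a locally adaptive threshold instead of Otsu's global one
adaptive = cv2.adaptiveThreshold(absX, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                 cv2.THRESH_BINARY, 25, 5)
plt_show(adaptive)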
5. Closing
Function reference:
cv2.morphologyEx(src, op, kernel)
src: the input image.
op: the morphological operation to perform (e.g. opening or closing).
kernel: the structuring element (its shape and size).
A closing first dilates the image, filling the dark holes inside white regions, and then erodes it, smoothing the outline while leaving the overall area almost unchanged.
# Closing: connect the white fragments into one solid block
kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 4))
image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernelX, iterations=3)
plt_show(image)
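To illustrate the definition above: with a single iteration, a dilation followed by an erosion using the same kernel should give the same result as MORPH_CLOSE (shown only for illustration, not used later):
# Illustration: dilation followed by erosion with the same kernel is a closing
demo = cv2.dilate(image, kernelX)   # fill dark gaps inside white regions
demo = cv2.erode(demo, kernelX)     # shrink back to roughly the original outline
# demo should match cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernelX)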
6. Removing small white specks (dilation and erosion)
# Remove some of the small white specks
kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 1))
kernelY = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 19))
# Dilate, then erode (horizontal kernel)
image = cv2.dilate(image, kernelX)
image = cv2.erode(image, kernelX)
# Erode, then dilate (vertical kernel)
image = cv2.erode(image, kernelY)
image = cv2.dilate(image, kernelY)
plt_show(image)
7. Median filtering
A median filter takes the current pixel and its neighbors (an odd number of pixels in total), sorts their values, and replaces the current pixel with the middle value of the sorted list.
Function reference
cv2.medianBlur(src, ksize)
# Median filtering to remove the remaining noise
image = cv2.medianBlur(image, 15)
plt_show(image)
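A tiny numeric illustration of the definition above (the values are made up): a single bright outlier is simply discarded by the median.
# Made-up values: the bright noise pixel is not the median of its neighborhood
patch = np.full((5, 5), 10, dtype=np.uint8)
patch[2, 2] = 200                        # one bright noise pixel in the middle
print(np.median(patch[1:4, 1:4]))        # 10.0 -- 200 is not the median
print(cv2.medianBlur(patch, 3)[2, 2])    # the noise pixel is replaced by 10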
8. Contour detection
# Contour detection
# cv2.RETR_EXTERNAL: detect only the outermost contours
# cv2.CHAIN_APPROX_SIMPLE: compress horizontal, vertical and diagonal segments and keep
# only their end points, so a rectangular contour needs just 4 points to be stored
contours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Draw the contours on a copy of the original image
image1 = rawImage.copy()
cv2.drawContours(image1, contours, -1, (0, 255, 0), 5)
plt_show0(image1)
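A small sketch to check the claim about CHAIN_APPROX_SIMPLE: it prints how many points each approximation mode stores for the first contour found (the exact counts depend on the image):
# Compare how many points the two approximation modes keep (illustrative)
cnts_none, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
cnts_simple, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
if cnts_none and cnts_simple:
    print(len(cnts_none[0]), "points with CHAIN_APPROX_NONE")
    print(len(cnts_simple[0]), "points with CHAIN_APPROX_SIMPLE")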
9. Filtering the contours
# Keep only the contours whose bounding box has a plate-like aspect ratio
car_contours = []
car_min_contours = []
for item in contours:
    # cv2.boundingRect: the smallest upright rectangle that encloses the contour
    rect = cv2.boundingRect(item)
    rect_min = cv2.minAreaRect(item)
    x = rect[0]
    y = rect[1]
    weight = rect[2]
    height = rect[3]
    print(weight / height)
    # A standard plate is 440 mm x 140 mm, an aspect ratio of about 3.14
    if (weight > (height * 2)) and (weight < (height * 5)):
        image = rawImage[y:y + height, x:x + weight]
        car_contours.append(image)
        car_min_contours.append(rect_min)
        plt_show0(image)
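The 2-5 ratio window above is deliberately loose. Below is a sketch of a slightly stricter check that also rejects tiny regions; the min_area value is an illustrative assumption and would need tuning for the image size:
# Sketch of a stricter candidate test; min_area is an illustrative assumption
def looks_like_plate(w, h, min_area=2000):
    ratio = w / float(h)
    return 2.0 < ratio < 5.5 and w * h > min_area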
10. Rectifying the candidates
adjust_image (defined in the full source code at the end) uses the rotated rectangle returned by cv2.minAreaRect to warp tilted candidates back to an upright rectangle before the color check.
car_images = adjust_image(car_min_contours, car_contours)
for car in car_images:
    plt_show0(car)
11. Color check
recognition_color (also defined in the full source code) counts how many HSV pixels of each candidate fall into the blue, green and yellow plate ranges; a candidate is kept only if one of those colors dominates.
colors = []
color_images = []
for contour in car_images:
    color, image = recognition_color(contour)
    colors.append(color)
    color_images.append(image)
for index, car in enumerate(color_images):
    # note: "yello" is the spelling used throughout recognition_color
    if colors[index] in ("blue", "green", "yello"):
        car_image = car.copy()
        print(colors[index])
        plt_show0(car)
Source code
config.js
{
    "config": [
        {
            "open": 1,
            "blur": 3,
            "morphologyr": 4,
            "morphologyc": 19,
            "col_num_limit": 10,
            "row_num_limit": 21
        },
        {
            "open": 0,
            "blur": 3,
            "morphologyr": 5,
            "morphologyc": 12,
            "col_num_limit": 10,
            "row_num_limit": 18
        }
    ]
}
Project code
# Import required modules
import cv2
from matplotlib import pyplot as plt
import numpy as np
import json

# Show an image in an OpenCV window
def cv_show(name, img):
    cv2.imshow(name, img)
    cv2.waitKey()
    cv2.destroyAllWindows()

# Show a color (BGR) image with matplotlib
def plt_show0(img):
    b, g, r = cv2.split(img)
    img = cv2.merge([r, g, b])
    plt.imshow(img)
    plt.show()

# Show a grayscale image with matplotlib
def plt_show(img):
    plt.imshow(img, cmap='gray')
    plt.show()

# Clamp a point's coordinates so they are never negative
def point_limit(point):
    if point[0] < 0:
        point[0] = 0
    if point[1] < 0:
        point[1] = 0
# Load the parameters from config.js and keep the first entry whose "open" flag is set
cfg = ""
f = open('./config.js')
j = json.load(f)
for c in j["config"]:
    if c["open"]:
        cfg = c.copy()
        break
# Tighten the plate crop: scan the rows and columns of the HSV image and keep only
# the region where enough pixels fall inside the plate's hue range
def accurate_place(card_img_hsv, limit1, limit2, color):
    row_num, col_num = card_img_hsv.shape[:2]
    xl = col_num
    xr = 0
    yh = 0
    yl = row_num
    # col_num_limit = self.cfg["col_num_limit"]
    row_num_limit = cfg["row_num_limit"]
    col_num_limit = col_num * 0.8 if color != "green" else col_num * 0.5  # green plates have a gradient
    for i in range(row_num):
        count = 0
        for j in range(col_num):
            H = card_img_hsv.item(i, j, 0)
            S = card_img_hsv.item(i, j, 1)
            V = card_img_hsv.item(i, j, 2)
            if limit1 < H <= limit2 and 34 < S and 46 < V:
                count += 1
        if count > col_num_limit:
            if yl > i:
                yl = i
            if yh < i:
                yh = i
    for j in range(col_num):
        count = 0
        for i in range(row_num):
            H = card_img_hsv.item(i, j, 0)
            S = card_img_hsv.item(i, j, 1)
            V = card_img_hsv.item(i, j, 2)
            if limit1 < H <= limit2 and 34 < S and 46 < V:
                count += 1
        if count > row_num - row_num_limit:
            if xl > j:
                xl = j
            if xr < j:
                xr = j
    return xl, xr, yh, yl
# Rectify the candidate images
def adjust_image(car_min_contours, car_contours):
    card_imgs = []
    # The rectangles may be tilted and need rectification before color-based locating
    for index, rect in enumerate(car_min_contours):
        if rect[2] > -1 and rect[2] < 1:  # force a non-zero angle so left/high/right/low are assigned correctly
            angle = 1
        else:
            angle = rect[2]
        rect = (rect[0], (rect[1][0] + 5, rect[1][1] + 5), angle)  # enlarge the box so the plate border is not cut off
        box = cv2.boxPoints(rect)
        heigth_point = right_point = [0, 0]
        left_point = low_point = [pic_width, pic_hight]
        for point in box:
            if left_point[0] > point[0]:
                left_point = point
            if low_point[1] > point[1]:
                low_point = point
            if heigth_point[1] < point[1]:
                heigth_point = point
            if right_point[0] < point[0]:
                right_point = point
        if left_point[1] <= right_point[1]:  # positive angle
            print("positive angle")
            # new_right_point = [right_point[0], heigth_point[1]]
            # pts2 = np.float32([left_point, heigth_point, new_right_point])  # only the character height needs to change
            # pts1 = np.float32([left_point, heigth_point, right_point])
            # M = cv2.getAffineTransform(pts1, pts2)
            # dst = cv2.warpAffine(rawImage, M, (pic_width, pic_hight))
            # point_limit(new_right_point)
            # point_limit(heigth_point)
            # point_limit(left_point)
            # card_img = dst[int(left_point[1]):int(heigth_point[1]), int(left_point[0]):int(new_right_point[0])]
            # card_imgs.append(card_img)
            card_imgs.append(car_contours[index])
        elif left_point[1] > right_point[1]:  # negative angle
            print("negative angle")
            new_left_point = [left_point[0], heigth_point[1]]
            pts2 = np.float32([new_left_point, heigth_point, right_point])  # only the character height needs to change
            pts1 = np.float32([left_point, heigth_point, right_point])
            M = cv2.getAffineTransform(pts1, pts2)
            dst = cv2.warpAffine(rawImage, M, (pic_width, pic_hight))
            point_limit(right_point)
            point_limit(heigth_point)
            point_limit(new_left_point)
            card_img = dst[int(right_point[1]):int(heigth_point[1]), int(new_left_point[0]):int(right_point[0])]
            card_imgs.append(card_img)
    return card_imgs
# Locate the plate by color: count HSV pixels falling into each plate-color range
def recognition_color(image):
    green = yello = blue = black = white = 0
    card_img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    row_num, col_num = card_img_hsv.shape[:2]
    card_img_count = row_num * col_num
    for i in range(row_num):
        for j in range(col_num):
            H = card_img_hsv.item(i, j, 0)
            S = card_img_hsv.item(i, j, 1)
            V = card_img_hsv.item(i, j, 2)
            if 11 < H <= 34 and S > 34:  # may need adjusting for image resolution
                yello += 1
            elif 35 < H <= 99 and S > 34:  # may need adjusting for image resolution
                green += 1
            elif 99 < H <= 124 and S > 34:  # may need adjusting for image resolution
                blue += 1
            if 0 < H < 180 and 0 < S < 255 and 0 < V < 46:
                black += 1
            elif 0 < H < 180 and 0 < S < 43 and 221 < V < 225:
                white += 1
    color = "no"
    limit1 = limit2 = 0
    if yello * 2 >= card_img_count:
        color = "yello"
        limit1 = 11
        limit2 = 34  # some images have a color cast toward green
    elif green * 2 >= card_img_count:
        color = "green"
        limit1 = 35
        limit2 = 99
    elif blue * 2 >= card_img_count:
        color = "blue"
        limit1 = 100
        limit2 = 124  # some images have a color cast toward purple
    elif black + white >= card_img_count * 0.7:  # TODO
        color = "bw"
    xl, xr, yh, yl = accurate_place(card_img_hsv, limit1, limit2, color)
    if yl == yh and xl == xr:
        return color, image
    need_accurate = False
    if yl >= yh:
        yl = 0
        yh = row_num
        need_accurate = True
    if xl >= xr:
        xl = 0
        xr = col_num
        need_accurate = True
    image = image[yl:yh, xl:xr] if color != "green" or yl < (yh - yl) // 4 else image[yl - (yh - yl) // 4:yh, xl:xr]
    if need_accurate:  # x or y may not have been narrowed; try once more
        card_img_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
        xl, xr, yh, yl = accurate_place(card_img_hsv, limit1, limit2, color)
        if yl == yh and xl == xr:
            return color, image
        if yl >= yh:
            yl = 0
            yh = row_num
        if xl >= xr:
            xl = 0
            xr = col_num
    image = image[yl:yh, xl:xr] if color != "green" or yl < (yh - yl) // 4 else image[yl - (yh - yl) // 4:yh, xl:xr]
    return color, image
# Load the image
rawImage = cv2.imread("./image/car16.jpg")
pic_hight, pic_width = rawImage.shape[:2]
# plt_show0(rawImage)
# Gaussian denoising
image = cv2.GaussianBlur(rawImage, (3, 3), 0)
# Median denoising (alternative)
# image = cv2.medianBlur(rawImage, 3)
# plt_show0(image)
# Grayscale conversion (imread returns BGR, so COLOR_BGR2GRAY is the matching flag)
gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# plt_show(gray_image)
# Sobel edge detection: the x-direction derivative highlights the vertical edges of the characters
Sobel_x = cv2.Sobel(gray_image, cv2.CV_16S, 1, 0)
absX = cv2.convertScaleAbs(Sobel_x)  # convert back to uint8
image = absX
# plt_show(image)
# Binarization with Otsu's method
ret, image = cv2.threshold(image, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
# plt_show(image)
# Closing: connect the white fragments into one solid block
kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 4))
image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernelX, iterations=3)
# plt_show(image)
# Remove some of the small white specks
kernelX = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 1))
kernelY = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 19))
# Dilate, then erode (horizontal kernel)
image = cv2.dilate(image, kernelX)
image = cv2.erode(image, kernelX)
# Erode, then dilate (vertical kernel)
image = cv2.erode(image, kernelY)
image = cv2.dilate(image, kernelY)
# plt_show(image)
# Median filtering to remove the remaining noise
image = cv2.medianBlur(image, 15)
# plt_show(image)
# Contour detection
# cv2.RETR_EXTERNAL: detect only the outermost contours
# cv2.CHAIN_APPROX_SIMPLE: keep only segment end points, so a rectangular contour needs just 4 points
contours, hierarchy = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# Draw the contours
image1 = rawImage.copy()
cv2.drawContours(image1, contours, -1, (0, 255, 0), 5)
# plt_show0(image1)
# Keep only the contours whose bounding box has a plate-like aspect ratio
car_contours = []
car_min_contours = []
for item in contours:
    # cv2.boundingRect: the smallest upright rectangle that encloses the contour
    rect = cv2.boundingRect(item)
    rect_min = cv2.minAreaRect(item)
    x = rect[0]
    y = rect[1]
    weight = rect[2]
    height = rect[3]
    # print(weight / height)
    # A standard plate is 440 mm x 140 mm, an aspect ratio of about 3.14
    if (weight > (height * 2)) and (weight < (height * 5)):
        image = rawImage[y:y + height, x:x + weight]
        car_contours.append(image)
        car_min_contours.append(rect_min)
        # plt_show0(image)
car_images = adjust_image(car_min_contours, car_contours)
# for car in car_images:
#     plt_show0(car)
colors = []
color_images = []
for contour in car_images:
    color, image = recognition_color(contour)
    colors.append(color)
    # plt_show0(image)
    color_images.append(image)
for index, car in enumerate(color_images):
    if colors[index] in ("blue", "green", "yello"):
        car_image = car.copy()
        print(colors[index])
        plt_show0(car)
Conclusion
That wraps up the project.
Part of the code is adapted from other people's work, and I am still learning from those authors. I have kept optimizing and tuning the parameters, but plates that are strongly tilted, or that occupy only a small area of the image, are still missed; readers are welcome to modify the code and look for a better solution.
Character segmentation and recognition are not covered yet; I plan to study them next.
Reference: https://github.com/wzh191920/License-Plate-Recognition