首页 > 解决方案 > Pytesseract 没有给出预期的输出

问题描述

我是 Python 的新手,我正在使用 Haar 级联制作车牌识别系统。我的代码可以很好地检测车牌并绘制轮廓,但 pytesseract OCR 无法识别字符,给出了奇怪的结果。请帮忙。

使用 haar 级联检测到的板

在检测到的区域上制作的轮廓

这是输出

import cv2
import numpy as np
import pytesseract

# Haar cascade trained to locate licence plates in a grayscale image.
plate_cascade = cv2.CascadeClassifier('C:/Users/Jai/Desktop/Test/haarcascade_plate.xml')

img = cv2.imread('C:/Users/Jai/Desktop/Test/images/NumberPlates/IMG_20181029_194221.jpg', cv2.IMREAD_COLOR)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.array(gray, dtype='uint8')  # cvtColor already yields uint8; kept as a cheap no-op safeguard
cv2.imshow('gray', gray)

plates = plate_cascade.detectMultiScale(gray, 1.3, 5)

# Guard: in the original, roiGray was a NameError when no plate was detected.
roiGray = None
for (x, y, w, h) in plates:
    cv2.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 5)
    roiGray = gray[y:y + h, x:x + w]
    roiImg = img[y:y + h, x:x + w]

if roiGray is None:
    raise SystemExit('No licence plate detected by the cascade')

# Invert the ROI: tesseract works best with dark glyphs on a light background.
roiUGray = cv2.bitwise_not(roiGray)
cv2.imshow('img', img)
cv2.imshow('roi_gray', roiUGray)

ret, thresh = cv2.threshold(roiUGray, 127, 255, 0)
cv2.imshow("img", img)
height, width = thresh.shape
newImage = np.zeros((height, width, 3), np.uint8)  # already all-black; no extra fill needed

# findContours returns (image, contours, hierarchy) on OpenCV 3 but
# (contours, hierarchy) on OpenCV 2/4 — slicing [-2:] works on every version.
contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)[-2:]

num = 0
area = 0
for j in range(len(contours)):
    # hierarchy[0][j][3] != -1 selects contours that have a parent,
    # i.e. the inner character outlines rather than the plate border.
    if hierarchy[0][j][3] != -1:
        area = area + cv2.contourArea(contours[j])
        num += 1

# Original left avgArea undefined when num == 0, crashing in the loop below.
# With 0.0 the filter condition is never met and nothing is drawn.
avgArea = float(area) / float(num) if num != 0 else 0.0

for j in range(len(contours)):
    # Drop specks smaller than 20% of the average character area (noise).
    if hierarchy[0][j][3] != -1 and cv2.contourArea(contours[j]) > 0.2 * avgArea:
        cv2.drawContours(newImage, contours[j], -1, (255, 255, 255), 1)

# NOTE(review): a (1, 1) Gaussian kernel is a no-op; kept for parity with the
# question, but a (3, 3) kernel or a median blur would actually denoise.
blur = cv2.GaussianBlur(newImage, (1, 1), 0)
cv2.imshow("roi", blur)

ocr_result = pytesseract.image_to_string(blur, lang='eng')
print(ocr_result)

cv2.waitKey(0)
cv2.destroyAllWindows()

标签: python、python-3.x、opencv、python-tesseract

解决方案


该方法使用轮廓检测来找到车牌的区域,并对其进行透视变换。然后它使用自适应阈值来检测数字,并进行中值模糊以消除干扰 pytesseract 识别的噪声。

蓝色框来自原始图像。红框来自我的车牌识别haar cascade。绿色是车牌的轮廓检测。

这是透视变换的输出。我使用 imutils 模块来执行此操作。

这是我使用 skimage 模块进行自适应阈值处理和模糊后的输出。

使用它,我得到了以下输出:

\fUP1ADN7120

并删除所有不是大写或数字的字符给出:

UP1ADN7120

只有一个字错了。

这种方法还不错,但是使用另一种方法可以做得更好。如果这对您不起作用,那么您可以创建一个 CNN。

这是代码:

import cv2
import numpy as np
import pytesseract
import imutils.perspective
from skimage.filters import threshold_local

# OpenCV's bundled Russian-plate cascade; works for many rectangular plates.
plate_cascade = cv2.CascadeClassifier('/usr/local/lib/python3.5/dist-packages/cv2/data/haarcascade_russian_plate_number.xml')

img = cv2.imread('GNA0d.jpg', cv2.IMREAD_COLOR)

gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
gray = np.array(gray, dtype='uint8')

plates = plate_cascade.detectMultiScale(gray, 1.3, 5)

# Guard: in the original, roiGray/roiImg were NameErrors when nothing was detected.
roiGray = roiImg = None
for (x, y, w, h) in plates:
    cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
    roiGray = gray[y:y + h, x:x + w]
    roiImg = img[y:y + h, x:x + w]

if roiGray is None:
    raise SystemExit('No licence plate detected by the cascade')

blur = cv2.GaussianBlur(roiGray, (5, 5), 0)
edges = cv2.Canny(blur, 75, 200)

# findContours returns (image, contours, hierarchy) on OpenCV 3 but
# (contours, hierarchy) on OpenCV 2/4 — [-2] picks the contour list everywhere
# (the original's [1] only works on OpenCV 3).
contours = cv2.findContours(edges.copy(), cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2]
contours = sorted(contours, key=cv2.contourArea, reverse=True)[:5]  # biggest 5 only, for speed

# Find the first 4-sided approximation among the biggest contours: the plate outline.
screenCnt = None
for contour in contours:
    peri = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.02 * peri, True)

    if len(approx) == 4:
        screenCnt = approx
        break

# Guard: the original raised NameError here when no quadrilateral was found.
if screenCnt is None:
    raise SystemExit('Could not find a 4-point plate outline')

orig = roiImg.copy()
cv2.drawContours(roiImg, [screenCnt], -1, (0, 255, 0), 2)

# Rectify the plate to a fronto-parallel view (perspective transform).
warped = imutils.perspective.four_point_transform(orig, screenCnt.reshape(4, 2))
graywarp = imutils.perspective.four_point_transform(roiGray, screenCnt.reshape(4, 2))

# Adaptive (local) threshold copes with uneven lighting across the plate.
T = threshold_local(graywarp, 11, offset=10, method="gaussian")
graywarp = (graywarp > T).astype("uint8") * 255

# Median blur removes salt-and-pepper specks that confuse tesseract.
graywarp = cv2.medianBlur(graywarp, 3)

text = pytesseract.image_to_string(graywarp)
print(text)
# Plates contain only capitals and digits; strip everything else.
print("".join([c for c in text if c.isupper() or c.isdigit()]))


cv2.imshow("warped", warped)
cv2.imshow("graywarp", graywarp)
cv2.imshow('img', img)


cv2.waitKey(0)
cv2.destroyAllWindows()

相当多的内容来自pyimagesearch,他比我解释得更好。


推荐阅读