我相信 cv2.imread() 可以正常加载图像，但由于尺寸为 2976x3838，图像太大，您的 IDE 无法完整显示。我认为您错误地使用了 cv2.HoughLinesP()。下面介绍一种不依赖 cv2.HoughLinesP() 的替代检测直线的方法。
思路是先进行阈值处理，然后找到木板的边界框以创建蒙版。基于该蒙版执行透视变换，得到自顶向下的视图，这样可以更好地检测直线。
一旦检测到木板，就可以提取 ROI。
然后我们只需分别检测垂直和水平方向的直线。
结果
import cv2
import numpy as np
def perspective_transform(image, corners):
    """Return a top-down ("bird's eye") view of the quadrilateral region
    of `image` bounded by `corners`.

    Parameters
    ----------
    image : np.ndarray
        Source BGR image.
    corners : array-like of shape (4, 1, 2)
        Four corner points in OpenCV contour format, ordered
        top-right, top-left, bottom-left, bottom-right
        (the order produced by cv2.approxPolyDP on this board —
        TODO confirm for other inputs).

    Returns
    -------
    np.ndarray
        Warped image whose width/height are the maximum distances
        between the corresponding opposite corner pairs.
    """
    def order_corner_points(corners):
        # Flatten contour-format points ([[x, y]]) to (x, y) tuples.
        # Index 0 - top-right, 1 - top-left, 2 - bottom-left, 3 - bottom-right
        corners = [(corner[0][0], corner[0][1]) for corner in corners]
        top_r, top_l, bottom_l, bottom_r = corners[0], corners[1], corners[2], corners[3]
        # Return in clockwise order starting from top-left.
        return (top_l, top_r, bottom_r, bottom_l)

    # Order points in clockwise order
    ordered_corners = order_corner_points(corners)
    # BUG FIX: the original unpacked only two of the four corners
    # (`top_l, bottom_l = ordered_corners`), which raises ValueError.
    top_l, top_r, bottom_r, bottom_l = ordered_corners

    # Width of the new image: the max distance between
    # (bottom right and bottom left) or (top right and top left) points.
    width_A = np.sqrt(((bottom_r[0] - bottom_l[0]) ** 2) + ((bottom_r[1] - bottom_l[1]) ** 2))
    width_B = np.sqrt(((top_r[0] - top_l[0]) ** 2) + ((top_r[1] - top_l[1]) ** 2))
    width = max(int(width_A), int(width_B))

    # Height of the new image: the max distance between
    # (top right and bottom right) or (top left and bottom left) points.
    height_A = np.sqrt(((top_r[0] - bottom_r[0]) ** 2) + ((top_r[1] - bottom_r[1]) ** 2))
    height_B = np.sqrt(((top_l[0] - bottom_l[0]) ** 2) + ((top_l[1] - bottom_l[1]) ** 2))
    height = max(int(height_A), int(height_B))

    # Destination rectangle in the same clockwise order as ordered_corners:
    # top-left, top-right, bottom-right, bottom-left.
    # BUG FIX: the original listed only three destination points;
    # cv2.getPerspectiveTransform requires exactly four source/destination pairs.
    dimensions = np.array([[0, 0], [width - 1, 0],
                           [width - 1, height - 1], [0, height - 1]],
                          dtype="float32")

    # Convert source corners to the float32 format OpenCV expects.
    ordered_corners = np.array(ordered_corners, dtype="float32")

    # Compute the homography and warp the image to the top-down view.
    matrix = cv2.getPerspectiveTransform(ordered_corners, dimensions)
    return cv2.warpPerspective(image, matrix, (width, height))
# --- Detect the board, warp it to a top-down view, then highlight grid lines ---

# Load the image and keep an untouched copy for masking/warping.
image = cv2.imread('1.jpg')
original = image.copy()

# Smooth while preserving edges, then Otsu-threshold (inverted so the
# board becomes white foreground).
blur = cv2.bilateralFilter(image, 9, 75, 75)
gray = cv2.cvtColor(blur, cv2.COLOR_BGR2GRAY)
# BUG FIX: cv2.threshold takes (src, thresh, maxval, type); the original
# call dropped the thresh/maxval arguments.
thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)[1]

# Find external contours; the [0]/[1] dance handles both the OpenCV 4
# (2-tuple) and OpenCV 3 (3-tuple) return signatures.
cnts = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]

mask = np.zeros(image.shape, dtype=np.uint8)
transformed = None
for c in cnts:
    area = cv2.contourArea(c)
    peri = cv2.arcLength(c, True)
    approx = cv2.approxPolyDP(c, 0.015 * peri, True)
    # Keep only large quadrilaterals — assumed to be the board
    # (the 150000 px^2 cutoff is tuned for this image size).
    if area > 150000 and len(approx) == 4:
        # BUG FIX: drawContours takes (img, contours, idx, color, thickness);
        # the original calls were missing arguments.
        cv2.drawContours(image, [c], -1, (36, 255, 12), 3)
        cv2.drawContours(mask, [c], -1, (255, 255, 255), -1)
        transformed = perspective_transform(original, approx)

# Keep only the board pixels from the original image.
mask = cv2.bitwise_and(mask, original)

# --- Detect horizontal lines on the top-down view ---
gray = cv2.cvtColor(transformed, cv2.COLOR_BGR2GRAY)
# BUG FIX: same missing thresh/maxval arguments as above.
board_thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_OTSU + cv2.THRESH_BINARY_INV)[1]

# A wide, 1-px-tall kernel keeps only long horizontal runs after opening.
horizontal_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (55, 1))
detect_horizontal = cv2.morphologyEx(board_thresh, cv2.MORPH_OPEN, horizontal_kernel, iterations=2)
# BUG FIX: findContours also requires the retrieval mode.
cnts = cv2.findContours(detect_horizontal, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
    cv2.drawContours(transformed, [c], -1, (36, 255, 12), 9)

# --- Detect vertical lines on the top-down view ---
# A tall, 1-px-wide kernel keeps only long vertical runs after opening.
vertical_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (1, 55))
# BUG FIX: morphologyEx requires the operation (MORPH_OPEN) argument.
detect_vertical = cv2.morphologyEx(board_thresh, cv2.MORPH_OPEN, vertical_kernel, iterations=2)
cnts = cv2.findContours(detect_vertical, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
# BUG FIX: the original found the vertical-line contours but never drew them.
for c in cnts:
    cv2.drawContours(transformed, [c], -1, (36, 255, 12), 9)

cv2.imwrite('thresh.png', thresh)
cv2.imwrite('image.png', image)
cv2.imwrite('mask.png', mask)
cv2.imwrite('transformed.png', transformed)
cv2.waitKey()
本文链接:https://www.f2er.com/3164727.html