import cv2
import numpy as np
import os
import sys
import math
from operator import itemgetter


def remove_glare(img):
    """
    Inspired by:
    http://www.amphident.de/en/blog/preprocessing-for-automatic-pattern-identification-in-wildlife-removing-glare.html
    The idea is to find areas that have low saturation but high value, which is what glare usually looks like.
    """
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    _, s, v = cv2.split(img_hsv)
    non_sat = (s < 32) * 255  # Find all pixels that are not very saturated

    # Of the non-saturated pixels, keep only the very bright ones: those are the
    # glare candidates. (This step is reconstructed from the surrounding logic;
    # the value cutoff of 200 is an assumption.)
    v2 = v.copy()
    v2[non_sat == 0] = 0
    glare = (v2 > 200) * 255

    # Slightly increase the area for each pixel
    disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))  # kernel size is an assumption
    glare = cv2.dilate(glare.astype(np.uint8), disk)

    # Alternative: inpaint the glare region instead of flattening it to grey
    #corrected = cv2.inpaint(img, glare, 7, cv2.INPAINT_TELEA)
    glare_reduced = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 200
    glare = cv2.cvtColor(glare, cv2.COLOR_GRAY2BGR)
    corrected = np.where(glare, glare_reduced, img)
    return corrected
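
# Usage sketch for remove_glare (the path below is just the example image used in
# main(); the side-by-side view shows glare flattened to a light grey so it no
# longer dominates downstream edge and contour detection):
#   img = cv2.imread('../data/test1.jpg')
#   side_by_side = np.concatenate((img, remove_glare(img)), axis=1)
#   cv2.imshow('glare removed', side_by_side)
#   cv2.waitKey(0)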


def find_card(img, thresh_val=80, blur_radius=None, dilate_radius=None, min_hyst=80, max_hyst=200, min_line_length=None, max_line_gap=None, debug=False):
    # Default values, derived from the image's short side
    if blur_radius is None:
        blur_radius = math.floor(min(img.shape[:2]) / 100 + 0.5) // 2 * 2 + 1  # Rounded to the nearest odd
    if dilate_radius is None:
        dilate_radius = math.floor(min(img.shape[:2]) / 67 + 0.5)
    if min_line_length is None:
        min_line_length = min(img.shape[:2]) / 3
    if max_line_gap is None:
        max_line_gap = min(img.shape[:2]) / 10

    thresh_radius = math.floor(min(img.shape[:2]) / 50 + 0.5) // 2 * 2 + 1  # Rounded to the nearest odd
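    # Worked example of the rounding above for a 480px short side:
    # 480 / 50 = 9.6 → floor(9.6 + 0.5) = 10 → 10 // 2 * 2 + 1 = 11
    # (round to the nearest integer, then up to the next odd number if even,
    # since OpenCV blur/threshold kernel sizes must be odd)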

    if debug:
        print(blur_radius, dilate_radius, thresh_radius, min_line_length, max_line_gap)
    '''
    # Manual overrides for quick experiments:
    blur_radius = 3
    dilate_radius = 3
    thresh_radius = 3
    min_line_length = 5
    max_line_gap = 5
    '''

    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Median blur removes background textures better than Gaussian blur
    img_blur = cv2.medianBlur(img_gray, blur_radius)
    # Truncate the bright area while detecting the border
    img_thresh = cv2.adaptiveThreshold(img_blur, 128, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, thresh_radius, 5)
    #_, img_thresh = cv2.threshold(img_blur, thresh_val, 255, cv2.THRESH_TRUNC)

    # Dilate then erode (a morphological closing) to emphasize thick borders around the card
    kernel_dilate = np.ones((dilate_radius, dilate_radius), np.uint8)
    img_dilate = cv2.dilate(img_thresh, kernel_dilate, iterations=1)
    img_dilate = cv2.erode(img_dilate, kernel_dilate, iterations=1)

    img_contour = img_dilate.copy()
    # findContours returns (image, contours, hierarchy) in OpenCV 3.x but
    # (contours, hierarchy) in 4.x; indexing [-2] works with both versions
    contours = cv2.findContours(img_contour, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
    img_contour = cv2.cvtColor(img_contour, cv2.COLOR_GRAY2BGR)
    img_contour = cv2.drawContours(img_contour, contours, -1, (128, 0, 0), 1)

    # Find the contour with the biggest area, which should be the card's outline
    if contours:
        c = max(contours, key=cv2.contourArea)
        x, y, w, h = cv2.boundingRect(c)
        # Draw the card contour (in green)
        img_contour = cv2.drawContours(img_contour, [c], -1, (0, 255, 0), 1)

    # Canny edge detection - low minimum hysteresis to detect glowing areas,
    # and high maximum hysteresis to compensate for the high false-positive rate.
    img_canny = cv2.Canny(img_dilate, min_hyst, max_hyst)

    # Note: the Hough transform runs on the dilated image, not the Canny edges;
    # the Canny output is kept as the background for the line visualization below.
    detected_lines = cv2.HoughLinesP(img_dilate, 1, np.pi / 180, threshold=300,
                                     minLineLength=min_line_length,
                                     maxLineGap=max_line_gap)
    card_found = detected_lines is not None
    if card_found and debug:
        print(len(detected_lines))

    img_hough = cv2.cvtColor(img_canny.copy(), cv2.COLOR_GRAY2BGR)
    if card_found:
        for line in detected_lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(img_hough, (x1, y1), (x2, y2), (0, 0, 255), 1)

    # Convert the single-channel stages to BGR so they can be tiled next to colour images
    img_thresh = cv2.cvtColor(img_thresh, cv2.COLOR_GRAY2BGR)
    img_dilate = cv2.cvtColor(img_dilate, cv2.COLOR_GRAY2BGR)
    #img_canny = cv2.cvtColor(img_canny, cv2.COLOR_GRAY2BGR)
    return img_thresh, img_dilate, img_contour, img_hough
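
# Usage sketch for find_card on a pre-cropped card image (the path is a placeholder).
# All four returned images are BGR with the crop's dimensions, so they tile side by side:
#   crop = cv2.imread('../data/test1.jpg')
#   img_thresh, img_dilate, img_contour, img_hough = find_card(crop, debug=True)
#   stages = np.concatenate((crop, img_thresh, img_dilate, img_contour, img_hough), axis=1)
#   cv2.imshow('find_card stages', stages)
#   cv2.waitKey(0)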


def detect_frame(net, classes, img, thresh_conf=0.5, thresh_nms=0.4, in_dim=(416, 416), display=True, out_path=None):
    img_copy = img.copy()
    # Create a 4D blob from a frame and run a forward pass through the network
    blob = cv2.dnn.blobFromImage(img, 1 / 255, in_dim, [0, 0, 0], swapRB=True, crop=False)
    net.setInput(blob)
    outs = net.forward(net.getUnconnectedOutLayersNames())
    # postprocess() is assumed to be defined elsewhere in this module (as in the
    # standard OpenCV YOLO sample): it should apply thresh_conf/thresh_nms and
    # return a list of (class_id, confidence, box) tuples, box = (left, top, width, height).
    obj_list = postprocess(img_copy, classes, outs, thresh_conf, thresh_nms)

    no_glare = remove_glare(img_copy)
    img_concat = np.concatenate((img, no_glare), axis=1)
    cv2.imshow('result', img_concat)

    for i in range(len(obj_list)):
        class_id, confidence, box = obj_list[i]
        left, top, width, height = box
        '''
        # Optional debug view: show the saturation and value channels of each detection
        img_snip = img[max(0, top):min(img.shape[0], top + height), max(0, left):min(img.shape[1], left + width)]
        #cv2.imshow('feature#%d' % i, img_snip)
        img_hsv = cv2.cvtColor(img_snip, cv2.COLOR_BGR2HSV)
        h, s, v = cv2.split(img_hsv)
        #h = cv2.cvtColor(h, cv2.COLOR_GRAY2BGR)
        s = cv2.cvtColor(s, cv2.COLOR_GRAY2BGR)
        v = cv2.cvtColor(v, cv2.COLOR_GRAY2BGR)
        img_concat = np.concatenate((img_snip, s, v), axis=1)
        cv2.imshow('feature#%d - hsv' % i, img_concat)
        '''
        img_snip = img_copy[max(0, top):min(img.shape[0], top + height),
                            max(0, left):min(img.shape[1], left + width)]
        img_thresh, img_dilate, img_canny, img_hough = find_card(img_snip)
        img_concat = np.concatenate((img_snip, img_thresh, img_dilate, img_canny, img_hough), axis=1)
        cv2.imshow('feature#%d' % i, img_concat)

    # Blank out feature windows left over from an earlier frame that had more
    # detections. max_num_obj is assumed to be a module-level constant giving the
    # maximum number of feature windows ever opened.
    for i in range(len(obj_list), max_num_obj):
        cv2.imshow('feature#%d' % i, np.zeros((1, 1), dtype=np.uint8))
    if len(obj_list) > 0:
        cv2.waitKey(0)
    if out_path is not None:
        # vid_writer is assumed to be a cv2.VideoWriter opened at module level for out_path
        vid_writer.write(img_copy.astype(np.uint8))
    cv2.waitKey(1)
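
# Hypothetical driver sketch for detect_frame over a webcam stream ('cap' is an
# assumption; 'net' and 'classes' would come from the setup in main() below):
#   cap = cv2.VideoCapture(0)
#   while cv2.waitKey(1) < 0:
#       has_frame, frame = cap.read()
#       if not has_frame:
#           break
#       detect_frame(net, classes, frame)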


def main():
    # Specify paths for all necessary files
    #test_path = os.path.abspath('../data/test18.jpg')
    test_path = os.path.abspath('../data/test1.jpg')
    weight_path = 'weights/second_general/tiny_yolo_final.weights'
    cfg_path = 'cfg/tiny_yolo.cfg'
    class_path = 'data/obj.names'