From 8e9ec5135c73193d8c34b2e31e8201082c747e7b Mon Sep 17 00:00:00 2001
From: Edmond Yoo <hj3yoo@uwaterloo.ca>
Date: Mon, 20 Aug 2018 22:40:33 +0000
Subject: [PATCH] Successful detection algorithm for a single card

---
 card_detector.py |  146 +++++++++++++++++++++++++++++++++++-------------
 1 file changed, 106 insertions(+), 40 deletions(-)

diff --git a/card_detector.py b/card_detector.py
index 81e3b6b..c405d8a 100644
--- a/card_detector.py
+++ b/card_detector.py
@@ -1,34 +1,44 @@
 import cv2
 import numpy as np
 import pandas as pd
+import math
+from screeninfo import get_monitors
 
-if __name__ == '__main__':
-    # img_test = cv2.imread('data/rtr-174-jarad-golgari-lich-lord.jpg')
-    # img_test = cv2.imread('data/cn2-78-queen-marchesa.png')
-    img_test = cv2.imread('data/c16-143-burgeoning.png')
-    # img_test = cv2.imread('data/li38_handOfCards.jpg')
-    # img_test = cv2.imread('data/pro_tour_side.png')
-    img_gray = cv2.cvtColor(img_test, cv2.COLOR_BGR2GRAY)
-    _, img_thresh = cv2.threshold(img_gray, 50, 255, cv2.THRESH_BINARY)
-    # cv2.imshow('original', img_test)
-    cv2.imshow('threshold', img_thresh)
+def detect_a_card(img, thresh_val=80, blur_radius=None, dilate_radius=None, min_hyst=80, max_hyst=200,
+                  min_line_length=None, max_line_gap=None, debug=False):
+    dim_img = (len(img[0]), len(img)) # (width, height)
+    # Intermediate variables
 
-    kernel = np.ones((7, 7), np.uint8)
-    img_dilate = cv2.dilate(img_thresh, kernel, iterations=1)
-    # img_erode = cv2.erode(img_thresh, kernel, iterations=1)
-    # img_open = cv2.morphologyEx(img_thresh, cv2.MORPH_OPEN, kernel)
-    img_close = cv2.morphologyEx(img_thresh, cv2.MORPH_CLOSE, kernel)
-    cv2.imshow('dilated', img_dilate)
-    # cv2.imshow('eroded', img_erode)
-    # cv2.imshow('opened', img_open)
-    # cv2.imshow('closed', img_close)
-    img_edge = cv2.Canny(img_dilate, 100, 200)
-    cv2.imshow('edge', img_edge)
+    # Default values
+    if blur_radius is None:
+        blur_radius = math.floor(min(dim_img) / 100 + 0.5) // 2 * 2 + 1  # Rounded to the nearest odd
+    if dilate_radius is None:
+        dilate_radius = math.floor(min(dim_img) / 100)
+    if min_line_length is None:
+        min_line_length = min(dim_img) / 10
+    if max_line_gap is None:
+        max_line_gap = min(dim_img) / 10
 
-    lines = cv2.HoughLines(img_edge, 1, np.pi / 180, 200)
-    if lines is not None:
+    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+    # Median blur better removes background textures than Gaussian blur
+    img_blur = cv2.medianBlur(img_gray, blur_radius)
+    # Truncate the bright area while detecting the border
+    _, img_thresh = cv2.threshold(img_blur, thresh_val, 255, cv2.THRESH_TRUNC)
+
+    # Dilate the image to emphasize thick borders around the card
+    kernel_dilate = np.ones((dilate_radius, dilate_radius), np.uint8)
+    img_dilate = cv2.dilate(img_thresh, kernel_dilate, iterations=1)
+
+    # Canny edge - low minimum hysteresis to detect glowed area,
+    # and high maximum hysteresis to compensate for high false positives.
+    img_canny = cv2.Canny(img_dilate, min_hyst, max_hyst)
+
+    # Apply Hough transformation to detect the edges
+    '''
+    detected_lines = cv2.HoughLines(img_canny, 1, np.pi / 180, 200)
+    if detected_lines is not None:
         img_hough = cv2.cvtColor(img_dilate.copy(), cv2.COLOR_GRAY2BGR)
-        for line in lines:
+        for line in detected_lines:
             rho, theta = line[0]
             a = np.cos(theta)
             b = np.sin(theta)
@@ -39,25 +49,81 @@
             x2 = int(x0 - 1000 * (-b))
             y2 = int(y0 - 1000 * (a))
             cv2.line(img_hough, (x1, y1), (x2, y2), (0, 0, 255), 2)
-        cv2.imshow('hough', img_hough)
     else:
         print('Hough couldn\'t find any lines')
+        return False
     '''
-    lines = cv2.HoughLinesP(img_edge, 1, np.pi / 180, 200, 50, 100)
-    if lines is not None:
-        img_hough = cv2.cvtColor(img_dilate.copy(), cv2.COLOR_GRAY2BGR)
-        for line in lines:
-            x1, y1, x2, y2 = line[0]
-            cv2.line(img_hough, (x1, y1), (x2, y2), (0, 0, 255), 3)
-        cv2.imshow('hough', img_hough)
-    else:
+    detected_lines = cv2.HoughLinesP(img_canny, 1, np.pi / 180, threshold=60,
+                                     minLineLength=min_line_length,
+                                     maxLineGap=max_line_gap)
+    card_found = detected_lines is not None
+
+    if card_found:
+        if debug:
+            img_hough = cv2.cvtColor(img_dilate.copy(), cv2.COLOR_GRAY2BGR)
+            for line in detected_lines:
+                x1, y1, x2, y2 = line[0]
+                cv2.line(img_hough, (x1, y1), (x2, y2), (0, 0, 255), 3)
+    elif not debug:
         print('Hough couldn\'t find any lines')
 
-    img_contour, contours, hierchy = cv2.findContours(img_dilate, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
-    img_contour = cv2.cvtColor(img_contour, cv2.COLOR_GRAY2BGR)
-    if len(contours) > 0:
-        cv2.drawContours(img_contour, contours, -1, (0, 0, 255), 1)
+    # Debug: display intermediate results from various steps
+    if debug:
+        '''
+        cv2.imshow('Original', img)
+        cv2.imshow('Thresholded', img_thresh)
+        cv2.imshow('Dilated', img_dilate)
+        cv2.imshow('Canny Edge', img_canny)
+        if card_found:
+            cv2.imshow('Detected Lines', img_hough)
+        '''
+        img_blank = np.zeros((len(img), len(img[0]), 3), np.uint8)
+        img_thresh = cv2.cvtColor(img_thresh, cv2.COLOR_GRAY2BGR)
+        #img_dilate = cv2.cvtColor(img_dilate, cv2.COLOR_GRAY2BGR)
+        img_canny = cv2.cvtColor(img_canny, cv2.COLOR_GRAY2BGR)
+        if not card_found:
+            img_hough = img_blank
 
-    cv2.imshow('contours', img_contour)
-    '''
-    cv2.waitKey(0
\ No newline at end of file
+        # Append all images together
+        img_row_1 = np.concatenate((img, img_thresh), axis=1)
+        img_row_2 = np.concatenate((img_canny, img_hough), axis=1)
+        img_result = np.concatenate((img_row_1, img_row_2), axis=0)
+
+        # Resize the final image to fit into the main monitor's resolution
+        screen_size = get_monitors()[0]
+        resize_ratio = max(len(img_result[0]) / screen_size.width, len(img_result) / screen_size.height, 1)
+        img_result = cv2.resize(img_result, (int(len(img_result[0]) // resize_ratio),
+                                             int(len(img_result) // resize_ratio)))
+        cv2.imshow('Result', img_result)
+        cv2.waitKey(0)
+
+    # TODO: output meaningful data
+    return card_found
+
+
+def main():
+    img_test = cv2.imread('data/tilted_card_2.jpg')
+    card_found = detect_a_card(img_test,
+                               dilate_radius=2,
+                               min_hyst=30,
+                               max_hyst=80,
+                               min_line_length=10,
+                               max_line_gap=50,
+                               debug=True)
+    if card_found:
+        return
+
+    for dilate_radius in range(1, 6):
+        for min_hyst in range(50, 91, 10):
+            for max_hyst in range(180, 119, -20):
+                print('dilate_radius=%d, min_hyst=%d, max_hyst=%d: ' % (dilate_radius, min_hyst, max_hyst),
+                      end='', flush=True)
+                card_found = detect_a_card(img_test, dilate_radius=dilate_radius,
+                                           min_hyst=min_hyst, max_hyst=max_hyst, debug=True)
+                if card_found:
+                    print('Card found')
+                else:
+                    print('Not found')
+
+if __name__ == '__main__':
+    main()

--
Gitblit v1.10.0