Constantin Wenger
2022-02-01 be11e02f1d08beb40fff7d54d1c7a343ba10b275
opencv_dnn.py
old mode 100644 new mode 100755
@@ -1,186 +1,790 @@
import argparse
import ast
import collections
import cv2
import imagehash as ih
import numpy as np
from operator import itemgetter
import os
import sys
import pandas as pd
from PIL import Image
import time
from multiprocessing import Pool
from config import Config
import fetch_data
# Disclaimer: the majority of the basic framework in this file is adapted from the following tutorial:
# https://www.learnopencv.com/deep-learning-based-object-detection-using-yolov3-with-opencv-python-c/
"""
As of the current version, the YOLO network has been removed from this code during optimization.
It was found that YOLO added too much processing delay, and the benefits of using it couldn't justify
such a heavy cost.
If you're interested to see the implementation using YOLO, please check out the previous commit:
https://github.com/hj3yoo/mtg_card_detector/tree/dea64611730c84a59c711c61f7f80948f82bcd31
"""
def do_calc(args):
    card_pool = args[0]
    hash_size = args[1]
    new_pool = pd.DataFrame(columns=list(card_pool.columns.values))
    for hs in hash_size:
        new_pool['card_hash_%d' % hs] = np.nan
        #new_pool['art_hash_%d' % hs] = np.nan
    # The set-symbol hash is computed once at a fixed hash size of 64
    new_pool['set_hash_64'] = np.nan
    for ind, card_info in card_pool.iterrows():
        if ind % 100 == 0:
            print('Calculating hashes: %dth card' % ind)
        card_names = []
        # Double-faced cards have a different json format than normal cards
        if card_info['layout'] in ['transform', 'double_faced_token']:
            if isinstance(card_info['card_faces'], str):
                card_faces = ast.literal_eval(card_info['card_faces'])
            else:
                card_faces = card_info['card_faces']
            for i in range(len(card_faces)):
                card_names.append(card_faces[i]['name'])
        else:  # if card_info['layout'] == 'normal':
            card_names.append(card_info['name'])
        for card_name in card_names:
            # Fetch the image - name can be found based on the card's information
            card_info['name'] = card_name
            cname = card_name
            # 'con' is a reserved filename on Windows, so it is stored as 'con__'
            if cname == 'con':
                cname = 'con__'
            img_name = '%s/card_img/png/%s/%s_%s.png' % (Config.data_dir, card_info['set'],
                                                         card_info['collector_number'],
                                                         fetch_data.get_valid_filename(cname))
            card_img = cv2.imread(img_name)
            # If the image doesn't exist, download it from the URL
            if card_img is None:
                set_name = card_info['set']
                if set_name == 'con':
                    set_name = 'con__'
                fetch_data.fetch_card_image(card_info,
                                            out_dir='%s/card_img/png/%s' % (Config.data_dir, set_name))
                card_img = cv2.imread(img_name)
            if card_img is None:
                print('WARNING: card %s is not found!' % img_name)
                continue
            """
            img_cc = cv2.cvtColor(card_img, cv2.COLOR_BGR2GRAY)
            img_thresh = cv2.adaptiveThreshold(img_cc, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, 5)
            # Dilute the image, then erode them to remove minor noises
            kernel = np.ones((3, 3), np.uint8)
            img_dilate = cv2.dilate(img_thresh, kernel, iterations=1)
            img_erode = cv2.erode(img_dilate, kernel, iterations=1)
            cnts, hier = cv2.findContours(img_erode, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            cnts2 = sorted(cnts, key=cv2.contourArea, reverse=True)
            cnts2 = cnts2[:10]
            if True:
                cv2.drawContours(img_cc, cnts2, -1, (0, 255, 0), 3)
                #cv2.imshow('Contours', card_img)
                #cv2.waitKey(10000)
            """
            set_img = card_img[595:635, 600:690]
            #cv2.imshow(card_info['name'], set_img)
            # Compute value of the card's perceptual hash, then store it to the database
            #img_art = Image.fromarray(card_img[121:580, 63:685])  # For 745*1040 size card image
            img_card = Image.fromarray(card_img)
            img_set = Image.fromarray(set_img)
            #cv2.imshow('Set' + card_names[0], set_img)
            # The set-symbol hash is always computed at a fixed hash size of 64
            set_hash = ih.phash(img_set, hash_size=64)
            card_info['set_hash_64'] = set_hash
            for hs in hash_size:
                card_hash = ih.phash(img_card, hash_size=hs)
                card_info['card_hash_%d' % hs] = card_hash
                #art_hash = ih.phash(img_art, hash_size=hs)
                #card_info['art_hash_%d' % hs] = art_hash
            new_pool.loc[0 if new_pool.empty else new_pool.index.max() + 1] = card_info
    return new_pool
def calc_image_hashes(card_pool, save_to=None, hash_size=None):
    """
    Calculate the perceptual hash (pHash) value for each card in the database, then store them if needed
    :param card_pool: pandas dataframe containing all card information
    :param save_to: path for the pickle file to be saved
    :param hash_size: hash size (or list of sizes) for the pHash algorithm
    :return: pandas dataframe with the hash columns added
    """
    if hash_size is None:
        hash_size = [16, 32]
    elif isinstance(hash_size, int):
        hash_size = [hash_size]
    num_cores = 16
    # Aim for roughly 1000 cards per partition, but use at least one partition per core
    num_partitions = max(round(card_pool.shape[0] / 1000), min(num_cores, card_pool.shape[0]))
    pool = Pool(num_cores)
    df_split = np.array_split(card_pool, num_partitions)
    new_pool = pd.concat(pool.map(do_calc, [(split, hash_size) for split in df_split]))
    pool.close()
    pool.join()
    # Since some double-faced cards may result in two different cards, create a new dataframe to store the result
    if save_to is not None:
        new_pool.to_pickle(save_to)
    return new_pool
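# Usage sketch (illustrative; the csv path and set code are placeholders):
#   df = fetch_data.load_all_cards_text('%s/csv/m20.csv' % Config.data_dir)
#   df = calc_image_hashes(df, save_to='card_pool.pck', hash_size=[16, 32])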
# www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
def order_points(pts):
    """
    Initialize a list of coordinates that will be ordered such that the first entry in the list is the top-left,
    the second entry is the top-right, the third is the bottom-right, and the fourth is the bottom-left
    :param pts: array containing 4 points
    :return: ordered list of 4 points
    """
    rect = np.zeros((4, 2), dtype="float32")
    # the top-left point will have the smallest sum, whereas
    # the bottom-right point will have the largest sum
    s = pts.sum(axis=1)
    rect[0] = pts[np.argmin(s)]
    rect[2] = pts[np.argmax(s)]
    # now, compute the difference between the points, the
    # top-right point will have the smallest difference,
    # whereas the bottom-left will have the largest difference
    diff = np.diff(pts, axis=1)
    rect[1] = pts[np.argmin(diff)]
    rect[3] = pts[np.argmax(diff)]
    # return the ordered coordinates
    return rect
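# Worked example (illustrative): for pts = [[10, 10], [200, 12], [198, 150], [8, 148]],
# the coordinate sums are [20, 212, 348, 156], so index 0 (smallest sum) becomes the
# top-left corner and index 2 (largest sum) the bottom-right; the diffs y - x are
# [0, -188, -48, 140], so index 1 is the top-right and index 3 the bottom-left.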
def four_point_transform(image, pts):
    """
    Transform a quadrilateral section of an image into a rectangular area
    From: www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
    :param image: source image
    :param pts: 4 corners of the quadrilateral
    :return: rectangular image of the specified area
    """
    # obtain a consistent order of the points and unpack them
    # individually
    rect = order_points(pts)
    (tl, tr, br, bl) = rect
    # compute the width of the new image, which will be the
    # maximum distance between bottom-right and bottom-left
    # x-coordinates or the top-right and top-left x-coordinates
    widthA = np.sqrt(((br[0] - bl[0]) ** 2) + ((br[1] - bl[1]) ** 2))
    widthB = np.sqrt(((tr[0] - tl[0]) ** 2) + ((tr[1] - tl[1]) ** 2))
    maxWidth = max(int(widthA), int(widthB))
    # compute the height of the new image, which will be the
    # maximum distance between the top-right and bottom-right
    # y-coordinates or the top-left and bottom-left y-coordinates
    heightA = np.sqrt(((tr[0] - br[0]) ** 2) + ((tr[1] - br[1]) ** 2))
    heightB = np.sqrt(((tl[0] - bl[0]) ** 2) + ((tl[1] - bl[1]) ** 2))
    maxHeight = max(int(heightA), int(heightB))
    # now that we have the dimensions of the new image, construct
    # the set of destination points to obtain a "birds eye view",
    # (i.e. top-down view) of the image, again specifying points
    # in the top-left, top-right, bottom-right, and bottom-left
    # order
    dst = np.array([
        [0, 0],
        [maxWidth - 1, 0],
        [maxWidth - 1, maxHeight - 1],
        [0, maxHeight - 1]], dtype="float32")
    # compute the perspective transform matrix and then apply it
    mat = cv2.getPerspectiveTransform(rect, dst)
    warped = cv2.warpPerspective(image, mat, (maxWidth, maxHeight))
    # If the warped image is wider than it is tall, rotate it by 90 degrees so the card is upright
    if maxWidth > maxHeight:
        center = (maxHeight / 2, maxHeight / 2)
        mat_rot = cv2.getRotationMatrix2D(center, 270, 1.0)
        warped = cv2.warpAffine(warped, mat_rot, (maxHeight, maxWidth))
    # return the warped image
    return warped
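# Usage sketch (illustrative): flatten a card contour returned by find_card() below:
#   pts = np.float32([p[0] for p in cnt])
#   img_card = four_point_transform(frame, pts)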
def remove_glare(img):
    """
    Reduce the effect of glaring in the image
    Inspired from:
    http://www.amphident.de/en/blog/preprocessing-for-automatic-pattern-identification-in-wildlife-removing-glare.html
    The idea is to find areas that have low saturation but high value, which is what glare usually looks like.
    :param img: source image
    :return: corrected image with glaring smoothened out
    """
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    _, s, v = cv2.split(img_hsv)
    non_sat = (s < 32) * 255  # Find all pixels that are not very saturated
    # Slightly decrease the area of the non-saturated pixels by an erosion operation.
    disk = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
    non_sat = cv2.erode(non_sat.astype(np.uint8), disk)
    # Set the brightness of all pixels that are still saturated to 0.
    v[non_sat == 0] = 0
    # filter out very bright pixels.
    glare = (v > 200) * 255
    # Slightly increase the area for each pixel
    glare = cv2.dilate(glare.astype(np.uint8), disk)
    glare_reduced = np.ones((img.shape[0], img.shape[1], 3), dtype=np.uint8) * 200
    glare = cv2.cvtColor(glare, cv2.COLOR_GRAY2BGR)
    corrected = np.where(glare, glare_reduced, img)
    return corrected
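# Usage sketch (illustrative): remove_glare() is not wired into the detection pipeline in
# this file, but it could be applied to a frame before contour detection, e.g.
#   frame = remove_glare(frame)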
def find_card(img, thresh_c=5, kernel_size=(3, 3), size_thresh=10000, debug=False):
    """
    Find contours of all cards in the image
    :param img: source image
    :param thresh_c: value of the constant C for adaptive thresholding
    :param kernel_size: dimension of the kernel used for dilation and erosion
    :param size_thresh: threshold for size (in pixels) of the contour to be a candidate
    :param debug: flag for showing intermediate images
    :return: list of candidate contours
    """
    # Typical pre-processing - grayscale, blurring, thresholding
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img_blur = cv2.medianBlur(img_gray, 5)
    img_thresh = cv2.adaptiveThreshold(img_blur, 255, cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY_INV, 11, thresh_c)
    if debug:
        cv2.imshow('Thres', img_thresh)
    # Dilate the image, then erode it to remove minor noise
    kernel = np.ones(kernel_size, np.uint8)
    img_dilate = cv2.dilate(img_thresh, kernel, iterations=1)
    img_erode = cv2.erode(img_dilate, kernel, iterations=1)
    if debug:
        cv2.imshow('Eroded', img_erode)
    # Find the contour
    cnts, hier = cv2.findContours(img_erode, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if len(cnts) == 0:
#        print('no contours')
        return []
    img_cont = cv2.cvtColor(img_erode, cv2.COLOR_GRAY2BGR)
    img_cont_base = img_cont.copy()
    cnts2 = sorted(cnts, key=cv2.contourArea, reverse=True)
    cnts2 = cnts2[:10]
#    for i in range(0, len(cnts2)):
#        print(i, len(cnts2[i]))
    if debug:
        cv2.drawContours(img_cont, cnts2, -1, (0, 255, 0), 3)
        cv2.imshow('Contours', img_cont)
    # The hierarchy from cv2.findContours() is similar to a tree: each node has access to its parent, its first
    # child, and its previous and next siblings
    # Using an iterative search, find the uppermost contours in the hierarchy that satisfy the condition
    # A candidate contour must be a rectangle (4 points after approximation) and larger than a threshold
    cnts_rect = []
    stack = [(0, hier[0][0])]
    while len(stack) > 0:
        i_cnt, h = stack.pop()
        i_next, i_prev, i_child, i_parent = h
        if i_next != -1:
            stack.append((i_next, hier[0][i_next]))
        cnt = cnts[i_cnt]
        size = cv2.contourArea(cnt)
        peri = cv2.arcLength(cnt, True)
        approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
        #print('Base Size:', size)
        #print('Len Approx:', len(approx))
        if size >= size_thresh and len(approx) == 4:
            # check whether there is a child contour very close in size to this one
            if i_child != -1:
                img_ccont = img_cont_base.copy()
                # collect all children (siblings of the first child)
                c_list = [cnts[i_child]]
                h_info = hier[0][i_child]
                while h_info[0] != -1:
                    cld = cnts[h_info[0]]
                    c_list.append(cld)
                    h_info = hier[0][h_info[0]]
                # child with biggest area
                c_list.sort(key=cv2.contourArea, reverse=True)
                c_cnt = c_list[0]  # the biggest child
                if debug:
                    cv2.drawContours(img_ccont, c_list[:1], -1, (0, 255, 0), 1)
                    cv2.imshow('CCont', img_ccont)
                c_size = cv2.contourArea(c_cnt)
                c_approx = cv2.approxPolyDP(c_cnt, 0.04 * peri, True)
                if len(c_approx) == 4 and (c_size/size) > 0.85:
                    rect = cv2.minAreaRect(c_cnt)
                    box = cv2.boxPoints(rect)
                    box = np.intp(box)
                    #print(c_cnt)
                    #print(box)
                    #print('CSize:', c_size, '%:', c_size/size)
                    # wrap each corner point to match the contour format (N, 1, 2)
                    cnts_rect.append(box.reshape(-1, 1, 2))
                else:
                    #print('CF:', (c_size/size))
                    #print('Size:', size)
                    cnts_rect.append(approx)
            else:
                #print('CF:', (c_size/size))
                #print('Size:', size)
                cnts_rect.append(approx)
        else:
            if i_child != -1:
                stack.append((i_child, hier[0][i_child]))
    return cnts_rect
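# Usage sketch (illustrative):
#   cnts = find_card(frame, size_thresh=10000)
#   cv2.drawContours(frame, cnts, -1, (0, 255, 0), 2)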
def draw_card_graph(exist_cards, card_pool, f_len):
    """
    Given the history of detected cards in the current and several previous frames, draw a simple graph
    displaying the detected cards with their confidence levels
    :param exist_cards: history of all detected cards in the previous (f_len) frames
    :param card_pool: pandas dataframe of all card's information
    :param f_len: length of the window (in frames) to consider for the confidence level
    :return: graph image
    """
    # Lots of constants to set the dimension of each elements
    w_card = 63  # Width of the card image displayed
    h_card = 88
    gap = 25  # Offset between each elements
    gap_sm = 10  # Small offset
    w_bar = 300  # Length of the confidence bar at 100%
    h_bar = 12
    txt_scale = 0.8
    n_cards_p_col = 4  # Number of cards displayed per one column
    w_img = gap + (w_card + gap + w_bar + gap) * 2  # Dimension of the entire graph (for 2 columns)
    h_img = 480
    img_graph = np.zeros((h_img, w_img, 3), dtype=np.uint8)
    x_anchor = gap
    y_anchor = gap
    i = 0
    # Cards are displayed from the most confident to the least
    # Confidence level is calculated by number of frames that the card was detected in
    for key, val in sorted(exist_cards.items(), key=itemgetter(1), reverse=True)[:n_cards_p_col * 2]:
        card_name = key[:key.find('(') - 1]
        card_set = key[key.find('(') + 1:key.find(')')]
        confidence = sum(val) / f_len
        card_info = card_pool[(card_pool['name'] == card_name) & (card_pool['set'] == card_set)].iloc[0]
        img_name = '%s/card_img/tiny/%s/%s_%s.png' % (Config.data_dir, card_info['set'],
                                                      card_info['collector_number'],
                                                      fetch_data.get_valid_filename(card_info['name']))
        # If the card image is not found, just leave it blank
        if os.path.exists(img_name):
            card_img = cv2.imread(img_name)
        else:
            card_img = np.ones((h_card, w_card, 3)) * 255
            cv2.putText(card_img, 'X', ((w_card - int(txt_scale * 25)) // 2, (h_card + int(txt_scale * 25)) // 2),
                        cv2.FONT_HERSHEY_SIMPLEX, txt_scale, (0, 0, 0), 2)
        # Insert the card image, card name, and confidence bar to the graph
        img_graph[y_anchor:y_anchor + h_card, x_anchor:x_anchor + w_card] = card_img
        cv2.putText(img_graph, '%s (%s)' % (card_name, card_set),
                    (x_anchor + w_card + gap, y_anchor + gap_sm + int(txt_scale * 25)), cv2.FONT_HERSHEY_SIMPLEX,
                    txt_scale, (255, 255, 255), 1)
        cv2.rectangle(img_graph, (x_anchor + w_card + gap, y_anchor + h_card - (gap_sm + h_bar)),
                      (x_anchor + w_card + gap + int(w_bar * confidence), y_anchor + h_card - gap_sm), (0, 255, 0),
                      thickness=cv2.FILLED)
        y_anchor += h_card + gap
        i += 1
        if i % n_cards_p_col == 0:
            x_anchor += w_card + gap + w_bar + gap
            y_anchor = gap
    return img_graph
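# Example (illustrative): exist_cards maps a display key to a 0/1 hit list over the last
# f_len frames; with f_len = 10, {'Shock (m20) #1': [1, 1, 0, 1, 1, 1, 0, 1, 1, 1]} would be
# drawn with its confidence bar at 80%. The card name and set code here are placeholders.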
def detect_frame(img, card_pool, hash_size=32, size_thresh=10000,
                 out_path=None, display=True, debug=False):
    """
    Identify all cards in the input frame, display or save the frame if needed
    :param img: input frame
    :param card_pool: pandas dataframe of all card's information
    :param hash_size: param for pHash algorithm
    :param size_thresh: threshold for size (in pixel) of the contour to be a candidate
    :param out_path: path to save the result
    :param display: flag for displaying the result
    :param debug: flag for debug mode
    :return: list of detected cards as (name, set) tuples, and the resulting image
    """
    img_result = img.copy()  # For displaying and saving
    det_cards = []
    # Detect contours of all cards in the image
    cnts = find_card(img_result, size_thresh=size_thresh, debug=debug)
    #print('Contours:', len(cnts))
    for i in range(len(cnts)):
        #print('Contour', i)
        cnt = cnts[i]
        # For the region of the image covered by the contour, transform them into a rectangular image
        pts = np.float32([p[0] for p in cnt])
        img_warp = four_point_transform(img, pts)
        # To identify the card from the card image, a perceptual hashing (pHash) algorithm is used
        # A perceptual hash is a hash string built from features of the input medium. If two media are similar
        # (i.e. have similar features), their resulting pHash values will be very close.
        # Using this property, the matching card for a given card image can be found by comparing its pHash against
        # all cards in the database and picking the card with the minimal difference in pHash value.
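        # Illustrative sketch of the idea (not executed here; the file names are placeholders):
        # with the imagehash library, the Hamming distance between two pHashes can be computed directly:
        #   h1 = ih.phash(Image.open('a.png'), hash_size=32)
        #   h2 = ih.phash(Image.open('b.png'), hash_size=32)
        #   dist = h1 - h2  # number of differing bits; a small distance means visually similar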
        '''
        img_art = img_warp[47:249, 22:294]
        img_art = Image.fromarray(img_art.astype('uint8'), 'RGB')
        art_hash = ih.phash(img_art, hash_size=hash_size).hash.flatten()
        card_pool['hash_diff'] = card_pool['art_hash'].apply(lambda x: np.count_nonzero(x != art_hash))
        '''
        img_card = Image.fromarray(img_warp.astype('uint8'), 'RGB')
        img_card_size = img_warp.shape
        #print(img_card_size)
        # region of the set symbol, as (row, row, col, col) fractions of the card image size
        cut = [round(img_card_size[0] * 0.57), round(img_card_size[0] * 0.615),
               round(img_card_size[1] * 0.81), round(img_card_size[1] * 0.940)]
        #print(cut)
        img_set_part = img_warp[cut[0]:cut[1], cut[2]:cut[3]]
        #print(img_set_part.shape)
        img_set = Image.fromarray(img_set_part.astype('uint8'), 'RGB')
        #print('img set')
        if debug:
            cv2.imshow("Set Img#%d" % i, img_set_part)
        # the hash values stored in the dataframe are pre-emptively flattened to minimize computation time
        card_hash = ih.phash(img_card, hash_size=hash_size).hash.flatten()
        card_pool['hash_diff'] = card_pool['card_hash_%d' % hash_size].apply(
            lambda x: np.count_nonzero(x != card_hash))
        min_card = card_pool[card_pool['hash_diff'] == min(card_pool['hash_diff'])].iloc[0]
        hash_diff = min_card['hash_diff']
        top_matches = sorted(card_pool['hash_diff'])
        card_one = card_pool[card_pool['hash_diff'] == top_matches[0]].iloc[0]
        card_two = card_pool[card_pool['hash_diff'] == top_matches[1]].iloc[0]
        if card_one['name'] == card_two['name'] and card_one['set'] != card_two['set']:
            # must match the stored set-symbol hash, which was computed with phash at hash size 64
            set_img_hash = ih.phash(img_set, hash_size=64).hash.flatten()
            cd_data = pd.DataFrame(columns=list(card_pool.columns.values))
#            print(list(card_pool.columns.values))
            candidates = []
            for ix in range(0, 2):
                cd = card_pool[card_pool['hash_diff'] == top_matches[ix]].iloc[0]
                cd_data.loc[0 if cd_data.empty else cd_data.index.max()+1] = cd
#                print('Idx:', ix, 'Name:', cd['name'], 'Set:', cd['set'], 'Diff:', top_matches[ix])
            cd_data['set_hash_diff'] = cd_data['set_hash_64'].apply(
                lambda x: np.count_nonzero(x != set_img_hash))
            conf = sorted(cd_data['set_hash_diff'])
            #print('Confs:', conf)
            best_match = cd_data[cd_data['set_hash_diff'] == min(cd_data['set_hash_diff'])].iloc[0]
            #print('Best Match', 'Name:', best_match['name'], 'Set:', best_match['set'])
            min_card = best_match
        card_name = min_card['name']
        card_set = min_card['set']
        det_cards.append((card_name, card_set))
        # Render the result, and display them if needed
        cv2.drawContours(img_result, [cnt], -1, (0, 255, 0), 2)
        cv2.putText(img_result, card_name, (int(min(pts[0][0], pts[1][0])), int(min(pts[0][1], pts[1][1]))),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
        if debug:
            # cv2.rectangle(img_warp, (22, 47), (294, 249), (0, 255, 0), 2)
            cv2.putText(img_warp, card_name + ':' + card_set + ', ' + str(hash_diff), (0, 20),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.4, (255, 255, 255), 1)
            cv2.imshow('card#%d' % i, img_warp)
    if display:
        cv2.imshow('Result', img_result)
        inp = cv2.waitKey(0)
    if out_path is not None:
        print(out_path)
        cv2.imwrite(out_path, img_result.astype(np.uint8))
    return det_cards, img_result
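# Usage sketch (illustrative; the frame path is a placeholder):
#   frame = cv2.imread('frame.png')
#   det_cards, img_result = detect_frame(frame, card_pool, hash_size=32, display=False)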
def detect_video(capture, card_pool, hash_size=32, size_thresh=10000,
                 out_path=None, display=True, show_graph=True, debug=False, crop_x=0, crop_y=0):
    """
    Identify all cards in the continuous video stream, display or save the result if needed
    :param capture: input video stream
    :param card_pool: pandas dataframe of all card's information
    :param hash_size: param for pHash algorithm
    :param size_thresh: threshold for size (in pixel) of the contour to be a candidate
    :param out_path: path to save the result
    :param display: flag for displaying the result
    :param show_graph: flag to show graph
    :param debug: flag for debug mode
    :return: list of detected card's name/set and resulting image
    :return:
    """
    list_names_from = 0
    # Get the dimension of the output video, and set it up
    if show_graph:
        img_graph = draw_card_graph({}, pd.DataFrame(), -1)  # Black image of the graph just to get the dimension
        width = round(capture.get(cv2.CAP_PROP_FRAME_WIDTH)) - 2 * crop_x + img_graph.shape[1]
        height = max(round(capture.get(cv2.CAP_PROP_FRAME_HEIGHT)) - 2 * crop_y, img_graph.shape[0])
        height += 200  # some space to display last detected cards
    else:
        width = round(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        height = round(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    if out_path is not None:
        vid_writer = cv2.VideoWriter(out_path, cv2.VideoWriter_fourcc(*'MJPG'), 10.0, (width, height))
    max_num_obj = 0
    f_len = 10  # number of frames to consider to check for existing cards
    exist_cards = {}
    exist_card_single = {}
    written_out_cards = set()
    found_cards = []
    try:
        while True:
            ret, frame = capture.read()
            if not ret:
                continue
            # crop the frame symmetrically, then rotate it 180 degrees (flip around both axes)
            y_max_index = -crop_y
            if crop_y == 0:
                y_max_index = frame.shape[0]
            x_max_index = -crop_x
            if crop_x == 0:
                x_max_index = frame.shape[1]
            cropped_img = frame[crop_y:y_max_index, crop_x:x_max_index]
            fimg = cv2.flip(cropped_img, -1)
            start_time = time.time()
            if fimg is None:
                print("flipped image is none")
                break
            # Detect all cards from the current frame
            det_cards, img_result = detect_frame(fimg, card_pool, hash_size=hash_size, size_thresh=size_thresh,
                                                 out_path=None, display=False, debug=debug)
            if show_graph:
                # If the card was already detected in the previous frame, append 1 to the list
                # If the card previously detected was not found in this frame, append 0 to the list
                # If the card wasn't previously detected, make a new list and add 1 to it
                # If the same card is detected multiple times in the same frame, keep track of the duplicates
                # The confidence is calculated from the number of frames the card was detected in
                det_cards_count = collections.Counter(det_cards).items()
                det_cards_list = []
                for card, count in det_cards_count:
                    card_name, card_set = card
                    for i in range(count):
                        key = '%s (%s) #%d' % (card_name, card_set, i + 1)
                        det_cards_list.append(key)
                gone = []
                for key, val in exist_cards.items():
                    if key in det_cards_list:
                        exist_cards[key] = exist_cards[key][1 - f_len:] + [1]
                    else:
                        exist_cards[key] = exist_cards[key][1 - f_len:] + [0]
                    if len(val) == f_len and sum(val) == 0:
                        gone.append(key)  # not there anymore
                det_card_map = {}
                gone_single = []
                for card_name, card_set in det_cards:
                    skey = '%s (%s)' % (card_name, card_set)
                    det_card_map[skey] = (card_name, card_set)
                for key, val in exist_card_single.items():
                    if key in det_card_map:
                        exist_card_single[key] = exist_card_single[key][1 - f_len:] + [1]
                    else:
                        exist_card_single[key] = exist_card_single[key][1 - f_len:] + [0]
                    if len(val) == f_len and sum(val) == 0:
                        gone_single.append(key)
                        if key in written_out_cards:
                            written_out_cards.remove(key)
                    if len(val) == f_len and sum(val) == f_len:
                        if key not in written_out_cards and key in det_card_map:
                            written_out_cards.add(key)
                            found_cards.append(det_card_map[key])
                            list_names_from += 1
                for key in det_card_map:
                    if key not in exist_card_single.keys():
                        exist_card_single[key] = [1]
                for key in gone_single:
                    exist_card_single.pop(key)
                for key in det_cards_list:
                    if key not in exist_cards.keys():
                        exist_cards[key] = [1]
                for key in gone:
                    exist_cards.pop(key)
                # Draw the graph based on the history of detected cards, then concatenate it with the result image
                img_graph = draw_card_graph(exist_cards, card_pool, f_len)
                img_save = np.zeros((height, width, 3), dtype=np.uint8)
                img_save[0:img_result.shape[0], 0:img_result.shape[1]] = img_result
                img_save[0:img_graph.shape[0], img_result.shape[1]:img_result.shape[1] + img_graph.shape[1]] = img_graph
                start_at = max(0, list_names_from - 10)
                end_at = min(len(found_cards), list_names_from)
                for c, card in enumerate(reversed(found_cards[start_at:end_at]), 1):
                    cv2.putText(img_save, f'{card[0]} ({card[1].upper()})', (0, height - 200 + 18 * c),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0))
            else:
                img_save = img_result
            # Display the result
            if display:
                cv2.imshow('result', img_save)
            if debug:
                max_num_obj = max(max_num_obj, len(det_cards))
                for i in range(len(det_cards), max_num_obj):
                    cv2.imshow('card#%d' % i, np.zeros((1, 1), dtype=np.uint8))
            elapsed_ms = (time.time() - start_time) * 1000
            print('Elapsed time: %.2f ms' % elapsed_ms)
            if out_path is not None:
                vid_writer.write(img_save.astype(np.uint8))
            if debug:
                print("Waiting for keypress to continue")
                inp = cv2.waitKey(0)
            else:
                inp = cv2.waitKey(1)
            if 'u' == chr(inp & 255):
                # undo: remove the most recently listed card
                if len(found_cards) > 0:
                    del found_cards[list_names_from - 1]
                    list_names_from = min(len(found_cards), max(0, list_names_from))
                #os.sleep(1000)
            elif 'p' == chr(inp & 255):
                # scroll the displayed card list backwards
                list_names_from = max(1, list_names_from - 1)
            elif 'o' == chr(inp & 255):
                # scroll the displayed card list forwards
                list_names_from = min(len(found_cards), list_names_from + 1)
            elif 'q' == chr(inp & 255):
                break
    except KeyboardInterrupt:
        print("KeyboardInterrupt happened")
    finally:
        write_found_cards(found_cards)
        capture.release()
        if out_path is not None:
            vid_writer.release()
        cv2.destroyAllWindows()
def write_found_cards(found_cards):
    with open('detect.txt', 'w') as of:
        counter = collections.Counter(found_cards)
        for key in counter:
            of.write(f'{counter[key]} [{key[1].upper()}] {key[0]}\n')
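# Example line written to detect.txt (illustrative; the card name and set are placeholders):
#   2 [M20] Shock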
def main(args):
    # Specify paths for all necessary files
    hash_sizes = {16, 32}
    hash_sizes.add(args.hash_size)
    pck_path = os.path.abspath('card_pool.pck')
    if os.path.isfile(pck_path):
        card_pool = pd.read_pickle(pck_path)
    else:
        print('Warning: pickle for card database %s is not found!' % pck_path)
        # Merge database for all cards, then calculate pHash values of each, store them
        df_list = []
        for set_name in Config.all_set_list:
            if set_name == 'con':
                set_name = 'con__'
            csv_name = '%s/csv/%s.csv' % (Config.data_dir, set_name)
            df = fetch_data.load_all_cards_text(csv_name)
            df_list.append(df)
        card_pool = pd.concat(df_list, sort=True)
        card_pool.reset_index(drop=True, inplace=True)
        card_pool.drop('Unnamed: 0', axis=1, inplace=True, errors='ignore')
        card_pool = calc_image_hashes(card_pool, save_to=pck_path, hash_size=hash_sizes)
    ch_key = 'card_hash_%d' % args.hash_size
    set_key = 'set_hash_64'
    if ch_key not in card_pool.columns:
        # we did not generate this hash_size yet
        print('We need to add hash_size=%d' % (args.hash_size,))
        card_pool = calc_image_hashes(card_pool, save_to=pck_path, hash_size=[args.hash_size])
    card_pool = card_pool[['name', 'set', 'collector_number', ch_key, set_key]]
    # Processing time is almost linear to the size of the database
    # Program can be much faster if the search scope for the card can be reduced
    #card_pool = card_pool[card_pool['set'].isin(Config.set_2003_list)]
    # An ImageHash is basically just one numpy.ndarray with (hash_size)^2 bits. Pre-emptively flattening it
    # significantly speeds up the hash subtractions later on.
    card_pool[ch_key] = card_pool[ch_key].apply(lambda x: x.hash.flatten())
    card_pool[set_key] = card_pool[set_key].apply(lambda x: x.hash.flatten())
    print("Hash-Database setup done")
    # If the test file isn't given, use webcam to capture video
    if args.in_path is None:
        if args.stream_url is None:
            print("Using webcam")
            capture = cv2.VideoCapture(0)
            capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
            capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
            capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
        else:
            print(f"Using streami {args.stream_url}")
            capture = cv2.VideoCapture(args.stream_url)
        # convert the percentage threshold into a pixel-area threshold for the cropped 1920x1080 frame
        thres = int((1920 - 2 * args.crop_x) * (1080 - 2 * args.crop_y) * (float(args.threshold_percent) / 100))
        print('Threshold:', thres)
        if args.out_path is None:
            out_path = None
        else:
            out_path = '%s/result.avi' % args.out_path
        detect_video(capture, card_pool, hash_size=args.hash_size, out_path=out_path,
                     display=args.display, show_graph=args.show_graph, debug=args.debug,
                     crop_x=args.crop_x, crop_y=args.crop_y, size_thresh=thres)
        capture.release()
    else:
        print(f"Using image or video {args.in_path}")
        # Save the detection result if args.out_path is provided
        if args.out_path is None:
            out_path = None
        else:
            f_name = os.path.split(args.in_path)[1]
            out_path = '%s/%s.avi' % (args.out_path, os.path.splitext(f_name)[0])
        if not os.path.isfile(args.in_path):
            print('The test file %s doesn\'t exist!' % os.path.abspath(args.in_path))
            return
        # Check if the test file is an image or a video
        test_ext = os.path.splitext(args.in_path)[1][1:].lower()
        if test_ext in ['jpg', 'jpeg', 'bmp', 'png', 'tiff']:
            # Test file is an image
            img = cv2.imread(args.in_path)
            if img is None:
                print('Could not read', args.in_path)
                return
            detect_frame(img, card_pool, hash_size=args.hash_size, out_path=out_path, display=args.display,
                         debug=args.debug)
        else:
            # Test file is a video
            capture = cv2.VideoCapture(args.in_path)
            detect_video(capture, card_pool, hash_size=args.hash_size, out_path=out_path, display=args.display,
                         show_graph=args.show_graph, debug=args.debug)
            capture.release()
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--in', dest='in_path', help='Path of the input file. For webcam, leave it blank',
                        type=str)
    parser.add_argument('-o', '--out', dest='out_path', help='Path of the output directory to save the result',
                        type=str)
    parser.add_argument('-hs', '--hash_size', dest='hash_size',
                        help='Size of the hash for pHash algorithm', type=int, default=16)
    parser.add_argument('-dsp', '--display', dest='display', help='Display the result', action='store_true',
                        default=False)
    parser.add_argument('-dbg', '--debug', dest='debug', help='Enable debug mode', action='store_true', default=False)
    parser.add_argument('-gph', '--show_graph', dest='show_graph', help='Display the graph for video output',
                        action='store_true', default=False)
    parser.add_argument('-s', '--stream', dest='stream_url', type=str)
    parser.add_argument('-cx', '--crop-x', dest='crop_x',
                        help='number of pixels to crop on each side along the x-axis', type=int, default=0)
    parser.add_argument('-cy', '--crop-y', dest='crop_y',
                        help='number of pixels to crop on each side along the y-axis', type=int, default=0)
    parser.add_argument('-tp', '--threshold-percent', dest='threshold_percent',
                        help='percentage of the frame area a card must cover to be detected', type=int, default=5)
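    # Example invocations (illustrative; the paths and URL are placeholders):
    #   python opencv_dnn.py -i ../data/test1.mp4 -o out -hs 16 -dsp
    #   python opencv_dnn.py -s http://example.com/stream.m3u8 -dsp -gph -cx 100 -cy 50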
    args = parser.parse_args()
    if not args.display and args.out_path is None:
        # Then why the heck are you running this thing in the first place?
        print('The program isn\'t displaying nor saving any output file. Please change the setting and try again.')
        exit()
    main(args)