2 files modified
1 file added

#img_art = Image.fromarray(card_img[121:580, 63:685])  # For 745*1040 size card image
img_card = Image.fromarray(card_img)
img_set = Image.fromarray(set_img)
#cv2.imshow('Set' + card_names[0], set_img)
for hs in hash_size:
    card_hash = ih.phash(img_card, hash_size=hs)
    set_hash = ih.whash(img_set, hash_size=64)
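This hunk only computes the hashes. A minimal sketch of how they might be looked up against a pre-computed pool (the `card_pool` dict and `best_match` helper here are hypothetical, not code from this PR; imagehash overloads `-` to return the Hamming distance between two hashes):

import imagehash as ih

def best_match(img_card, card_pool, hash_size=32):
    """Sketch only: return the pool entry whose perceptual hash is closest to img_card.
    Assumes card_pool is {card_name: ImageHash} built offline with the same hash_size."""
    card_hash = ih.phash(img_card, hash_size=hash_size)
    # ImageHash subtraction gives the number of differing bits (Hamming distance)
    distances = {name: card_hash - h for name, h in card_pool.items()}
    return min(distances, key=distances.get)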

# Find the contour
cnts, hier = cv2.findContours(img_erode, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
if len(cnts) == 0:
    #print('no contours')
    print('no contours')
    return []
img_cont = cv2.cvtColor(img_erode, cv2.COLOR_GRAY2BGR)
img_cont_base = img_cont.copy()
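For context, `img_erode` used above is presumably a binarized, eroded version of the input frame. A typical preprocessing chain that could produce it (an assumption, not necessarily what this repo does; kernel and blur sizes are illustrative only):

import cv2
import numpy as np

def preprocess(frame):
    """Illustrative guess at how img_erode could be produced."""
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(gray, (5, 5), 0)
    _, thresh = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    kernel = np.ones((3, 3), np.uint8)
    return cv2.erode(thresh, kernel, iterations=1)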

size = cv2.contourArea(cnt)
peri = cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, 0.04 * peri, True)
print('Base Size:', size)
print('Len Approx:', len(approx))
if size >= size_thresh and len(approx) == 4:
    # let's see if we got a contour very close in size as a child
    if i_child != -1:

        c_cnt = c_list[0]  # the biggest child
        if debug:
            cv2.drawContours(img_ccont, c_list[:1], -1, (0, 255, 0), 1)
            cv2.imshow('CCont %d' % i_cnt, img_ccont)
            cv2.imshow('CCont', img_ccont)
        c_size = cv2.contourArea(c_cnt)
        c_approx = cv2.approxPolyDP(c_cnt, 0.04 * peri, True)
        if len(c_approx) == 4 and (c_size/size) > 0.85:
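How `i_child` and `c_list` are derived is not shown in this hunk. One plausible reading (an assumption) uses the hierarchy layout `[next, previous, first_child, parent]` that cv2.findContours returns with RETR_TREE:

import cv2

# Sketch only: collect the children of contour i_cnt and sort them by area,
# so c_list[0] is the biggest child and i_child is -1 when there is no child.
i_child = hier[0][i_cnt][2]      # index of the first child contour, -1 if none
c_list = []
j = i_child
while j != -1:
    c_list.append(cnts[j])
    j = hier[0][j][0]            # next sibling at the same hierarchy level
c_list.sort(key=cv2.contourArea, reverse=True)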

det_cards = []
# Detect contours of all cards in the image
cnts = find_card(img_result, size_thresh=size_thresh, debug=debug)
print('Contours:', len(cnts))
for i in range(len(cnts)):
    print('Contour', i)
    cnt = cnts[i]
    # For the region of the image covered by the contour, transform it into a rectangular image
    pts = np.float32([p[0] for p in cnt])
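The warp itself is not part of this hunk. A standard four-point perspective transform (a sketch assuming `pts` holds the four corner points in arbitrary order; the output size and function name are arbitrary, not the repo's):

import cv2
import numpy as np

def four_point_warp(img, pts, width=315, height=440):
    """Illustrative warp of a quadrilateral region to an upright rectangle."""
    s = pts.sum(axis=1)
    d = np.diff(pts, axis=1).ravel()
    src = np.float32([pts[np.argmin(s)],   # top-left: smallest x+y
                      pts[np.argmin(d)],   # top-right: smallest y-x
                      pts[np.argmax(s)],   # bottom-right: largest x+y
                      pts[np.argmax(d)]])  # bottom-left: largest y-x
    dst = np.float32([[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]])
    m = cv2.getPerspectiveTransform(src, dst)
    return cv2.warpPerspective(img, m, (width, height))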

img_set_part = img_warp[cut[0]:cut[1], cut[2]:cut[3]]
print(img_set_part.shape)
img_set = Image.fromarray(img_set_part.astype('uint8'), 'RGB')
print('img set')
if debug:
    cv2.imshow("Set Img#%d" % i, img_set_part)

    cv2.imshow('card#%d' % i, img_warp)
if display:
    cv2.imshow('Result', img_result)
    cv2.waitKey(0)
    inp = cv2.waitKey(0)

if out_path is not None:
    print(out_path)
    cv2.imwrite(out_path, img_result.astype(np.uint8))
return det_cards, img_result

        print('Elapsed time: %.2f ms' % elapsed_ms)
        if out_path is not None:
            vid_writer.write(img_save.astype(np.uint8))
        cv2.waitKey(1)
        inp = cv2.waitKey(0)
        if 'q' == chr(inp & 255):
            break
except KeyboardInterrupt:
    capture.release()
    if out_path is not None:

    capture.set(cv2.CAP_PROP_FOURCC, cv2.VideoWriter_fourcc(*"MJPG"))
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)

    thres = int(((1920-2*500)*(1080-2*200)*0.3))
    print('Threshold:', thres)
    detect_video(capture, card_pool, hash_size=args.hash_size, out_path='%s/result.avi' % args.out_path,
-                display=args.display, show_graph=args.show_graph, debug=args.debug, crop_x=500, crop_y=200)
+                display=args.display, show_graph=args.show_graph, debug=args.debug, crop_x=500, crop_y=200, size_thresh=thres)
    capture.release()
else:
    # Save the detection result if args.out_path is provided
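The hard-coded threshold above is simply 30% of the area of the frame after cropping. A small helper making that relationship explicit (hypothetical, for illustration only):

def min_card_area(frame_w, frame_h, crop_x, crop_y, fraction=0.3):
    """Smallest contour area accepted as a card, as a fraction of the cropped frame."""
    return int((frame_w - 2 * crop_x) * (frame_h - 2 * crop_y) * fraction)

# min_card_area(1920, 1080, 500, 200) == int(920 * 680 * 0.3) == 187680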

#PIL==5.1.0
shapely==1.6.4
#urllib

imagehash
| New file |

#!/bin/python3

import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import use, backends


use('GTK3Cairo')

if __name__ == '__main__':
    img = cv.imread('cards_orig.jpg')
    img = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
    _, img = cv.threshold(img, 50, 255, cv.THRESH_BINARY)
    img2 = img.copy()
    #template = cv.imread('data2/icons/m19.png', 0)
    template = cv.imread('m19_ico3.jpg', 0)
    template = cv.resize(template, (109, 46), interpolation=cv.INTER_CUBIC)
    print(template.shape)
    h, w = template.shape
    methods = ['cv.TM_CCOEFF', 'cv.TM_CCOEFF_NORMED', 'cv.TM_CCORR',
               'cv.TM_CCORR_NORMED', 'cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']
    for meth in methods:
        img = img2.copy()
        method = eval(meth)

        res = cv.matchTemplate(img, template, method)
        min_val, max_val, min_loc, max_loc = cv.minMaxLoc(res)

        if meth in ['cv.TM_SQDIFF', 'cv.TM_SQDIFF_NORMED']:
            top_left = min_loc
        else:
            top_left = max_loc

        bottom_right = (top_left[0] + w, top_left[1] + h)
        print(top_left, bottom_right)
        img2show = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
        cv.rectangle(img2show, top_left, bottom_right, 1202404, 2)
        plt.subplot(221), plt.imshow(res, cmap='gray')
        plt.title('Matching Result'), plt.xticks([]), plt.yticks([])
        plt.subplot(222), plt.imshow(img2show, cmap='gray')
        plt.title('Detected Point'), plt.xticks([]), plt.yticks([])
        plt.suptitle(meth)
        plt.subplot(223), plt.imshow(template, cmap='gray')
        plt.title('Template'), plt.xticks([]), plt.yticks([])

        plt.show()