Aggregating card images, importing texture images for background
3 files modified
1 file added

import math
from screeninfo import get_monitors


def detect_a_card(img, thresh_val=80, blur_radius=None, dilate_radius=None, min_hyst=80, max_hyst=200,
                  min_line_length=None, max_line_gap=None, debug=False):
    dim_img = (len(img[0]), len(img))  # (width, height)

import json
import pandas as pd
import re
import os
from urllib import request


def fetch_all_cards_text(url='https://api.scryfall.com/cards/search?q=layout:normal+format:modern+lang:en+frame:2003',
                         csv_name=''):
    has_more = True
    cards = []
    # get cards dataset as a json from the query, one page at a time
    while has_more:
        res_file_dir, http_message = request.urlretrieve(url)
        with open(res_file_dir) as res_file:
            res_json = json.load(res_file)
        cards += res_json['data']
        has_more = res_json['has_more']
        if has_more:  # the last page has no 'next_page' key
            url = res_json['next_page']
        print(len(cards))

    # Convert them into a dataframe, and truncate unnecessary columns
    df = pd.DataFrame.from_dict(cards)
    df['image'] = ''
    for ind, row in df.iterrows():
        # DataFrame.set_value is deprecated; .at is the scalar setter
        df.at[ind, 'image'] = row['image_uris']['png']

    if csv_name != '':
        df = df[['artist', 'border_color', 'collector_number', 'color_identity', 'colors', 'flavor_text', 'image_uris',
                 'image', 'mana_cost', 'legalities', 'name', 'oracle_text', 'rarity', 'type_line', 'set', 'set_name',
                 'power', 'toughness']]
        df.to_csv(csv_name, sep=';')  # Comma doesn't work, since some columns are saved as a dict

    return df
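

# Reading the CSV back (an illustrative helper, not part of the original commit): the
# semicolon separator must match, since dict-valued columns such as image_uris contain
# commas and would break a comma-separated file.
def load_cards_csv(csv_name):
    return pd.read_csv(csv_name, sep=';', index_col=0)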


# Pulled from Django framework (https://github.com/django/django/blob/master/django/utils/text.py)
def get_valid_filename(s):
    """
    Return the given string converted to a string that can be used for a clean
    filename. Remove leading and trailing spaces; convert other spaces to
    underscores; and remove anything that is not an alphanumeric, dash,
    underscore, or dot.
    """
    s = str(s).strip().replace(' ', '_')
    return re.sub(r'(?u)[^-\w.]', '', s)
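

# A quick sanity check (illustrative): punctuation is stripped and spaces become
# underscores, so card names map cleanly to file names.
assert get_valid_filename("Jace's Ingenuity") == 'Jaces_Ingenuity'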


def fetch_cards_image(df, out_dir='', size='png'):
    for ind, row in df.iterrows():
        png_url = row['image_uris'][size]
        # Default to a per-set directory, resolved per row so mixed-set frames land correctly
        dest_dir = out_dir if out_dir != '' else 'data/png/%s' % row['set']
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        img_name = '%s/%s_%s.png' % (dest_dir, row['collector_number'], get_valid_filename(row['name']))
        request.urlretrieve(png_url, filename=img_name)
        print(img_name)
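

# Scryfall's API guidelines ask clients to pause between requests (roughly 50-100 ms).
# A throttled wrapper one could swap in for request.urlretrieve above; the helper name
# and default delay are assumptions, not part of the original commit.
import time

def polite_urlretrieve(url, filename=None, delay=0.1):
    time.sleep(delay)  # back off before each request to respect the rate guidance
    return request.urlretrieve(url, filename=filename)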


def main():
    df = fetch_all_cards_text(url='https://api.scryfall.com/cards/search?q=layout:normal+set:rtr+lang:en',
                              csv_name='data/all_cards.csv')
    fetch_cards_image(df)


if __name__ == '__main__':
    main()
| New file |

from glob import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimage
import pickle
import math
import random
import os


# Referenced from geaxgx's playing-card-detection: https://github.com/geaxgx/playing-card-detection
class Backgrounds:
    def __init__(self, images=None, dumps_dir='data/dtd/images'):
        if images is not None:
            self._images = images
        else:  # load from pickle dumps
            if not os.path.exists(dumps_dir):
                print('Warning: directory for dump %s doesn\'t exist' % dumps_dir)
                return
            self._images = []
            for dump_name in glob(dumps_dir + '/*.pck'):
                with open(dump_name, 'rb') as dump:
                    print('Loading ' + dump_name)
                    images = pickle.load(dump)
                self._images += images
            if len(self._images) == 0:  # nothing dumped yet; build dumps from the raw DTD images
                self._images = load_dtd()
        print('# of images loaded: %d' % len(self._images))

    def get_random(self, display=False):
        bg = random.choice(self._images)
        if display:
            plt.imshow(bg)  # plt.show() takes no image argument; draw it first, then show
            plt.show()
        return bg


def load_dtd(dtd_dir='data/dtd/images', dump_it=True, dump_batch_size=1000):
    if not os.path.exists(dtd_dir):
        print('Warning: directory for DTD %s doesn\'t exist.' % dtd_dir)
        print('You can download the dataset using this command: '
              'wget https://www.robots.ox.ac.uk/~vgg/data/dtd/download/dtd-r1.0.1.tar.gz')
        return []
    bg_images = []
    # Search the directory for all images, and append them
    for subdir in glob(dtd_dir + "/*"):
        for f in glob(subdir + "/*.jpg"):
            bg_images.append(mpimage.imread(f))
    print("# of images loaded: %d" % len(bg_images))

    # Save them as pickles if necessary, in batches to keep each dump file small
    if dump_it:
        for i in range(math.ceil(len(bg_images) / dump_batch_size)):
            dump_name = '%s/dtd_dump_%d.pck' % (dtd_dir, i)
            with open(dump_name, 'wb') as dump:
                print('Dumping ' + dump_name)
                pickle.dump(bg_images[i * dump_batch_size:(i + 1) * dump_batch_size], dump)

    return bg_images
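

# For reference, the dumps written above can be reloaded without re-reading the raw
# JPEGs, which is what Backgrounds.__init__ does. An illustrative helper (not part of
# the original commit):
def reload_dumps(dumps_dir='data/dtd/images'):
    images = []
    for dump_name in glob(dumps_dir + '/*.pck'):
        with open(dump_name, 'rb') as dump:
            images += pickle.load(dump)
    return images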


def main():
    # bg_images = load_dtd()
    bg = Backgrounds()
    bg.get_random(display=True)


if __name__ == '__main__':
    main()