4 files modified
3 files added
1 file renamed
| | |
| | | OPENCV=1 |
| | | AVX=0 |
| | | OPENMP=0 |
| | | LIBSO=0 |
| | | LIBSO=1 |
| | | |
| | | # set GPU=1 and CUDNN=1 to speedup on GPU |
| | | # set CUDNN_HALF=1 to further speedup 3 x times (Mixed-precision using Tensor Cores) on GPU Tesla V100, Titan V, DGX-2 |
| | |
| | | |
| | | I've been training a new model with a full YOLOv3 configuration (previous one used Tiny YOLOv3), and it's been taking a lot more resources: |
| | | |
| | | <img src="https://github.com/hj3yoo/darknet/blob/master/figures/4_learning_curve.jpg" width="640"> |
| | | <img src="https://github.com/hj3yoo/darknet/blob/master/figures/2_learning_curve.jpg" width="640"> |
| | | |
| | | The author of darknet did mention that the full network will take significantly more training effort, so I'll just have to wait. At this rate, it should reach 50k epochs in about a week :/
| | | |
| | | |
| | | ## Sept 13th, 2018 |
| | | ---------------------- |
| | | |
| | | The training for full YOLOv3 model has turned sour - the loss saturated around 0.45, and didn't seem like it would improve in any reasonable amount of time. |
| | | |
| | | <img src="https://github.com/hj3yoo/darknet/blob/master/figures/3_learning_curve.jpg" width="640"> |
| | | |
| | | As expected, the performance of the model with 0.45 loss was fairly bad. Not to mention that it's quite a bit slower, too. I've decided to continue with the tiny YOLOv3 weights. I tried to train them further, but they were already saturated — that was the best they could get.
| | | |
| | | --------------------- |
| | | |
| | | Bad news, I couldn't find any repo that has python wrapper for darknet to pursue this project further. There is a [python example](https://github.com/AlexeyAB/darknet/blob/master/darknet.py) in the original repo of this fork, but [it doesn't support video input](https://github.com/AlexeyAB/darknet/issues/955). Other darknet repos are in the same situation. |
| | | |
| | | I suppose there is a poor man's alternative - feed individual frames from the video into the detection script for image. I'll have to give it a shot. |
| | |
| | | import math |
| | | import random |
| | | import os |
| | | import cv2 |
| | | |
| | | def sample(probs): |
| | | s = sum(probs) |
| | |
| | | print("Unable to show image: "+str(e)) |
| | | return detections |
| | | |
| | | |
def capture(thresh=.5, hier_thresh=.5, nms=.45, configPath="./cfg/yolov3.cfg", weightPath="yolov3.weights",
            metaPath="./data/coco.data", showImage=True, makeImageOnly=False, initOnly=False,
            videoPath='../data/test3.mp4'):
    """Run YOLO detection on every frame of a video and print the results.

    Loads the network/metadata into the module-level globals, then reads
    frames from *videoPath* until the stream ends or 'q' is pressed,
    printing the detections for each frame and showing it in a window.

    Args:
        thresh: detection confidence threshold passed to get_network_boxes.
        hier_thresh: hierarchical threshold passed to get_network_boxes.
        nms: non-max-suppression overlap threshold; falsy disables NMS.
        configPath / weightPath / metaPath: darknet model files.
        showImage / makeImageOnly / initOnly: accepted for interface
            compatibility with performDetect; not used here.
        videoPath: source video file (new, defaults to the previously
            hard-coded path, so existing callers are unaffected).
    """
    global metaMain, netMain, altNames  # pylint: disable=W0603
    netMain = load_net_custom(configPath.encode("ascii"), weightPath.encode("ascii"), 0, 1)  # batch size = 1
    metaMain = load_meta(metaPath.encode("ascii"))

    # c_int cell that get_network_boxes fills with the detection count.
    num = c_int(0)
    pnum = pointer(num)

    # Renamed from `capture`: the original local shadowed this function.
    cap = cv2.VideoCapture(videoPath)
    print(cap.get(cv2.CAP_PROP_FPS))

    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1024)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 768)

    while True:
        ret, frame = cap.read()
        if not ret:
            # End of stream or read failure; the original crashed here by
            # passing frame=None to array_to_image.
            break
        im, arr = array_to_image(frame)
        predict_image(netMain, im)
        dets = get_network_boxes(netMain, im.w, im.h, thresh, hier_thresh, None, 0, pnum, 1)
        # BUG FIX: read the count AFTER get_network_boxes fills it in.
        # The original read pnum[0] once before the loop, so the count was
        # always stale (0) for every frame.
        det_count = pnum[0]
        if nms:
            do_nms_sort(dets, det_count, metaMain.classes, nms)
        res = []
        for j in range(det_count):
            for i in range(metaMain.classes):
                if dets[j].prob[i] > 0:
                    b = dets[j].bbox
                    nameTag = metaMain.names[i]
                    res.append((nameTag, dets[j].prob[i], (b.x, b.y, b.w, b.h)))
        # NOTE(review): dets is never freed; upstream darknet.py calls
        # free_detections(dets, num) here — confirm the binding exists and
        # add it, otherwise this loop leaks per frame.
        print(res)
        cv2.imshow('frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
| | | |
| | | |
if __name__ == "__main__":
    # First run with the default arguments and show what it returns,
    # then run against the custom-trained tiny YOLO weights.
    default_result = performDetect()
    print(default_result)
    performDetect(imagePath="../data/test1.jpg", thresh=0.25, configPath="./cfg/tiny_yolo.cfg",
                  weightPath="./weights/second_general/tiny_yolo_17000.weights",
                  metaPath="./data/obj.data", showImage=True, makeImageOnly=False, initOnly=False)
    #print(performDetect(showImage=False))
    #capture()