| | |
| | | -gencode arch=compute_35,code=sm_35 \ |
| | | -gencode arch=compute_50,code=[sm_50,compute_50] \ |
| | | -gencode arch=compute_52,code=[sm_52,compute_52] \ |
| | | -gencode arch=compute_61,code=[sm_61,compute_61] |
| | | -gencode arch=compute_61,code=[sm_61,compute_61] |
| | | |
# Host operating system name (e.g. "Linux" or "Darwin"), evaluated once
# with := and used below to choose CUDA/cuDNN library directories.
OS := $(shell uname)
| | | |
| | | # Tesla V100 |
| | | # ARCH= -gencode arch=compute_70,code=[sm_70,compute_70] |
| | | |
| | | # GTX 1080, GTX 1070, GTX 1060, GTX 1050, GTX 1030, Titan Xp, Tesla P40, Tesla P4 |
| | | # ARCH= -gencode arch=compute_61,code=sm_61 -gencode arch=compute_61,code=compute_61 |
| | | |
| | | # GP100/Tesla P100 DGX-1 |
| | | # ARCH= -gencode arch=compute_60,code=sm_60 |
| | | |
| | | # For Jetson Tx1 uncomment: |
| | | # ARCH= -gencode arch=compute_51,code=[sm_51,compute_51] |
| | | |
# For Jetson Tx2 or Drive-PX2 uncomment:
| | | # ARCH= -gencode arch=compute_62,code=[sm_62,compute_62] |
| | | |
| | | # This is what I use, uncomment if you know your arch and want to specify |
| | | # ARCH= -gencode arch=compute_52,code=compute_52 |
| | | |
| | | |
# Where make searches for sources, the output binary name, and the
# directory object files are written to. OBJDIR is required by the
# pattern rules and the OBJS definition below but was missing from
# this section — restored (trailing slash is significant: it is
# concatenated directly via addprefix).
VPATH=./src/
EXEC=darknet
OBJDIR=./obj/
| | |
| | | LDFLAGS+= -lgomp |
| | | endif |
| | | |
# Compile and link against CUDA when GPU=1. The duplicated
# "ifeq ($(GPU), 1)" line (a merge artifact) left the conditionals
# unbalanced; a single open/close pair is correct here.
ifeq ($(GPU), 1)
COMMON+= -DGPU -I/usr/local/cuda/include/
CFLAGS+= -DGPU
ifeq ($(OS),Darwin) #MAC
LDFLAGS+= -L/usr/local/cuda/lib -lcuda -lcudart -lcublas -lcurand
else
LDFLAGS+= -L/usr/local/cuda/lib64 -lcuda -lcudart -lcublas -lcurand
endif
endif
| | | |
# Enable cuDNN when CUDNN=1. The duplicated "ifeq ($(CUDNN), 1)" /
# "COMMON+= -DCUDNN" pair (merge artifact) is removed so the
# conditional nesting balances. macOS ships cuDNN under the CUDA
# prefix; Linux uses a separate /usr/local/cudnn install here.
ifeq ($(CUDNN), 1)
COMMON+= -DCUDNN
ifeq ($(OS),Darwin) #MAC
CFLAGS+= -DCUDNN -I/usr/local/cuda/include
LDFLAGS+= -L/usr/local/cuda/lib -lcudnn
else
CFLAGS+= -DCUDNN -I/usr/local/cudnn/include
LDFLAGS+= -L/usr/local/cudnn/lib64 -lcudnn
endif
endif
| | | |
# CPU object files for the library and CLI tools. A stale duplicate
# OBJ= assignment (a strict subset of this list, immediately
# overridden) has been removed. GPU kernel objects are appended
# conditionally below.
OBJ=http_stream.o gemm.o utils.o cuda.o convolutional_layer.o list.o image.o activations.o im2col.o col2im.o blas.o crop_layer.o dropout_layer.o maxpool_layer.o softmax_layer.o data.o matrix.o network.o connected_layer.o cost_layer.o parser.o option_list.o darknet.o detection_layer.o captcha.o route_layer.o writing.o box.o nightmare.o normalization_layer.o avgpool_layer.o coco.o dice.o yolo.o detector.o layer.o compare.o classifier.o local_layer.o swag.o shortcut_layer.o activation_layer.o rnn_layer.o gru_layer.o rnn.o rnn_vid.o crnn_layer.o demo.o tag.o cifar.o go.o batchnorm_layer.o art.o region_layer.o reorg_layer.o reorg_old_layer.o super.o voxel.o tree.o yolo_layer.o upsample_layer.o
# When building with CUDA, link the C++ runtime (nvcc-produced objects
# need it) and add the GPU kernel objects, compiled from .cu sources by
# the $(NVCC) pattern rule below.
ifeq ($(GPU), 1)
LDFLAGS+= -lstdc++
OBJ+=convolutional_kernels.o activation_kernels.o im2col_kernels.o col2im_kernels.o blas_kernels.o crop_layer_kernels.o dropout_layer_kernels.o maxpool_layer_kernels.o network_kernels.o avgpool_layer_kernels.o

endif
| | | |
# Full object paths: every name in OBJ prefixed with the object
# directory. This definition was missing from this section although
# $(OBJS) is used as the link prerequisite list — restored. Recursive
# (=) so it reflects any later additions to OBJ.
OBJS = $(addprefix $(OBJDIR), $(OBJ))

# Link the final binary. The duplicated second link recipe ($(CC) and
# $(CPP) both writing $@) is removed; $(CPP) is kept because OBJ
# contains C++ objects (e.g. http_stream.o).
$(EXEC): $(OBJS)
	$(CPP) $(COMMON) $(CFLAGS) $^ -o $@ $(LDFLAGS)
| | | |
# Compile a C source into the object directory with $(CC).
$(OBJDIR)%.o: %.c $(DEPS)
	$(CC) $(COMMON) $(CFLAGS) -c $< -o $@

# Compile a C++ source into the object directory with $(CPP).
$(OBJDIR)%.o: %.cpp $(DEPS)
	$(CPP) $(COMMON) $(CFLAGS) -c $< -o $@

# Compile a CUDA source with $(NVCC), forwarding $(CFLAGS) to the
# host compiler via --compiler-options.
$(OBJDIR)%.o: %.cu $(DEPS)
	$(NVCC) $(ARCH) $(COMMON) --compiler-options "$(CFLAGS)" -c $< -o $@
| | | |