| | |
# Learning-rate schedule (interior of the [net] section; the header is above this chunk).
learning_rate=0.1
# 0 here is unusual — max_batches normally bounds training; confirm against the trainer.
max_batches=0
policy=steps
# Step-decay schedule: LR is multiplied by each scale when the matching batch count
# is reached.
# NOTE(review): removed a duplicate single-value pair (steps=50000 / scales=.1) that
# followed this two-stage schedule — duplicate-key resolution is parser-dependent, so
# only one schedule may be present. Confirm the two-stage version is the intended one.
steps=50000, 90000
scales=.1, .1
| | | |
# 3x3 convolution.
# NOTE(review): this section originally contained BOTH filters=256 and filters=512 —
# a duplicate key whose winner is parser-dependent. Kept 512 to match every sibling
# 3x3/stride-1/pad-1 layer in this file; confirm against the upstream model definition.
[convolutional]
filters=512
size=3
stride=1
pad=1
| | |
| | | |
# 1x1 convolution (256 filters), batch-normalized, leaky-ReLU activation.
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
| | | |
# 3x3 convolution (512 filters).
# NOTE(review): no activation/batch_normalize keys here, unlike the 1x1 layers —
# parser defaults will apply; confirm this omission is intended.
[convolutional]
filters=512
size=3
stride=1
pad=1
| | |
| | | |
# 1x1 convolution (256 filters), batch-normalized, leaky-ReLU activation.
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
| | | |
# 3x3 convolution (512 filters).
# NOTE(review): no activation/batch_normalize keys here, unlike the 1x1 layers —
# parser defaults will apply; confirm this omission is intended.
[convolutional]
filters=512
size=3
stride=1
pad=1
| | |
| | | |
# 1x1 convolution (256 filters), batch-normalized, leaky-ReLU activation.
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
| | | |
# 3x3 convolution (512 filters).
# NOTE(review): no activation/batch_normalize keys here, unlike the 1x1 layers —
# parser defaults will apply; confirm this omission is intended.
[convolutional]
filters=512
size=3
stride=1
pad=1
| | |
| | | |
# 1x1 convolution (256 filters), batch-normalized, leaky-ReLU activation.
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
| | | |
# 3x3 convolution (512 filters), batch-normalized.
# (batch_normalize=1 was separated from the other keys by a blank line; blank lines
# do not end an INI section, so it already applied here — moved adjacent for clarity.)
[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
| | | |
# 1x1 convolution (256 filters), batch-normalized, leaky-ReLU activation.
[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky
| | | |
| | | [convolutional] |
| | | filters=1 |
| | | size=1 |
| | | stride=1 |