Created
April 9, 2018 01:03
-
-
Save jph00/f48a68b86e1a9bcc07cac23c20a7c51e to your computer and use it in GitHub Desktop.
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
[convolutional]
batch_normalize=1
filters=32
size=3
stride=1
pad=1
activation=leaky
# Downsample

[convolutional]
batch_normalize=1
filters=64
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=32
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear
# Downsample

[convolutional]
batch_normalize=1
filters=128
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=64
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear
# Downsample

[convolutional]
batch_normalize=1
filters=256
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=128
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear
# Downsample

[convolutional]
batch_normalize=1
filters=512
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=256
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear
# Downsample

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=2
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
filters=1024
size=3
stride=1
pad=1
activation=leaky

[shortcut]
from=-3
activation=linear
######################

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
batch_normalize=1
filters=512
size=1
stride=1
pad=1
activation=leaky

[convolutional]
batch_normalize=1
size=3
stride=1
pad=1
filters=1024
activation=leaky

[convolutional]
size=1
stride=1
pad=1
filters=255
activation=linear
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment