# Assume 26 unique characters
import numpy as np
from sklearn.preprocessing import OneHotEncoder

alphabet = list('abcdefghijklmnopqrstuvwxyz')

# two sample sequences, inputs and targets
x = np.array(list('abc'))  # inputs
y = np.array(list('xyz'))  # targets

# define one-hot encoder and label encoder
# (OneHotEncoder expects a 2D array, hence the reshape;
#  use sparse=False instead on scikit-learn < 1.2)
onehot_encoder = OneHotEncoder(sparse_output=False).fit(np.array(alphabet).reshape(-1, 1))
label_encoder = {ch: i for i, ch in enumerate(alphabet)}
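As a quick sanity check, the two encoders can be applied to the sample sequences; a minimal sketch (the variable names are illustrative):

# inputs become one-hot rows: shape (3, 26)
x_onehot = onehot_encoder.transform(x.reshape(-1, 1))

# targets become integer labels: [23, 24, 25]
y_labels = [label_encoder[ch] for ch in y]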
import torch.nn as nn

input_size = 50    # size of the one-hot encoded input vector
hidden_size = 100  # number of hidden nodes in the LSTM layer
n_layers = 2       # number of stacked LSTM layers
output_size = 50   # one score per character in the vocabulary

lstm = nn.LSTM(input_size, hidden_size, n_layers, batch_first=True)
linear = nn.Linear(hidden_size, output_size)

# Data Flow Protocol:
# 1. network input shape:  (batch_size, seq_length, num_features)
# 2. LSTM output shape:    (batch_size, seq_length, hidden_size)
# 3. Linear input shape:   (batch_size * seq_length, hidden_size)
# 4. Linear output shape:  (batch_size * seq_length, output_size)
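A quick forward pass makes the shape protocol concrete; a minimal sketch with a made-up batch of random inputs standing in for one-hot vectors:

import torch

batch_size, seq_length = 4, 10
inputs = torch.randn(batch_size, seq_length, input_size)

out, (h, c) = lstm(inputs)           # out: (4, 10, 100)
out = out.reshape(-1, hidden_size)   # (40, 100), one row per time step
scores = linear(out)                 # (40, 50), one score per character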
input_size = 1   # the number of variables in your sequence data
n_hidden = 100   # the number of hidden nodes in the LSTM layer
n_layers = 2     # the number of stacked LSTM layers
out_size = 1     # the size of the output you want from your RNN

lstm = nn.LSTM(input_size, n_hidden, n_layers, batch_first=True)
linear = nn.Linear(n_hidden, out_size)

# Data Flow Protocol:
# 1. network input shape:  (batch_size, seq_length, num_features)
# 2. LSTM output shape:    (batch_size, seq_length, n_hidden)
# 3. Linear input shape:   (batch_size * seq_length, n_hidden)
# 4. Linear output shape:  (batch_size * seq_length, out_size)
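The same flow for a univariate series; a minimal sketch with a made-up batch of 20-step sequences:

import torch

seqs = torch.randn(5, 20, 1)                # 5 sequences, 20 steps, 1 feature
out, hidden = lstm(seqs)                    # out: (5, 20, 100)
preds = linear(out.reshape(-1, n_hidden))   # (100, 1), one prediction per step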
# Without batch_first=True, nn.LSTM defaults to sequence-first inputs:
input_size = 1   # the number of variables in your sequence data
n_hidden = 100   # the number of hidden nodes in the LSTM layer
n_layers = 2     # the number of stacked LSTM layers
out_size = 1     # the size of the output you want from your RNN

lstm = nn.LSTM(input_size, n_hidden, n_layers)  # expects (seq_length, batch_size, num_features)
linear = nn.Linear(n_hidden, out_size)
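To make the layout difference concrete, a minimal sketch of feeding this sequence-first variant (the random batch is illustrative):

import torch

seqs = torch.randn(20, 5, 1)                # 20 steps, 5 sequences, 1 feature
out, hidden = lstm(seqs)                    # out: (20, 5, 100), still sequence-first
preds = linear(out.reshape(-1, n_hidden))   # (100, 1)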
# load in content and style image, and create target image by copying content image
content = load_image('data/style/clouds-19.jpg').to(device)
style = load_image('data/style/abstract-art-freedom.jpg', shape=content.shape[-2:]).to(device)
target = content.clone().requires_grad_(True).to(device)

# weight the contribution of each style layer equally
style_weights = {'conv1_1': .2,
                 'conv2_1': .2,
                 'conv3_1': .2,
                 'conv4_1': .2,
                 'conv5_1': .2}
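With the images and layer weights in place, the target image itself is optimized; a minimal sketch of that loop, assuming a frozen VGG feature extractor `vgg` plus the `get_features`, `gramian`, `content_loss`, and `style_loss` helpers defined below (the optimizer settings and the content/style weight ratio are illustrative):

import torch.optim as optim

# precompute features and style gramians once; only the target changes
c_features = get_features(vgg, content)
s_features = get_features(vgg, style)
s_grams = {layer: gramian(s_features[layer]) for layer in s_features}

optimizer = optim.Adam([target], lr=0.003)
content_weight, style_weight = 1, 1e6   # illustrative ratio

for step in range(2000):
    t_features = get_features(vgg, target)
    loss = (content_weight * content_loss(c_features, t_features)
            + style_weight * style_loss(s_grams, t_features, style_weights))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()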
import torch

def style_loss(s_grams, t_features, weights):
    """
    Compute style loss, i.e. the weighted sum of MSE of all layers.
    """
    # for each style layer, get target and style gramians and compare
    loss = 0
    for layer in weights:
        _, d, h, w = t_features[layer].shape
        t_gram = gramian(t_features[layer])
        layer_loss = weights[layer] * torch.mean((t_gram - s_grams[layer]) ** 2)
        loss += layer_loss / (d * h * w)
    return loss
import torch

def content_loss(c_features, t_features):
    """
    Compute mean squared content loss of all feature maps.
    """
    # content is compared at a single deep layer, conv4_2
    loss = 0.5 * (t_features['conv4_2'] - c_features['conv4_2']) ** 2
    return torch.mean(loss)
def gramian(tensor):
    # flatten the spatial dimensions, keeping one row per channel
    t = tensor.view(tensor.shape[1], -1)
    return t @ t.T
def get_features(model, image):
    # map VGG19 module indices to the layer names used above
    layers = {
        '0' : 'conv1_1',
        '5' : 'conv2_1',
        '10': 'conv3_1',
        '19': 'conv4_1',
        '21': 'conv4_2',
        '28': 'conv5_1'
    }
    # run the image through the model, keeping the listed activations
    features = {}
    x = image
    for name, layer in model._modules.items():
        x = layer(x)
        if name in layers:
            features[layers[name]] = x
    return features
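A minimal sketch of wiring this up, assuming torchvision's pretrained VGG19 (whose module indices the mapping above refers to); the network is frozen since only the target image is optimized:

import torch
from torchvision import models

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# convolutional part of VGG19 only, weights frozen
# (newer torchvision versions use the weights= argument instead)
vgg = models.vgg19(pretrained=True).features.to(device).eval()
for param in vgg.parameters():
    param.requires_grad_(False)

content_features = get_features(vgg, content)   # dict of named activations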