Commit 97b82e7e authored by Perez Visaires, Jon's avatar Perez Visaires, Jon

Último cambio.

parent 20d84a8b
This diff is collapsed.
......@@ -8,11 +8,13 @@ sys.path.append("../../tools") # Herramientas propias de MantaFlow.
import uniio # Lectura de ficheros .uni
# Carga los datos del campos escalar de la densidad.
def load_data_density (num_initial, num_sims, frames):
base_path = "../../data"
densities = []
densities = [] # Guardamos los frames de todas las escenas en la lista densities.
for sim in range(num_initial, num_sims):
......@@ -50,11 +52,13 @@ def load_data_density (num_initial, num_sims, frames):
# ------------------------------------------------------------------------------------------------------------------- #
# Carga los datos del campo de velocidad.
def load_data_velocity(num_initial, num_sims, frames):
base_path = "../../data"
velocities = []
velocities = [] # Guarda los datos de todas las escenas en la lista velocities.
for sim in range(num_initial, num_sims):
......@@ -69,7 +73,6 @@ def load_data_velocity(num_initial, num_sims, frames):
h = header["dimX"]
w = header["dimY"]
range
arr = content[:, ::-1, :, :] # Cambia el orden del eje Y.
arr = np.reshape(arr, [w, h, 3]) # Deshecha el eje Z.
......@@ -94,6 +97,8 @@ def load_data_velocity(num_initial, num_sims, frames):
# ------------------------------------------------------------------------------------------------------------------- #
# Crea los sets de entrenamiento y validación con el formato adecuado.
def make_sets(field, input_shape):
load_num = len(field)
......
import tensorflow as tf
import numpy as np
# Calcula la matriz Jacobiana de un campo vectorial.
def jacobian_2D(field):
dfxdx = field[:, 1:, 0] - field[:, :-1, 0]
......@@ -14,9 +16,23 @@ def jacobian_2D(field):
dfydy = tf.concat([dfydy, tf.expand_dims(dfydy[-1, :], axis = 0)], axis = 0)
jacobian = tf.stack([dfxdx, dfxdy, dfydx, dfydy], axis = -1)
vorticity = tf.expand_dims(dfydx - dfxdy, axis = -1)
return jacobian
# Calcula la divergencia de un campo vectorial.
def divergence_2D(field):
    """Return the 2-D divergence of a vector field as a rank-3 tensor.

    Uses forward finite differences on a staggered view of the grid:
    du/dx from the x-component (channel 0) and dv/dy from the
    y-component (channel 1). The output is one cell smaller along each
    spatial axis than the input and carries a trailing channel axis of
    size 1 (added with tf.expand_dims).

    Parameters
    ----------
    field : tensor of shape (H, W, 2)
        2-D vector field; channel 0 is the x-component, channel 1 the
        y-component.

    Returns
    -------
    tensor of shape (H-1, W-1, 1)
        The discrete divergence du/dx + dv/dy.
    """
    du_dx = field[:-1, 1:, 0] - field[:-1, :-1, 0]
    dv_dy = field[1:, :-1, 1] - field[:-1, :-1, 1]
    return tf.expand_dims(du_dx + dv_dy, axis = -1)
# Calcula el rotacional de un campo vectorial.
def curl_2D(field):
dfydx = field[:, 1:, 1] - field[:, :-1, 1]
......@@ -25,7 +41,7 @@ def curl_2D(field):
dfydx = tf.concat([dfydx, tf.expand_dims(dfydx[:, -1], axis = 1)], axis = 1)
dfxdy = tf.concat([dfxdy, tf.expand_dims(dfxdy[-1, :], axis = 0)], axis = 0)
curl = dfydx - dfxdy
curl = tf.stack([dfydx, dfxdy], axis = -1)
return curl
......@@ -49,6 +65,8 @@ def streamfunction_custom_loss(y_true, y_pred):
return curl_loss
# La función de coste con el cálculo de la matriz Jacobiana incluido.
def jacobian_custom_loss(y_true, y_pred, j_weight = 1.0, d_weight = 1.0):
jacobian_loss = tf.reduce_mean(tf.abs(jacobian_2D(y_true) - jacobian_2D(y_pred)))
......
# ------------------------------------------------------------------------------------------------------------------- #
import tensorflow as tf
import numpy as np
......@@ -6,14 +7,14 @@ from tensorflow.keras.layers import RepeatVector, LSTM, Conv1D, Reshape, Input,
from tensorflow.keras.losses import mean_absolute_error, mean_squared_error
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Model
from tensorflow.keras.utils import plot_model
from math import floor
# ------------------------------------------------------------------------------------------------------------------- #
def prepare_data_lstm(encoder, train_data, vali_data, batch_size, time_steps, out_time_steps, frames):
# Función que crea un generador para entrenar la red LSTM.
encoded_train = encoder.predict(train_data)
encoded_vali = encoder.predict(vali_data)
def prepare_data_lstm(encoded_train, encoded_vali, batch_size, time_steps, out_time_steps, frames):
def generator_count(encoded_data, batch_size, time_steps, out_time_steps, frames):
......@@ -39,7 +40,7 @@ def prepare_data_lstm(encoder, train_data, vali_data, batch_size, time_steps, ou
np.random.set_state(rng)
np.random.shuffle(array)
def restructure_encoded_data(encoded_data, time_steps, out_time_steps, batch_size):
def restructure_encoded_data(encoded_data, time_steps, out_time_steps, batch_size): # Divide los datos de una escena en paquetes de 6 y 1.
content_shape = encoded_data[0].shape # (256,)
final_sample_count = encoded_data.shape[0] - time_steps - out_time_steps # frames, frames - batch_size, frames - 2 * batch_size, ...
......@@ -58,7 +59,7 @@ def prepare_data_lstm(encoder, train_data, vali_data, batch_size, time_steps, ou
return X_data, y_data
def generator_scene(encoded_data, batch_size, time_steps, out_time_steps, frames):
def generator_scene(encoded_data, batch_size, time_steps, out_time_steps, frames): # Crea el generador con el que se entrena la red LSTM.
scene_count, sample_count, scene_iteration_count = generator_count(encoded_data, batch_size, time_steps, out_time_steps, frames)
......@@ -93,6 +94,8 @@ def prepare_data_lstm(encoder, train_data, vali_data, batch_size, time_steps, ou
return train_gen_samples, train_generator, vali_gen_samples, vali_generator
# Crea el modelo LSTM, con dos unidades LSTM y una unidad de convolución.
def make_lstm(time_steps, out_time_steps, latent_dimension, encoder_lstm_neurons, decoder_lstm_neurons, activation, use_bias, dropout, recurrent_dropout, stateful, lstm_optimizer, loss):
lstm_input = Input(shape = (time_steps, latent_dimension))
......@@ -136,6 +139,8 @@ def make_lstm(time_steps, out_time_steps, latent_dimension, encoder_lstm_neurons
lstm_output = x
lstm = Model(inputs = lstm_input, outputs = lstm_output)
# plot_model(lstm, to_file = "../../modelos/plots/lstm.png", show_shapes = True)
# plot_model(lstm, to_file = "../../modelos/plots/lstm_expanded.png", show_shapes = True, expand_nested = True)
lstm.summary()
lstm.compile(loss = loss,
......@@ -146,6 +151,8 @@ def make_lstm(time_steps, out_time_steps, latent_dimension, encoder_lstm_neurons
# ------------------------------------------------------------------------------------------------------------------- #
# El optimizador del LSTM es RMSprop.
def make_optimizer_lstm():
optimizer = RMSprop(lr = 0.000126,
......
......@@ -13,7 +13,7 @@ def training_plot(network_train, epochs, batch_size, dropout, learning_rate, ide
plot_val_metric2 = network_train.history["val_" + metric2]
plt.figure(figsize = (15, 4))
plt.figure(figsize = (22, 5))
ax = plt.subplot(1, 3, 1)
plt.plot(plot_epochs, plot_loss, label = loss_name, color = "royalblue")
......
......@@ -10,90 +10,120 @@ USE_GPU = True
if USE_GPU:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
from tensorflow.keras.models import load_model
from random import randrange
sys.path.append("../functions")
from data_functions import load_data_velocity
from data_functions import load_data_velocity, load_data_density
from loss_physics import jacobian_custom_loss
# Parametros para las predicciones.
NUM_INITIAL = 1900
NUM_SIMS = 1910
FRAMES = 200
TIME_STEPS = 6
OUT_TIME_STEPS = 1
# Cargamos los datos de velocidad y densidad.
velocities = load_data_velocity(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
frames = FRAMES)
autoencoder = load_model("../../modelos/AE_single_vel_Jacobian_BS-256_LR-0.0015_DO-0.0.h5", {"jacobian_custom_loss" : jacobian_custom_loss})
encoder = load_model("../../modelos/encoder_single_vel_Jacobian_BS-256_LR-0.0015_DO-0.0.h5")
decoder = load_model("../../modelos/decoder_single_vel_Jacobian_BS-256_LR-0.0015_DO-0.0.h5")
lstm = load_model("../../modelos/LSTM_single_mae_BS-16_LR-_DO-0.0132.h5")
densities = load_data_density(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
frames = FRAMES)
# Recuperamos los distintos modelos de encoder y decoder, más el LSTM.
encoder_u = load_model("../../modelos/encoder_u_vel_MAE_BS-256_LR-0.00015_DO-0.0.h5")
encoder_v = load_model("../../modelos/encoder_v_vel_MAE_BS-256_LR-0.00015_DO-0.0.h5")
decoder_u = load_model("../../modelos/decoder_u_vel_MAE_BS-256_LR-0.00015_DO-0.0.h5")
decoder_v = load_model("../../modelos/decoder_v_vel_MAE_BS-256_LR-0.00015_DO-0.0.h5")
encoder_den = load_model("../../modelos/encoder_single_den_MAE_BS-256_LR-0.00015_DO-0.0.h5")
decoder_den = load_model("../../modelos/decoder_den_MAE_BS-256_LR-0.00015_DO-0.0.h5")
lstm = load_model("../../modelos/LSTM_mae_BS-16_LR-_DO-0.0132.h5")
# Elegimos una escena de forma aleatoria entre las cargadas.
NUM_SCENES = velocities.shape[0] // FRAMES
RAND_SCENE = randrange(0, NUM_SCENES)
scene = velocities[RAND_SCENE*FRAMES : RAND_SCENE*FRAMES + FRAMES, :, :, :]
scene_u = scene[:, :, :, 0:1]
scene_v = scene[:, :, :, 1:2]
# Definimos los datos de los componentes de la velocidad y la densidad de la escena elegida.
autoencoder_scene = autoencoder.predict(scene)
autoencoder_scene_u = autoencoder_scene[:, :, :, 0:1]
autoencoder_scene_v = autoencoder_scene[:, :, :, 1:2]
scene_vel = velocities[RAND_SCENE*FRAMES : RAND_SCENE*FRAMES + FRAMES, :, :, :]
scene_u = scene_vel[:, :, :, 0:1]
scene_v = scene_vel[:, :, :, 1:2]
scene_den = densities[RAND_SCENE*FRAMES : RAND_SCENE*FRAMES + FRAMES, :, :, :]
encoded_scene = encoder.predict(scene)
# Codificamos los datos de la escena con sus respectivos encoders.
encoded_scene_u = encoder_u.predict(scene_u)
encoded_scene_v = encoder_v.predict(scene_v)
encoded_scene_den = encoder_den.predict(scene_den)
# Juntamos todos los datos codificados en un solo vector con el que alimentar el modelo LSTM.
encoded_scene = np.concatenate((encoded_scene_den, encoded_scene_u, encoded_scene_v), axis = -1)
latent_dim = encoded_scene.shape[-1]
lstm_scene = []
lstm_scene_den = []
lstm_scene_u = []
lstm_scene_v = []
# Introducimos los datos en el LSTM y recogemos la prediccion en las listas lstm_scene.
for i in range(FRAMES - 5):
time_frames = encoded_scene[i : i + 6]
time_frames = time_frames.reshape(1, 6, latent_dim)
lstm_prediction = lstm.predict(time_frames, batch_size = 1)
decoded_frame = decoder.predict(lstm_prediction)
lstm_scene.append(decoded_frame)
lstm_list = np.split(ary = lstm_prediction, indices_or_sections = 3, axis = -1)
decoded_frame_den = decoder_den.predict(lstm_list[0])
lstm_scene_den.append(decoded_frame_den)
decoded_frame_u = decoder_u.predict(lstm_list[1])
lstm_scene_u.append(decoded_frame_u)
decoded_frame_v = decoder_v.predict(lstm_list[2])
lstm_scene_v.append(decoded_frame_v)
lstm_scene = np.reshape(lstm_scene, (len(lstm_scene), 64, 64, 2))
lstm_scene_u = lstm_scene[:, :, :, 0:1]
lstm_scene_v = lstm_scene[:, :, :, 1:2]
# Devolvemos a su versión original las predicciones dadas por el LSTM.
n = 10
lstm_scene_den = np.reshape(lstm_scene_den, (len(lstm_scene_den), 64, 64, 1))
lstm_scene_u = np.reshape(lstm_scene_u, (len(lstm_scene_u), 64, 64, 1))
lstm_scene_v = np.reshape(lstm_scene_v, (len(lstm_scene_v), 64, 64, 1))
# Ploteamos una comparativa.
n = 5
plt.figure(figsize = (10, 3))
for i in range(n):
ax = plt.subplot(3, n, i + 1)
plt.imshow(scene_u[i].reshape(64, 64))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n)
plt.imshow(autoencoder_scene_u[i].reshape(64, 64))
ax = plt.subplot(2, n, i + 1)
plt.imshow(scene_den[i].reshape(64, 64))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n + n)
plt.imshow(lstm_scene_u[i].reshape(64, 64))
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(lstm_scene_den[i].reshape(64, 64))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig("../../plots/comparativa.png")
# Ploteamos una escena entera y las predicciones obtenidas.
out_dir = "../../imagenes"
if not os.path.exists(out_dir): os.makedirs(out_dir)
for i in range(TIME_STEPS, FRAMES):
scipy.misc.toimage(np.reshape(scene_u[i - TIME_STEPS], [64, 64])).save("{}/in_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(autoencoder_scene_u[i - TIME_STEPS], [64, 64])).save("{}/out_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(lstm_scene_u[i - TIME_STEPS], [64, 64])).save("{}/pred_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(scene_den[i - TIME_STEPS], [64, 64])).save("{}/in_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(lstm_scene_den[i - TIME_STEPS], [64, 64])).save("{}/pred_{}.png".format(out_dir, i))
# Standalone script: load a small batch of simulated velocity fields,
# build training/validation sets for the full field and for each velocity
# component separately, then plot the divergence of one training sample.
import numpy as np
import sys
import matplotlib.pyplot as plt
# Make the project's helper modules importable (relative to this script's
# expected location inside the repository).
sys.path.append("../functions")
from data_functions import load_data_velocity, make_sets
# NOTE(review): jacobian_2D and curl_2D are imported but never used below.
from loss_physics import jacobian_2D, divergence_2D, curl_2D
# Scene index range and frames per scene for the data loader.
NUM_INITIAL = 1000
NUM_SIMS = 1010
NUM_FRAMES = 200
# Load the 2-D velocity fields for scenes NUM_INITIAL..NUM_SIMS-1.
# Presumably returns an array shaped (frames, H, W, 2) — TODO confirm
# against load_data_velocity.
velocity_field = load_data_velocity(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
frames = NUM_FRAMES)
# Per-sample shape (H, W, channels) for the full two-channel field.
input_shape = (velocity_field.shape[1],
velocity_field.shape[2],
velocity_field.shape[3])
# Split the full field into training and validation sets.
train_data, vali_data = make_sets(field = velocity_field, input_shape = input_shape)
# x-component only (channel 0), keeping a trailing channel axis of size 1.
u_field = velocity_field[:, :, :, 0:1]
input_shape_u = (u_field.shape[1],
u_field.shape[2],
u_field.shape[3])
train_data_u, vali_data_u = make_sets(field = u_field, input_shape = input_shape_u)
# y-component only (channel 1), keeping a trailing channel axis of size 1.
v_field = velocity_field[:, :, :, 1:2]
input_shape_v = (v_field.shape[1],
v_field.shape[2],
v_field.shape[3])
train_data_v, vali_data_v = make_sets(field = v_field, input_shape = input_shape_v)
# Visual sanity check: plot the divergence of the first training sample
# (channel 0 of the divergence output) and save it to disk.
plt.matshow(divergence_2D(train_data[0, :, :, :])[:, :, 0])
plt.savefig("divergence.png")
......@@ -6,45 +6,42 @@ import h5py
import os
import sys
# Activa el uso de GPU con TensorFlow.
USE_GPU = True
if USE_GPU:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
sys.path.append("../functions")
from data_functions import load_data_density, load_data_velocity, make_sets
from plot_functions import training_plot
from autoencoder_functions import train_autoencoder_multiple, train_autoencoder_single, make_layered_autoencoder, make_adam, make_staged_autoencoder, save_encoder, save_decoder
from autoencoder_functions import train_autoencoder_batch, train_autoencoder_single
from loss_physics import jacobian_custom_loss
##### -------------------------------------------------- Hiperparámetros -------------------------------------------- #####
NUM_SIMS = 1200 # Índice máximo escenas.
NUM_SIMS = 1999 # Índice máximo escenas.
NUM_INITIAL = 1000 # Índice inicial escenas.
NUM_SCENES = NUM_SIMS - 1000 # Número de escenas.
NUM_FRAMES = 200 # Frames por escena.
EPOCHS = 20 # Epochs para entrenamiento normal.
EPOCHS = 100 # Epochs para entrenamiento normal.
EPOCHS_LIST = 100 # Epochs para cada batch size de la lista.
PRE_EPOCHS = 1 # Epochs de preentrenamiento.
BATCH_MULTIPLE = False # Probar distintos batch sizes, comparar diferencias.
PRE_TRAINING = True # Realizar preentrenamiento.
batch_size_list = [1024, 512, 256, 128, 64, 32, 16, 8, 4] # Posibles batch sizes de prueba.
batch_size_list = [1024, 512, 256, 128] # Posibles batch sizes de prueba.
BATCH_SIZE = 256 # Batch size oficial.
PRE_BATCH_SIZE = 256 # Bacth size para preentrenamiento.
learning_rate_list = [0.01, 0.001, 0.0001, 0.00001]
learning_rate_list = [0.001, 0.0001, 0.00001]
LEARNING_RATE = 0.0015
dropout_list = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5]
dropout_list = [0.0, 0.1, 0.2]
DROPOUT = 0.0 # Porcentaje de nodos que apagar mediante dropout.
FEATURE_MULTIPLIER = 8 # Controla la cantidad de filtros de convolución utilizados por el autoencoder, y la dimension del espacio latente de la red.
......@@ -52,33 +49,20 @@ SURFACE_KERNEL_SIZE = 4 # Matriz 4x4
KERNEL_SIZE = 2 # Matriz 2x2
INIT_FUNCTION = "glorot_normal" # Inicialización aleatoria de los pesos de la red neuronal.
COMBI_FIELD = False
# Elegir entre entrenar el autoencoder de la velocidad o densidad.
VEL_FIELD = True
DEN_FIELD = False
##### -------------------------------------------------- Carga de datos --------------------------------------------- #####
if COMBI_FIELD:
density_field = load_data_density(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
frames = NUM_FRAMES)
velocity_field = load_data_velocity(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
frames = NUM_FRAMES)
MULTI_INPUT = False
combination_field = np.concatenate((velocity_field, density_field), axis = 3)
input_shape = (combination_field.shape[1],
combination_field.shape[2],
combination_field.shape[3])
# Utilizar la función de coste con la matriz Jacobiana.
CUSTOM_LOSS = False
train_data, vali_data = make_sets(field = combination_field, input_shape = input_shape)
##### -------------------------------------------------- Carga de datos --------------------------------------------- #####
id_type = "comb"
# Carga los datos del campo de velocidad y los separa también en sus componentes.
elif VEL_FIELD:
if VEL_FIELD:
velocity_field = load_data_velocity(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
......@@ -106,7 +90,13 @@ elif VEL_FIELD:
train_data_v, vali_data_v = make_sets(field = v_field, input_shape = input_shape_v)
id_type = "vel"
id_type = "vel" # Identificador a la hora de guardar modelos y plots.
MULTI_INPUT = True # Dos inputs para el autoencoder, uno por cada componente.
input_shape_reduced = input_shape_u
# Carga los datos del campo escalar de la densidad.
elif DEN_FIELD:
......@@ -119,35 +109,57 @@ elif DEN_FIELD:
density_field.shape[3])
train_data, vali_data = make_sets(field = density_field, input_shape = input_shape)
id_type = "den"
# Asignaciones extra sin importancia.
train_data_u = train_data
train_data_v = train_data
vali_data_u = vali_data
vali_data_v = vali_data
input_shape_reduced = input_shape
id_type = "den" # Identificador a la hora de guardar modelos y plots.
##### -------------------------------------------------- Autoencoder 2D --------------------------------------------- #####
loss = jacobian_custom_loss
loss_name = "Jacobian"
# Selecciona la función de coste Jacobiana, si no utiliza MAE como función de coste.
if CUSTOM_LOSS:
loss = jacobian_custom_loss
loss_name = "Jacobian"
else:
loss = "mae"
loss_name = "MAE"
##### -------------------------------------------------- Autoencoder Entrenamiento ---------------------------------- #####
if BATCH_MULTIPLE:
train_autoencoder_multiple(dropout_list = dropout_list,
id_type = id_type,
learning_rate_list = learning_rate_list,
batch_size_list = batch_size_list,
input_shape = input_shape,
train_data = train_data,
vali_data = vali_data,
epochs = EPOCHS_LIST,
pre_training = PRE_TRAINING,
pre_epochs = PRE_EPOCHS,
pre_batch_size = PRE_BATCH_SIZE,
loss = loss,
loss_name = loss_name,
feature_multiplier = FEATURE_MULTIPLIER,
surface_kernel_size = SURFACE_KERNEL_SIZE,
kernel_size = KERNEL_SIZE,
init_function = INIT_FUNCTION)
train_autoencoder_batch(dropout_list = dropout_list,
id_type = id_type,
multi_input = MULTI_INPUT,
learning_rate_list = learning_rate_list,
batch_size_list = batch_size_list,
input_shape = input_shape,
input_shape_reduced = input_shape_reduced,
train_data = train_data,
train_data_u = train_data_u,
train_data_v = train_data_v,
vali_data = vali_data,
vali_data_u = vali_data_u,
vali_data_v = vali_data_v,
epochs = EPOCHS_LIST,
pre_training = PRE_TRAINING,
pre_epochs = PRE_EPOCHS,
pre_batch_size = PRE_BATCH_SIZE,
loss = loss,
loss_name = loss_name,
feature_multiplier = FEATURE_MULTIPLIER,
surface_kernel_size = SURFACE_KERNEL_SIZE,
kernel_size = KERNEL_SIZE,
init_function = INIT_FUNCTION)
# ------------------------------------------------------------------------------------------------------------------- #
......@@ -155,11 +167,17 @@ else:
train_autoencoder_single(dropout = DROPOUT,
id_type = id_type,
multi_input = MULTI_INPUT,
learning_rate = LEARNING_RATE,
batch_size = BATCH_SIZE,
input_shape = input_shape,
input_shape = input_shape,
input_shape_reduced = input_shape_reduced,
train_data = train_data,
train_data_u = train_data_u,
train_data_v = train_data_v,
vali_data = vali_data,
vali_data_u = vali_data_u,
vali_data_v = vali_data_v,
epochs = EPOCHS,
pre_training = PRE_TRAINING,
pre_epochs = PRE_EPOCHS,
......
......@@ -18,7 +18,7 @@ from tensorflow.keras.callbacks import ModelCheckpoint
sys.path.append("../functions")
from data_functions import load_data_velocity, make_sets
from data_functions import load_data_velocity, load_data_density, make_sets
from lstm_functions import make_lstm, make_optimizer_lstm, prepare_data_lstm
from plot_functions import training_plot
......@@ -26,10 +26,10 @@ from plot_functions import training_plot
TIME_STEPS = 6 # 6 frames para alimentar al LSTM.
OUT_TIME_STEPS = 1 # Predicción de 1 frame por el LSTM.
LATENT_DIMENSION = 256 # 512 features en el codificado.
LATENT_DIMENSION = 768 # 512 features en el codificado.
NUM_FRAMES = 200
NUM_INITIAL = 1000
NUM_SIMS = 1200
NUM_SIMS = 1009
ENCODER_NEURONS = 256
DECODER_NEURONS = 512
......@@ -38,7 +38,7 @@ ACTIVATION = "tanh"
LOSS = "mae"