Commit 88029284 authored by Perez Visaires, Jon

New physical loss.

parent f168a76f
@@ -478,7 +478,7 @@ def make_adam(adam_learning_rate = 0.00015, adam_epsilon = 1e-08, adam_lr_decay
 # ------------------------------------------------------------------------------------------------------------------- #
-def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer):
+def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer, loss):
     stages = []
@@ -495,7 +495,7 @@ def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer):
     stage_output = x
     stage_1 = Model(inputs = stage_input, outputs = stage_output)
-    stage_1.compile(optimizer = optimizer, loss = "mse", metrics = ["mae"])
+    stage_1.compile(optimizer = optimizer, loss = loss, metrics = ["mse", "mae"])
     stages.append(stage_1)
     ### Stage 2 ###
@@ -513,7 +513,7 @@ def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer):
     stage_output = x
     stage_2 = Model(inputs = stage_input, outputs = stage_output)
-    stage_2.compile(optimizer = optimizer, loss = "mse", metrics = ["mae"])
+    stage_2.compile(optimizer = optimizer, loss = loss, metrics = ["mse", "mae"])
     stages.append(stage_2)
     ### Stage 3 ###
@@ -533,7 +533,7 @@ def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer):
     stage_output = x
     stage_3 = Model(inputs = stage_input, outputs = stage_output)
-    stage_3.compile(optimizer = optimizer, loss = "mse", metrics = ["mae"])
+    stage_3.compile(optimizer = optimizer, loss = loss, metrics = ["mse", "mae"])
     stages.append(stage_3)
     ### Stage 4 ###
@@ -555,7 +555,7 @@ def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer):
     stage_output = x
     stage_4 = Model(inputs = stage_input, outputs = stage_output)
-    stage_4.compile(optimizer = optimizer, loss = "mse", metrics = ["mae"])
+    stage_4.compile(optimizer = optimizer, loss = loss, metrics = ["mse", "mae"])
     stages.append(stage_4)
     ### Stage 5 ###
@@ -579,7 +579,7 @@ def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer):
     stage_output = x
     stage_5 = Model(inputs = stage_input, outputs = stage_output)
-    stage_5.compile(optimizer = optimizer, loss = "mse", metrics = ["mae"])
+    stage_5.compile(optimizer = optimizer, loss = loss, metrics = ["mse", "mae"])
     stages.append(stage_5)
     ### Stage 6 ###
@@ -605,7 +605,7 @@ def make_staged_autoencoder(input_shape, layer_conv, layer_deconv, optimizer):
     stage_output = x
     stage_6 = Model(inputs = stage_input, outputs = stage_output)
-    stage_6.compile(optimizer = optimizer, loss = "mse", metrics = ["mae"])
+    stage_6.compile(optimizer = optimizer, loss = loss, metrics = ["mse", "mae"])
     stages.append(stage_6)
     return stages
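The change above threads a loss argument through make_staged_autoencoder instead of hard-coding "mse", so each stage's compile call accepts any Keras-compatible loss: a string name or a callable taking (y_true, y_pred). A minimal sketch of the pattern with one toy stage (l1_loss, make_single_stage and the 64x64x2 shape below are illustrative placeholders, not the project's actual architecture):

import tensorflow as tf
from tensorflow.keras.layers import Input, Conv2D, Conv2DTranspose
from tensorflow.keras.models import Model

def l1_loss(y_true, y_pred):
    # Any callable returning a scalar tensor from (y_true, y_pred) can be plugged in.
    return tf.reduce_mean(tf.abs(y_true - y_pred))

def make_single_stage(input_shape, optimizer, loss):
    stage_input = Input(shape = input_shape)
    x = Conv2D(8, 2, strides = 2, padding = "same", activation = "relu")(stage_input)
    stage_output = Conv2DTranspose(input_shape[-1], 2, strides = 2, padding = "same")(x)
    stage = Model(inputs = stage_input, outputs = stage_output)
    # Same compile pattern as the stages above: pluggable loss, MSE/MAE tracked as metrics.
    stage.compile(optimizer = optimizer, loss = loss, metrics = ["mse", "mae"])
    return stage

stage = make_single_stage((64, 64, 2), "adam", l1_loss)

Passing loss = "mse" keeps the previous behavior, since compile accepts either form.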
@@ -631,13 +631,13 @@ def save_encoder(layer_conv, input_shape):
     encoder.save("../modelos/encoder_true.h5")
-def save_decoder(layer_conv, layer_deconv):
+def save_decoder(layer_conv, layer_deconv, latent_dim):
     decoder_target_shape = (layer_conv[5].output_shape[1],
                             layer_conv[5].output_shape[2],
                             layer_conv[5].output_shape[3])
-    decoder_input = Input(shape = (256,))
+    decoder_input = Input(shape = (latent_dim,))
     x = decoder_input
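save_decoder now receives the latent width as a parameter instead of hard-coding a 256-wide Input, so the saved decoder always matches the encoder it is paired with. A small sketch of why the parameter matters (toy_decoder is a hypothetical stand-in for the decoder head built inside save_decoder):

import numpy as np
from tensorflow.keras.layers import Input, Dense
from tensorflow.keras.models import Model

def toy_decoder(latent_dim):
    # Stand-in for the decoder head: its input width must equal the encoder bottleneck.
    latent_input = Input(shape = (latent_dim,))
    output = Dense(4)(latent_input)
    return Model(inputs = latent_input, outputs = output)

codes = np.zeros((1, 512), dtype = "float32")  # e.g. codes from a 512-wide encoder
toy_decoder(512).predict(codes)                # shapes agree
# toy_decoder(256).predict(codes)              # would fail with a shape mismatch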
@@ -6,7 +6,7 @@ import h5py
 import os
 os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
-os.environ["CUDA_VISIBLE_DEVICES"] = "3"
+os.environ["CUDA_VISIBLE_DEVICES"] = "2"
 from tensorflow.keras.models import Model, load_model
 from tensorflow.keras.callbacks import ModelCheckpoint
@@ -14,11 +14,12 @@ from tensorflow.keras.callbacks import ModelCheckpoint
 from data_functions import load_data_density, load_data_velocity, make_sets
 from plot_functions import training_plot
 from autoencoder_functions import make_layered_autoencoder, make_adam, make_staged_autoencoder, save_encoder, save_decoder
+from loss_physics import jacobian_custom_loss
 ##### -------------------------------------------------- Hyperparameters -------------------------------------------- #####
-NUM_SIMS = 1010 # Maximum scene index.
+NUM_SIMS = 1900 # Maximum scene index.
 NUM_INITIAL = 1000 # First scene index.
 NUM_SCENES = NUM_SIMS - 1000 # Number of scenes.
 NUM_FRAMES = 200 # Frames per scene.
@@ -28,7 +29,7 @@ AE_EPOCHS_LIST = 5 # Epochs for each batch size in the list.
 PRE_EPOCHS = 1 # Pre-training epochs.
 AE_BATCH_MULTIPLE = False # Try different batch sizes and compare the differences.
-PRE_TRAINING = False # Run pre-training.
+PRE_TRAINING = True # Run pre-training.
 ae_batch_list = [1024, 512, 256, 128, 64, 32] # Candidate batch sizes to try.
 AE_BATCH_SIZE = 128 # Official batch size.
@@ -97,15 +98,15 @@ KERNEL_SIZE = 2 # 2x2 kernel matrix.
 DROPOUT = 0.0 # Fraction of nodes switched off by dropout.
 INIT_FUNCTION = "glorot_normal" # Random initialization of the network weights.
-input_shape = input_shape_u
-train_data = train_data_u
-vali_data = vali_data_u
+input_shape = input_shape_velocity
+train_data = train_data_velocity
+vali_data = vali_data_velocity
 layer_conv, layer_deconv = make_layered_autoencoder(input_shape = input_shape, feature_multiplier = FEATURE_MULTIPLIER, surface_kernel_size = SURFACE_KERNEL_SIZE, kernel_size = KERNEL_SIZE, dropout = DROPOUT, init_func = INIT_FUNCTION)
 optimizer = make_adam()
-autoencoder_stages = make_staged_autoencoder(input_shape = input_shape, layer_conv = layer_conv, layer_deconv = layer_deconv, optimizer = optimizer)
+autoencoder_stages = make_staged_autoencoder(input_shape = input_shape, layer_conv = layer_conv, layer_deconv = layer_deconv, optimizer = optimizer, loss = jacobian_custom_loss)
 ##### -------------------------------------------------- Autoencoder training ---------------------------------- #####
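Because jacobian_custom_loss is handed to compile as a plain callable, Keras invokes it with only (y_true, y_pred), so j_weight and d_weight keep their defaults of 1.0. If a different weighting is wanted, one option is a thin wrapper (weighted_jacobian_loss and the 2.0/1.0 weights below are hypothetical, not part of this commit):

def weighted_jacobian_loss(y_true, y_pred):
    # Hypothetical re-weighting that emphasizes the Jacobian term over the direct L1 term.
    return jacobian_custom_loss(y_true, y_pred, j_weight = 2.0, d_weight = 1.0)

autoencoder_stages = make_staged_autoencoder(input_shape = input_shape,
                                             layer_conv = layer_conv,
                                             layer_deconv = layer_deconv,
                                             optimizer = optimizer,
                                             loss = weighted_jacobian_loss)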
@@ -171,7 +172,7 @@ else:
 # ------------------------------------------------------------------------------------------------------------------- #
-LATENT_DIM = 256
+LATENT_DIM = 512
 save_encoder(layer_conv, input_shape)
 save_decoder(layer_conv, layer_deconv, latent_dim = LATENT_DIM)
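In this diff, LATENT_DIM is raised from 256 to 512 and is used only to size the saved decoder's input, so it has to agree with the bottleneck width the encoder actually produces. A hypothetical sanity check (assuming the saved encoder ends in a flat latent vector) could be run before saving the decoder:

# Hypothetical check, not part of this commit: the decoder only accepts codes whose
# width matches the encoder output, so LATENT_DIM must equal that width.
encoder = load_model("../modelos/encoder_true.h5")
assert encoder.output_shape[-1] == LATENT_DIM, "LATENT_DIM does not match the encoder bottleneck"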
 import tensorflow as tf
 import numpy as np
-def curl(field):
-    dfydx = field[:, 1:, 1:2] - field[:, :-1, 1:2]
-    dfxdy = field[1:, :, 0:1] - field[:-1, :, 0:1]
-    dfydx = tf.concat([dfydx, tf.expand_dims(dfydx[:, -1, :], axis = 1)], axis = 1)
-    dfxdy = tf.concat([dfxdy, tf.expand_dims(dfxdy[-1, :, :], axis = 0)], axis = 0)
+def jacobian_2D(field):
+    dfxdx = field[:, 1:, 0] - field[:, :-1, 0]
+    dfxdy = field[1:, :, 0] - field[:-1, :, 0]
+    dfydx = field[:, 1:, 1] - field[:, :-1, 1]
+    dfydy = field[1:, :, 1] - field[:-1, :, 1]
+    dfxdx = tf.concat([dfxdx, tf.expand_dims(dfxdx[:, -1], axis = 1)], axis = 1)
+    dfydx = tf.concat([dfydx, tf.expand_dims(dfydx[:, -1], axis = 1)], axis = 1)
+    dfxdy = tf.concat([dfxdy, tf.expand_dims(dfxdy[-1, :], axis = 0)], axis = 0)
+    dfydy = tf.concat([dfydy, tf.expand_dims(dfydy[-1, :], axis = 0)], axis = 0)
+    jacobian = tf.stack([dfxdx, dfxdy, dfydx, dfydy], axis = -1)
+    return jacobian
+def curl_2D(field):
+    dfydx = field[:, 1:, 1] - field[:, :-1, 1]
+    dfxdy = field[1:, :, 0] - field[:-1, :, 0]
+    dfydx = tf.concat([dfydx, tf.expand_dims(dfydx[:, -1], axis = 1)], axis = 1)
+    dfxdy = tf.concat([dfxdy, tf.expand_dims(dfxdy[-1, :], axis = 0)], axis = 0)
     curl = dfydx - dfxdy
     return curl
-def curl_custom_loss(y_true, y_pred):
-    curl_difference = tf.reduce_mean(tf.abs(y_true - curl(y_pred)))
-    return curl_difference
+def velocity_from_streamfunction(field):
+    dfdx = field[:, 1:, 0] - field[:, :-1, 0]
+    dfdy = field[1:, :, 0] - field[:-1, :, 0]
+    dfdx = tf.concat([dfdx, tf.expand_dims(dfdx[:, -1], axis = 1)], axis = 1)
+    dfdy = tf.concat([dfdy, tf.expand_dims(dfdy[-1, :], axis = 0)], axis = 0)
+    u = dfdx
+    v = -dfdy
+    velocity = tf.stack([u, v], axis = -1)
+    return velocity
+def streamfunction_custom_loss(y_true, y_pred):
+    curl_loss = tf.reduce_mean(tf.abs(y_true - curl(y_pred)))
+    return curl_loss
+def jacobian_custom_loss(y_true, y_pred, j_weight = 1.0, d_weight = 1.0):
+    jacobian_loss = tf.reduce_mean(tf.abs(jacobian_2D(y_true) - jacobian_2D(y_pred)))
+    direct_loss = tf.reduce_mean(tf.abs(y_true - y_pred))
+    loss = j_weight * jacobian_loss + d_weight * direct_loss
+    return loss
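The new jacobian_custom_loss penalizes the L1 difference between the finite-difference Jacobians of the true and predicted velocity fields on top of a plain L1 term, with the two contributions weighted by j_weight and d_weight. As written, jacobian_2D picks the velocity component from axis 2, i.e. it treats the field as an unbatched (height, width, 2) tensor; a batch-aware sketch for (batch, height, width, 2) tensors, where jacobian_2D_batched is a hypothetical name that is not part of the commit, could look like this:

import tensorflow as tf

def jacobian_2D_batched(field):
    # field: (batch, height, width, 2), channel 0 = u and channel 1 = v (assumed layout).
    dfxdx = field[:, :, 1:, 0] - field[:, :, :-1, 0]   # du/dx, forward difference along width
    dfxdy = field[:, 1:, :, 0] - field[:, :-1, :, 0]   # du/dy, forward difference along height
    dfydx = field[:, :, 1:, 1] - field[:, :, :-1, 1]   # dv/dx
    dfydy = field[:, 1:, :, 1] - field[:, :-1, :, 1]   # dv/dy
    # Repeat the last column/row so every derivative keeps the (batch, height, width) shape,
    # mirroring the tf.concat padding used in jacobian_2D.
    dfxdx = tf.concat([dfxdx, dfxdx[:, :, -1:]], axis = 2)
    dfydx = tf.concat([dfydx, dfydx[:, :, -1:]], axis = 2)
    dfxdy = tf.concat([dfxdy, dfxdy[:, -1:, :]], axis = 1)
    dfydy = tf.concat([dfydy, dfydy[:, -1:, :]], axis = 1)
    return tf.stack([dfxdx, dfxdy, dfydx, dfydy], axis = -1)  # (batch, height, width, 4)

# Quick shape check on random data standing in for a batch of 2D velocity fields.
velocity = tf.random.normal((4, 64, 64, 2))
print(jacobian_2D_batched(velocity).shape)  # (4, 64, 64, 4)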
3 files changed with no preview available (binary file type).
plots/model_ae_true_single_BS-128.png: updated (38.4 KB → 39.5 KB).