Commit a1f486f5 authored by Perez Visaires, Jon

Physical loss.

parent de411726
@@ -4,42 +4,90 @@ import tensorflow as tf
import numpy as np
import h5py
import os
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
from datos_funciones import carga_datos, crea_sets
from plots_funciones import training_plot
from autoencoder_funciones import crea_autoencoder_capas, crea_adam, crea_autoencoder_etapas, guarda_encoder, guarda_decoder
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
##### -------------------------------------------------- GPU selection --------------------------------------------- #####
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
from data_functions import load_data_density, load_data_velocity, make_sets
from plot_functions import training_plot
from autoencoder_functions import make_layered_autoencoder, make_adam, make_staged_autoencoder, save_encoder, save_decoder
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
##### -------------------------------------------------- Hyperparameters ------------------------------------------- #####
NUM_SIMS = 1900 # Maximum scene index.
NUM_INICIO = 1000
NUM_SIMS = 1010 # Maximum scene index.
NUM_INITIAL = 1000 # Initial scene index.
NUM_SCENES = NUM_SIMS - 1000 # Number of scenes.
NUM_FRAMES = 200 # Frames per scene.
AE_EPOCHS = 100 # Epochs for normal training.
AE_EPOCHS = 25 # Epochs for normal training.
AE_EPOCHS_LIST = 5 # Epochs for each batch size in the list.
PRE_EPOCHS = 1 # Pre-training epochs.
AE_BATCH_MULTIPLE = False # Try different batch sizes and compare the differences.
PRE_TRAINING = True # Perform pre-training.
PRE_TRAINING = False # Perform pre-training.
ae_batch_list = [1024, 512, 256, 128, 64, 32] # Candidate batch sizes to try.
AE_BATCH_SIZE = 128 # Official batch size.
PRE_BATCH_SIZE = 128 # Batch size for pre-training.
COMBI_FIELD = False
VEL_FIELD = True
DEN_FIELD = False
##### -------------------------------------------------- Data loading ---------------------------------------------- #####
densities = carga_datos(num_inicio = NUM_INICIO, num_sims = NUM_SIMS, frames = NUM_FRAMES)
density_field = load_data_density(num_initial = NUM_INITIAL, num_sims = NUM_SIMS, frames = NUM_FRAMES)
velocity_field = load_data_velocity(num_initial = NUM_INITIAL, num_sims = NUM_SIMS, frames = NUM_FRAMES)
if COMBI_FIELD:
combination_field = np.concatenate((velocity_field, density_field), axis = 3)
input_shape_combination = (combination_field.shape[1],
combination_field.shape[2],
combination_field.shape[3])
train_data_combination, vali_data_combination = make_sets(field = combination_field, input_shape = input_shape_combination)
elif VEL_FIELD:
input_shape_velocity = (velocity_field.shape[1],
velocity_field.shape[2],
velocity_field.shape[3])
train_data_velocity, vali_data_velocity = make_sets(field = velocity_field, input_shape = input_shape_velocity)
u_field = velocity_field[:, :, :, 0:1]
input_shape_u = (u_field.shape[1],
u_field.shape[2],
u_field.shape[3])
train_data_u, vali_data_u = make_sets(field = u_field, input_shape = input_shape_u)
v_field = velocity_field[:, :, :, 1:2]
input_shape_v = (v_field.shape[1],
v_field.shape[2],
v_field.shape[3])
train_data_v, vali_data_v = make_sets(field = v_field, input_shape = input_shape_v)
elif DEN_FIELD:
input_shape_density = (density_field.shape[1],
density_field.shape[2],
density_field.shape[3])
train_data_density, vali_data_density = make_sets(field = density_field, input_shape = input_shape_density)
train_data, vali_data = crea_sets(densities)
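# For reference: with the 64x64 frames produced by the load_data_* loaders,
# density_field has shape (N, 64, 64, 1) and velocity_field has shape
# (N, 64, 64, 2), where N is the number of loaded frames. The branches above
# therefore give input_shape_combination = (64, 64, 3) (concatenation along
# axis 3), input_shape_velocity = (64, 64, 2), and
# input_shape_u = input_shape_v = input_shape_density = (64, 64, 1).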
##### -------------------------------------------------- Autoencoder 2D --------------------------------------------- #####
@@ -47,26 +95,27 @@ FEATURE_MULTIPLIER = 8 # Controls the number of convolution filters used.
SURFACE_KERNEL_SIZE = 4 # 4x4 matrix.
KERNEL_SIZE = 2 # 2x2 matrix.
DROPOUT = 0.0 # Fraction of nodes to switch off via dropout.
INIT_FUNCTION = "glorot_normal" # Initialization of the neural network weights.
INIT_FUNCTION = "glorot_normal" # Random initialization of the neural network weights.
input_shape = (train_data.shape[1],
train_data.shape[2],
train_data.shape[3])
input_shape = input_shape_u
train_data = train_data_u
vali_data = vali_data_u
layer_conv, layer_deconv = crea_autoencoder_capas(input_shape = input_shape, feature_multiplier = FEATURE_MULTIPLIER, surface_kernel_size = SURFACE_KERNEL_SIZE, kernel_size = KERNEL_SIZE, dropout = DROPOUT, init_func = INIT_FUNCTION)
layer_conv, layer_deconv = make_layered_autoencoder(input_shape = input_shape, feature_multiplier = FEATURE_MULTIPLIER, surface_kernel_size = SURFACE_KERNEL_SIZE, kernel_size = KERNEL_SIZE, dropout = DROPOUT, init_func = INIT_FUNCTION)
optimizer = crea_adam()
optimizer = make_adam()
autoencoder_stages = make_staged_autoencoder(input_shape = input_shape, layer_conv = layer_conv, layer_deconv = layer_deconv, optimizer = optimizer)
stages = crea_autoencoder_etapas(input_shape = input_shape, layer_conv = layer_conv, layer_deconv = layer_deconv, optimizer = optimizer)
##### -------------------------------------------------- Autoencoder training -------------------------------------- #####
autoencoder = stages [-1]
autoencoder = autoencoder_stages[-1]
autoencoder_clean_weights = autoencoder.get_weights()
if PRE_TRAINING:
for stage in stages:
for stage in autoencoder_stages:
autoencoder_pre_train = stage.fit(train_data, train_data,
epochs = PRE_EPOCHS,
@@ -77,6 +126,7 @@ if PRE_TRAINING:
autoencoder.save("../modelos/autoencoder_true_pretraining.h5")
autoencoder_pre_weights = autoencoder.get_weights()
# ------------------------------------------------------------------------------------------------------------------- #
mc = ModelCheckpoint(filepath = "../modelos/autoencoder_true.h5",
@@ -121,5 +171,7 @@ else:
# ------------------------------------------------------------------------------------------------------------------- #
guarda_encoder(layer_conv, input_shape)
guarda_decoder(layer_conv, layer_deconv)
LATENT_DIM = 256
save_encoder(layer_conv, input_shape)
save_decoder(layer_conv, layer_deconv, latent_dim = LATENT_DIM)
# ------------------------------------------------------------------------------------------------------------------- #
import os
import sys
import numpy as np
sys.path.append("../tools") # MantaFlow's own tools.
import uniio # Reads .uni files.
def load_data_density(num_initial, num_sims, frames):
base_path = "../data"
densities = []
for sim in range(num_initial, num_sims):
if os.path.exists("%s/simSimple_%04d" % (base_path, sim)): # Checks that the folder exists (each holds 200 frames of data).
for i in range(0, frames):
filename = "%s/simSimple_%04d/density_%04d.uni" # Filename of each frame (density).
uni_path = filename % (base_path, sim, i) # 200 frames per sim; fills in the path parameters.
header, content = uniio.readUni(uni_path) # Returns a NumPy array [Z, Y, X, C].
h = header["dimX"]
w = header["dimY"]
arr = content[:, ::-1, :, :] # Reverses the Y axis.
arr = np.reshape(arr, [w, h, 1]) # Discards the Z axis.
densities.append(arr)
load_num = len(densities)
if load_num < 2 * frames:
print("Error - Use at least two complete simulations")
exit(True)
densities = np.reshape(densities, (len(densities), 64, 64, 1)) # Converts the list back into a NumPy array.
print("Array shape: {}".format(densities.shape))
print("Array dimensions: {}".format(densities.ndim))
print("Total number of pixels: {}".format(densities.size))
return densities
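# Minimal usage sketch, assuming the ten folders simSimple_1000 ... simSimple_1009
# exist under ../data (this mirrors the call made in the training script):
#   density_field = load_data_density(num_initial = 1000, num_sims = 1010, frames = 200)
#   density_field.shape -> (2000, 64, 64, 1)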
# ------------------------------------------------------------------------------------------------------------------- #
def load_data_velocity(num_initial, num_sims, frames):
base_path = "../data"
velocities = []
for sim in range(num_initial, num_sims):
if os.path.exists("%s/simSimple_%04d" % (base_path, sim)): # Checks that the folder exists (each holds 200 frames of data).
for i in range(0, frames):
filename = "%s/simSimple_%04d/vel_%04d.uni" # Filename of each frame (velocity).
uni_path = filename % (base_path, sim, i) # 200 frames per sim; fills in the path parameters.
header, content = uniio.readUni(uni_path) # Returns a NumPy array [Z, Y, X, C].
h = header["dimX"]
w = header["dimY"]
arr = content[:, ::-1, :, :] # Reverses the Y axis.
arr = np.reshape(arr, [w, h, 3]) # Discards the Z axis.
velocities.append(arr)
load_num = len(velocities)
if load_num < 2 * frames:
print("Error - Use at least two complete simulations")
exit(True)
velocities = np.reshape(velocities, (len(velocities), 64, 64, 3)) # Converts the list back into a NumPy array.
velocities = velocities[:, :, :, 0:2] # Keep the velocity field in 2 dimensions only.
print("Array shape: {}".format(velocities.shape))
print("Array dimensions: {}".format(velocities.ndim))
print("Total number of pixels: {}".format(velocities.size))
return velocities
# ------------------------------------------------------------------------------------------------------------------- #
def make_sets(field, input_shape):
load_num = len(field)
vali_set_size = max(200, int(load_num * 0.1)) # At least one complete sim, or 10% of the data.
vali_data = field[load_num - vali_set_size : load_num, :] # Last "vali_set_size" frames of "field".
train_data = field[0 : load_num - vali_set_size, :] # The remaining frames of "field".
print("Split into {} training frames and {} validation frames.".format(train_data.shape[0], vali_data.shape[0]))
train_data = np.reshape(train_data, (len(train_data), input_shape[0], input_shape[1], input_shape[2])) # Convert back to NumPy arrays.
vali_data = np.reshape(vali_data, (len(vali_data), input_shape[0], input_shape[1], input_shape[2]))
print("Training set shape: {}".format(train_data.shape))
print("Validation set shape: {}".format(vali_data.shape))
return train_data, vali_data
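# Worked example of the split above, assuming ten complete scenes of 64x64
# density frames (load_num = 2000): vali_set_size = max(200, 200) = 200, so the
# last 200 frames form the validation set and the first 1800 the training set:
#   train_data, vali_data = make_sets(field = density_field, input_shape = (64, 64, 1))
#   train_data.shape -> (1800, 64, 64, 1); vali_data.shape -> (200, 64, 64, 1)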
# ------------------------------------------------------------------------------------------------------------------- #
import tensorflow as tf
import numpy as np
def loss_fluid_velocity(y_true, y_pred):
def curl(field):
dfydx = field[:, 1:, 1:2] - field[:, :-1, 1:2]
dfxdy = field[1:, :, 0:1] - field[:-1, :, 0:1]
dfydx = tf.concat([dfydx, tf.expand_dims(dfydx[:, -1, :], axis = 1)], axis = 1)
dfxdy = tf.concat([dfxdy, tf.expand_dims(dfxdy[-1, :, :], axis = 0)], axis = 0)
curl = dfydx - dfxdy
return curl
def curl_custom_loss(y_true, y_pred):
curl_difference = tf.reduce_mean(tf.abs(y_true - curl(y_pred)))
return curl_difference
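# The curl above is the discrete 2D vorticity, w = dv/dx - du/dy: forward
# differences of the two velocity components, padded by repeating the last
# column/row so the result keeps the field's spatial shape. curl_custom_loss
# then penalises the mean absolute difference between y_true and the vorticity
# of the prediction. A minimal sketch of plugging it into Keras (the model and
# data names below are illustrative, not taken from this file):
#
#   model.compile(optimizer = "adam", loss = curl_custom_loss)
#   model.fit(train_data, train_targets, epochs = 10, batch_size = 128)
#
# tf.keras accepts any callable with the (y_true, y_pred) signature as a loss,
# so a custom physical loss like this one can be passed directly to compile().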
# ------------------------------------------------------------------------------------------------------------------- #
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import RepeatVector, LSTM, Conv1D, Reshape, Input, Flatten
from tensorflow.keras.losses import mean_absolute_error, mean_squared_error