Commit 88016011 authored by Perez Visaires, Jon's avatar Perez Visaires, Jon

Archivos de GPU

parent 1d122c70
This diff is collapsed.
##### -------------------------------------------------- Librerías -------------------------------------------------- #####
import tensorflow as tf
import numpy as np
import h5py
import os
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
from datos_funciones import carga_datos, crea_sets
from plots_funciones import training_plot
from autoencoder_funciones import crea_autoencoder_capas, crea_adam, crea_autoencoder_etapas, guarda_encoder, guarda_decoder
##### -------------------------------------------------- GPU selection ---------------------------------------------- #####
# Pin TensorFlow to one physical GPU; PCI_BUS_ID ordering keeps device indices stable across drivers.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

##### -------------------------------------------------- Hyperparameters -------------------------------------------- #####
NUM_SIMS = 1900                     # Exclusive upper bound of the scene index range.
NUM_INICIO = 1000                   # First scene index to load.
NUM_SCENES = NUM_SIMS - NUM_INICIO  # Number of scenes (was `NUM_SIMS - 1000`; use the constant, same value).
NUM_FRAMES = 200                    # Frames per scene.

AE_EPOCHS = 100                     # Epochs for the single (official) training run.
AE_EPOCHS_LIST = 5                  # Epochs per batch size when sweeping the batch-size list.
PRE_EPOCHS = 1                      # Epochs of stage-wise pre-training.

AE_BATCH_MULTIPLE = False           # Try several batch sizes and compare the results.
PRE_TRAINING = True                 # Run stage-wise pre-training first.

ae_batch_list = [1024, 512, 256, 128, 64, 32]  # Candidate batch sizes for the sweep.
AE_BATCH_SIZE = 128                 # Official batch size.
PRE_BATCH_SIZE = 128                # Batch size for pre-training.

##### -------------------------------------------------- Data loading ----------------------------------------------- #####
densities = carga_datos(num_inicio = NUM_INICIO, num_sims = NUM_SIMS, frames = NUM_FRAMES)
train_data, vali_data = crea_sets(densities)
##### -------------------------------------------------- 2D autoencoder --------------------------------------------- #####
FEATURE_MULTIPLIER = 8           # Scales the number of convolution filters and the latent-space dimension.
SURFACE_KERNEL_SIZE = 4          # 4x4 kernel.
KERNEL_SIZE = 2                  # 2x2 kernel.
DROPOUT = 0.0                    # Fraction of nodes switched off by dropout.
INIT_FUNCTION = "glorot_normal"  # Weight-initialisation scheme.

# Per-frame shape (height, width, channels) — drop the leading batch axis.
input_shape = train_data.shape[1:4]

layer_conv, layer_deconv = crea_autoencoder_capas(
    input_shape = input_shape,
    feature_multiplier = FEATURE_MULTIPLIER,
    surface_kernel_size = SURFACE_KERNEL_SIZE,
    kernel_size = KERNEL_SIZE,
    dropout = DROPOUT,
    init_func = INIT_FUNCTION,
)
optimizer = crea_adam()
stages = crea_autoencoder_etapas(
    input_shape = input_shape,
    layer_conv = layer_conv,
    layer_deconv = layer_deconv,
    optimizer = optimizer,
)
##### -------------------------------------------------- Autoencoder training --------------------------------------- #####
# Greedy stage-wise pre-training: fit each partial autoencoder in turn; the last stage is the full model.
autoencoder = stages[-1]
autoencoder_clean_weights = autoencoder.get_weights()  # Snapshot of the untrained weights.

if PRE_TRAINING:
    for stage in stages:
        autoencoder_pre_train = stage.fit(
            train_data, train_data,
            epochs = PRE_EPOCHS,
            batch_size = PRE_BATCH_SIZE,
            validation_data = (vali_data, vali_data),
            shuffle = True,
        )
    autoencoder.save("../modelos/autoencoder_true_pretraining.h5")
    autoencoder_pre_weights = autoencoder.get_weights()

# ------------------------------------------------------------------------------------------------------------------- #
# Keep only the checkpoint with the lowest validation loss.
mc = ModelCheckpoint(
    filepath = "../modelos/autoencoder_true.h5",
    monitor = "val_loss",
    mode = "min",
    save_best_only = True,
    verbose = 1,
)

if AE_BATCH_MULTIPLE:
    # Sweep the candidate batch sizes, restarting from the pre-trained weights each time.
    for batch_size in ae_batch_list:
        if PRE_TRAINING:
            autoencoder.set_weights(autoencoder_pre_weights)
        autoencoder_train = autoencoder.fit(
            train_data, train_data,
            epochs = AE_EPOCHS_LIST,
            batch_size = batch_size,
            verbose = 1,
            validation_data = (vali_data, vali_data),
            shuffle = True,
            callbacks = [mc],
        )
        training_plot(network_train = autoencoder_train, epochs = AE_EPOCHS_LIST, batch_size = batch_size, dropout = DROPOUT, loss = "mse", metric = "mae", identification = "ae_true_list")
else:
    # Single run with the official batch size.
    if PRE_TRAINING:
        autoencoder.set_weights(autoencoder_pre_weights)
    autoencoder_train = autoencoder.fit(
        train_data, train_data,
        epochs = AE_EPOCHS,
        batch_size = AE_BATCH_SIZE,
        verbose = 1,
        validation_data = (vali_data, vali_data),
        shuffle = True,
        callbacks = [mc],
    )
    training_plot(network_train = autoencoder_train, epochs = AE_EPOCHS, batch_size = AE_BATCH_SIZE, dropout = DROPOUT, loss = "mse", metric = "mae", identification = "ae_true_single")

# ------------------------------------------------------------------------------------------------------------------- #
guarda_encoder(layer_conv, input_shape)
guarda_decoder(layer_conv, layer_deconv)
This diff is collapsed.
# ------------------------------------------------------------------------------------------------------------------- #
import os
import sys
import numpy as np
sys.path.append("../tools") # Herramientas propias de MantaFlow.
import uniio # Lectura de ficheros .uni
def carga_datos(num_inicio, num_sims, frames):
    """Load density frames from MantaFlow .uni files into one NumPy array.

    Scans ``../data/simSimple_%04d`` for simulation indices in
    [num_inicio, num_sims) and reads ``frames`` density frames from each
    folder that exists.

    Parameters:
        num_inicio: first simulation index (inclusive).
        num_sims:   last simulation index (exclusive).
        frames:     number of frames per simulation.

    Returns:
        np.ndarray of shape (n_frames_total, 64, 64, 1).

    Exits the process with status 1 if fewer than two full simulations
    were loaded.
    """
    base_path = "../data"
    densities = []
    for sim in range(num_inicio, num_sims):
        # Check the folder exists (each one holds `frames` frames of data).
        if os.path.exists("%s/simSimple_%04d" % (base_path, sim)):
            for i in range(0, frames):
                filename = "%s/simSimple_%04d/density_%04d.uni"  # Per-frame density file.
                uni_path = filename % (base_path, sim, i)        # Fill in the path parameters.
                header, content = uniio.readUni(uni_path)        # Returns a NumPy array [Z, Y, X, C].
                h = header["dimX"]
                w = header["dimY"]
                arr = content[:, ::-1, :, :]       # Flip the Y axis.
                arr = np.reshape(arr, [w, h, 1])   # Drop the Z axis.
                densities.append(arr)
    load_num = len(densities)
    if load_num < 2 * frames:
        print("Error - Usa al menos dos simulaciones completas")
        sys.exit(1)  # Was `exit(True)`; sys.exit with an explicit non-zero status.
    # Convert the list into a single NumPy array.
    # NOTE(review): 64x64 is hard-coded here even though w/h are read from the header — confirm all sims are 64x64.
    densities = np.reshape(densities, (len(densities), 64, 64, 1))
    print("Forma del array: {}".format(densities.shape))
    print("Dimensiones del array: {}".format(densities.ndim))
    print("Número de pixels en total: {}".format(densities.size))
    return densities
# ------------------------------------------------------------------------------------------------------------------- #
def carga_datos_velocity(num_inicio, num_sims, frames):
    """Load velocity frames from MantaFlow .uni files into one NumPy array.

    Same structure as ``carga_datos`` but reads ``vel_%04d.uni`` files.

    Parameters:
        num_inicio: first simulation index (inclusive).
        num_sims:   last simulation index (exclusive).
        frames:     number of frames per simulation.

    Returns:
        np.ndarray of shape (n_frames_total, 64, 64, 1).

    Exits the process with status 1 if fewer than two full simulations
    were loaded.
    """
    base_path = "../data"
    velocities = []
    for sim in range(num_inicio, num_sims):
        # Check the folder exists (each one holds `frames` frames of data).
        if os.path.exists("%s/simSimple_%04d" % (base_path, sim)):
            for i in range(0, frames):
                filename = "%s/simSimple_%04d/vel_%04d.uni"  # Per-frame velocity file.
                uni_path = filename % (base_path, sim, i)    # Fill in the path parameters.
                header, content = uniio.readUni(uni_path)    # Returns a NumPy array [Z, Y, X, C].
                h = header["dimX"]
                w = header["dimY"]
                arr = content[:, ::-1, :, :]       # Flip the Y axis.
                # NOTE(review): velocity fields are typically multi-channel; a [w, h, 1] reshape
                # would fail for C > 1 — confirm the channel count of vel_*.uni files.
                arr = np.reshape(arr, [w, h, 1])   # Drop the Z axis.
                velocities.append(arr)
    load_num = len(velocities)
    if load_num < 2 * frames:
        print("Error - Usa al menos dos simulaciones completas")
        sys.exit(1)  # Was `exit(True)`; sys.exit with an explicit non-zero status.
    velocities = np.reshape(velocities, (len(velocities), 64, 64, 1))
    print("Forma del array: {}".format(velocities.shape))
    print("Dimensiones del array: {}".format(velocities.ndim))
    print("Número de pixels en total: {}".format(velocities.size))
    # BUG FIX: the original returned `densities`, a name not defined in this
    # function (NameError at runtime, or stale module-level data).
    return velocities
# ------------------------------------------------------------------------------------------------------------------- #
def crea_sets(densities):
    """Split the loaded frames into training and validation sets.

    The validation set is taken from the END of the array and holds
    max(200, 10% of the frames) — at least one full simulation.

    Parameters:
        densities: array-like of frames, shape (N, 64, 64, 1).

    Returns:
        (train_data, vali_data) as NumPy arrays of shape (n, 64, 64, 1).
    """
    total = len(densities)
    vali_set_size = max(200, int(total * 0.1))  # At least one full sim, or 10% of the data.
    split = total - vali_set_size

    train_data = densities[0:split, :]   # Everything before the split point.
    vali_data = densities[split:total, :]  # The tail of the array.
    print("Separamos en {} frames de entrenamiento y {} frames de validación.".format(train_data.shape[0], vali_data.shape[0]))

    # Re-materialise both splits as NumPy arrays with an explicit shape.
    train_data = np.reshape(train_data, (len(train_data), 64, 64, 1))
    vali_data = np.reshape(vali_data, (len(vali_data), 64, 64, 1))
    print("Forma del set de entrenamiento: {}".format(train_data.shape))
    print("Forma del set de validación: {}".format(vali_data.shape))
    return train_data, vali_data
# ------------------------------------------------------------------------------------------------------------------- #
import imageio
import PIL
import numpy as np
# Assemble per-frame PNGs into gifs: input, target, prediction, and a side-by-side combination.
FRAMES = 200      # Frames per scene.
TIME_STEPS = 6    # First TIME_STEPS frames have no prediction, so start there.

images_in = []
images_out = []
images_pred = []
images_combi = []

# Input and target frames -> in.gif / out.gif.
for number in range(TIME_STEPS, FRAMES):
    images_in.append(imageio.imread("../imagenes/in_" + str(number) + ".png"))
    images_out.append(imageio.imread("../imagenes/out_" + str(number) + ".png"))
imageio.mimsave("../gifs/in.gif", images_in)
imageio.mimsave("../gifs/out.gif", images_out)

# Predicted frames -> pred.gif.
for number in range(TIME_STEPS, FRAMES):
    images_pred.append(imageio.imread("../imagenes/pred_" + str(number) + ".png"))
imageio.mimsave("../gifs/pred.gif", images_pred)

# Stack input | target | prediction horizontally, resized to the smallest image.
for number in range(TIME_STEPS, FRAMES):
    list_im = ["../imagenes/in_" + str(number) + ".png", "../imagenes/out_" + str(number) + ".png", "../imagenes/pred_" + str(number) + ".png"]
    imgs = [PIL.Image.open(i) for i in list_im]
    min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
    # BUG FIX: np.hstack must receive a sequence, not a generator expression —
    # modern NumPy raises "arrays to stack must be passed as a ... sequence".
    imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
    imgs_comb = PIL.Image.fromarray(imgs_comb)
    imgs_comb.save("../imagenes/combi_" + str(number) + ".png")

# Combined frames -> combi.gif.
for number in range(TIME_STEPS, FRAMES):
    images_combi.append(imageio.imread("../imagenes/combi_" + str(number) + ".png"))
imageio.mimsave("../gifs/combi.gif", images_combi)
import tensorflow as tf
def loss_fluid_velocity(y_true, y_pred):
# ------------------------------------------------------------------------------------------------------------------- #
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import RepeatVector, LSTM, Conv1D, Reshape, Input, Flatten
from tensorflow.keras.losses import mean_absolute_error, mean_squared_error
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Model
from math import floor
# ------------------------------------------------------------------------------------------------------------------- #