Commit 8c447e09 authored by Perez Visaires, Jon's avatar Perez Visaires, Jon

Velocidad en vez de densidad.

parent 88029284
# ------------------------------------------------------------------------------------------------------------------- #
import tensorflow as tf
import numpy as np
import math
from tensorflow.keras.layers import Dense, Conv2D, Conv2DTranspose, MaxPooling2D, UpSampling2D, Input, Flatten, Reshape, Activation, BatchNormalization, Dropout, LeakyReLU
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
# ------------------------------------------------------------------------------------------------------------------- #
def make_autoencoder_dense(input_shape, image_dim, encoding_dim, dropout, batch_normalization):
    """Build a single-hidden-layer dense autoencoder.

    Parameters
    ----------
    input_shape : tuple
        Per-sample input shape (without the batch axis), e.g. (64, 64, 1).
    image_dim : int
        Flattened size of one sample; presumably equals prod(input_shape)
        (required for the final Reshape to succeed) -- confirm at call site.
    encoding_dim : int
        Size of the latent (encoded) representation.
    dropout : float
        Dropout rate; Dropout layers are inserted only when > 0.0.
    batch_normalization : bool
        If True, insert BatchNormalization after each activation.

    Returns
    -------
    tuple
        (autoencoder, encoder, decoder) Keras Models; `autoencoder` chains
        `encoder` then `decoder`.
    """
    # ----- Encoder: flatten -> dense bottleneck -> relu -----
    encoder_input = Input(shape = input_shape)
    x = encoder_input
    x = Flatten()(x)
    x = Dense(units = encoding_dim)(x)
    x = Activation(activation = "relu")(x)
    x = BatchNormalization()(x) if batch_normalization else x
    # BUG FIX: original assigned the Dropout *layer object* to x instead of
    # applying it to the tensor (compare the correct decoder-side usage).
    x = Dropout(dropout)(x) if dropout > 0.0 else x
    encoder_output = x
    encoder = Model(inputs = encoder_input, outputs = encoder_output)

    # ----- Decoder: dense expansion -> sigmoid -> reshape to image -----
    # BUG FIX: Model.output_shape includes the batch axis (None, encoding_dim);
    # Input(shape=...) expects the per-sample shape, so drop the batch axis.
    decoder_input = Input(shape = encoder.output_shape[1:])
    x = decoder_input
    x = Dense(units = image_dim)(x)
    x = Activation(activation = "sigmoid")(x)
    x = BatchNormalization()(x) if batch_normalization else x
    x = Dropout(dropout)(x) if dropout > 0.0 else x
    x = Reshape(target_shape = input_shape)(x)
    decoder_output = x
    decoder = Model(inputs = decoder_input, outputs = decoder_output)

    # ----- End-to-end autoencoder: encoder followed by decoder -----
    autoencoder_input = Input(shape = input_shape)
    autoencoder_output = decoder(encoder(autoencoder_input))
    autoencoder = Model(inputs = autoencoder_input, outputs = autoencoder_output)

    return autoencoder, encoder, decoder
def make_autoencoder_dense_layered(layer_number, input_shape, image_dim, encoding_dim, dropout, batch_normalization):
encoder_layer = []
decoder_layer = []
max_layer_num = math.floor(math.log(image_dim/encoding_dim)/math.log(2))
for layer in range(layer_number):
if layer == 0:
encoding_dim = image_dim / 2
autoencoder, encoder, decoder = make_autoencoder_dense(input_shape, image_dim, encoding_dim, dropout, batch_normalization)
encoder_layer.append(encoder)
decoder_layer.append(decoder)
elif layer < max_layer_num:
encoding_dim = encoding_dim / 2
autoencoder, encoder, decoder = make_autoencoder_dense(input_shape = encoder_layer[layer-1].output_shape, image_dim = encoder_output_shape[1], encoding_dim, dropout, batch_normalization)
encoder_layer.append(encoder)
decoder_layer.append(decoder)
......@@ -4,13 +4,13 @@ import os
import sys
import numpy as np
sys.path.append("../tools") # Herramientas propias de MantaFlow.
sys.path.append("../../tools") # Herramientas propias de MantaFlow.
import uniio # Lectura de ficheros .uni
def load_data_density (num_initial, num_sims, frames):
base_path = "../data"
base_path = "../../data"
densities = []
......@@ -52,7 +52,7 @@ def load_data_density (num_initial, num_sims, frames):
def load_data_velocity(num_initial, num_sims, frames):
base_path = "../data"
base_path = "../../data"
velocities = []
......@@ -69,6 +69,7 @@ def load_data_velocity(num_initial, num_sims, frames):
h = header["dimX"]
w = header["dimY"]
range
arr = content[:, ::-1, :, :] # Cambia el orden del eje Y.
arr = np.reshape(arr, [w, h, 3]) # Deshecha el eje Z.
......
# ------------------------------------------------------------------------------------------------------------------- #
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import RepeatVector, LSTM, Conv1D, Reshape, Input, Flatten
from tensorflow.keras.losses import mean_absolute_error, mean_squared_error
from tensorflow.keras.optimizers import RMSprop
......@@ -9,7 +10,7 @@ from math import floor
# ------------------------------------------------------------------------------------------------------------------- #
def prepara_datos_lstm(encoder, train_data, vali_data, batch_size, time_steps, out_time_steps, frames):
def prepare_data_lstm(encoder, train_data, vali_data, batch_size, time_steps, out_time_steps, frames):
encoded_train = encoder.predict(train_data)
encoded_vali = encoder.predict(vali_data)
......@@ -92,7 +93,7 @@ def prepara_datos_lstm(encoder, train_data, vali_data, batch_size, time_steps, o
return train_gen_samples, train_generator, vali_gen_samples, vali_generator
def crea_lstm(time_steps, out_time_steps, latent_dimension, encoder_lstm_neurons, decoder_lstm_neurons, activation, use_bias, dropout, recurrent_dropout, stateful, lstm_optimizer, loss):
def make_lstm(time_steps, out_time_steps, latent_dimension, encoder_lstm_neurons, decoder_lstm_neurons, activation, use_bias, dropout, recurrent_dropout, stateful, lstm_optimizer, loss):
lstm_input = Input(shape = (time_steps, latent_dimension))
......@@ -135,16 +136,17 @@ def crea_lstm(time_steps, out_time_steps, latent_dimension, encoder_lstm_neurons
lstm_output = x
lstm = Model(inputs = lstm_input, outputs = lstm_output)
lstm.summary()
lstm.compile(loss = loss,
optimizer = lstm_optimizer,
metrics = ["mse"])
metrics = ["mse", "mae"])
return lstm
# ------------------------------------------------------------------------------------------------------------------- #
def crea_optimizador_lstm():
def make_optimizer_lstm():
optimizer = RMSprop(lr = 0.000126,
rho = 0.9,
......
......@@ -2,37 +2,42 @@
import matplotlib.pyplot as plt
def training_plot(network_train, epochs, batch_size, dropout, identification, loss, metric):
def training_plot(network_train, epochs, batch_size, dropout, learning_rate, identification, loss_name, metric1, metric2):
plot_epochs = range(epochs)
plot_loss = network_train.history["loss"]
plot_val_loss = network_train.history["val_loss"]
plot_metric = network_train.history[metric]
plot_val_metric = network_train.history["val_" + metric]
plot_metric1 = network_train.history[metric1]
plot_val_metric1 = network_train.history["val_" + metric1]
plot_metric2 = network_train.history[metric2]
plot_val_metric2 = network_train.history["val_" + metric2]
plt.figure(figsize = (15, 5))
plt.figure(figsize = (15, 4))
ax = plt.subplot(1, 2, 1)
plt.plot(plot_epochs, plot_loss, label = loss.upper())
plt.plot(plot_epochs, plot_val_loss, label = "Validation " + loss.upper())
ax = plt.subplot(1, 3, 1)
plt.plot(plot_epochs, plot_loss, label = loss_name, color = "royalblue")
plt.plot(plot_epochs, plot_val_loss, label = "Validation " + loss_name, color = "darkturquoise")
plt.legend()
plt.xlabel("Epoch")
plt.ylabel(loss.upper())
plt.ylabel(loss_name)
ax = plt.subplot(1, 2, 2)
plt.plot(plot_epochs, plot_metric, label = metric.upper())
plt.plot(plot_epochs, plot_val_metric, label = "Validation " + metric.upper())
ax = plt.subplot(1, 3, 2)
plt.plot(plot_epochs, plot_metric1, label = metric1.upper(), color = "seagreen")
plt.plot(plot_epochs, plot_val_metric1, label = "Validation " + metric1.upper(), color = "limegreen")
plt.legend()
plt.xlabel("Epoch")
plt.ylabel(metric.upper())
plt.ylabel(metric1.upper())
if dropout > 0.0:
plt.savefig("../plots/model_" + identification + "_DO-" + str(dropout * 100) + "_BS-" + str(batch_size) + ".png")
ax = plt.subplot(1, 3, 3)
plt.plot(plot_epochs, plot_metric2, label = metric2.upper(), color = "firebrick")
plt.plot(plot_epochs, plot_val_metric2, label = "Validation " + metric2.upper(), color = "tomato")
plt.legend()
plt.xlabel("Epoch")
plt.ylabel(metric2.upper())
else:
plt.savefig("../../plots/model_" + identification + "_BS-" + str(batch_size) + "_LR-" + str(learning_rate) + "_DO-" + str(dropout) + ".png")
plt.savefig("../plots/model_" + identification + "_BS-" + str(batch_size) + ".png")
# ------------------------------------------------------------------------------------------------------------------- #
......
import os
import sys
import tensorflow as tf
import numpy as np
import scipy.misc
import matplotlib.pyplot as plt
USE_GPU = True
if USE_GPU:
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from tensorflow.keras.models import load_model
from random import randrange
from datos_funciones import carga_datos
sys.path.append("../functions")
NUM_INICIO = 1900
NUM_SIMS = 2000
from data_functions import load_data_velocity
from loss_physics import jacobian_custom_loss
NUM_INITIAL = 1900
NUM_SIMS = 1910
FRAMES = 200
TIME_STEPS = 6
OUT_TIME_STEPS = 1
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
densities = carga_datos(num_inicio = NUM_INICIO, num_sims = NUM_SIMS, frames = FRAMES)
velocities = load_data_velocity(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
frames = FRAMES)
autoencoder = load_model("../modelos/autoencoder_24e-5.h5")
encoder = load_model("../modelos/encoder_24e-5.h5")
decoder = load_model("../modelos/decoder_24e-5.h5")
lstm = load_model("../modelos/lstm_true.h5")
autoencoder = load_model("../../modelos/AE_single_vel_Jacobian_BS-256_LR-0.0015_DO-0.0.h5", {"jacobian_custom_loss" : jacobian_custom_loss})
encoder = load_model("../../modelos/encoder_single_vel_Jacobian_BS-256_LR-0.0015_DO-0.0.h5")
decoder = load_model("../../modelos/decoder_single_vel_Jacobian_BS-256_LR-0.0015_DO-0.0.h5")
lstm = load_model("../../modelos/LSTM_single_mae_BS-16_LR-_DO-0.0132.h5")
NUM_SCENES = densities.shape[0] // FRAMES
NUM_SCENES = velocities.shape[0] // FRAMES
RAND_SCENE = randrange(0, NUM_SCENES)
scene = densities[RAND_SCENE*FRAMES : RAND_SCENE*FRAMES + FRAMES, :, :, :]
scene = velocities[RAND_SCENE*FRAMES : RAND_SCENE*FRAMES + FRAMES, :, :, :]
scene_u = scene[:, :, :, 0:1]
scene_v = scene[:, :, :, 1:2]
autoencoder_scene = autoencoder.predict(scene)
autoencoder_scene_u = autoencoder_scene[:, :, :, 0:1]
autoencoder_scene_v = autoencoder_scene[:, :, :, 1:2]
encoded_scene = encoder.predict(scene)
latent_dim = encoded_scene.shape[-1]
......@@ -45,7 +59,9 @@ for i in range(FRAMES - 5):
decoded_frame = decoder.predict(lstm_prediction)
lstm_scene.append(decoded_frame)
lstm_scene = np.reshape(lstm_scene, (len(lstm_scene), 64, 64, 1))
lstm_scene = np.reshape(lstm_scene, (len(lstm_scene), 64, 64, 2))
lstm_scene_u = lstm_scene[:, :, :, 0:1]
lstm_scene_v = lstm_scene[:, :, :, 1:2]
n = 10
......@@ -54,30 +70,30 @@ plt.figure(figsize = (10, 3))
for i in range(n):
ax = plt.subplot(3, n, i + 1)
plt.imshow(scene[i].reshape(64, 64))
plt.imshow(scene_u[i].reshape(64, 64))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n)
plt.imshow(autoencoder_scene[i].reshape(64, 64))
plt.imshow(autoencoder_scene_u[i].reshape(64, 64))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax = plt.subplot(3, n, i + 1 + n + n)
plt.imshow(lstm_scene[i].reshape(64, 64))
plt.imshow(lstm_scene_u[i].reshape(64, 64))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.savefig("../plots/comparativa.png")
plt.savefig("../../plots/comparativa.png")
out_dir = "../imagenes"
out_dir = "../../imagenes"
if not os.path.exists(out_dir): os.makedirs(out_dir)
for i in range(TIME_STEPS, FRAMES):
scipy.misc.toimage(np.reshape(scene[i - TIME_STEPS], [64, 64])).save("{}/in_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(autoencoder_scene[i - TIME_STEPS], [64, 64])).save("{}/out_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(lstm_scene[i - TIME_STEPS], [64, 64])).save("{}/pred_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(scene_u[i - TIME_STEPS], [64, 64])).save("{}/in_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(autoencoder_scene_u[i - TIME_STEPS], [64, 64])).save("{}/out_{}.png".format(out_dir, i))
scipy.misc.toimage(np.reshape(lstm_scene_u[i - TIME_STEPS], [64, 64])).save("{}/pred_{}.png".format(out_dir, i))
......@@ -4,26 +4,32 @@ import tensorflow as tf
import numpy as np
import h5py
import os
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
import sys
USE_GPU = True
if USE_GPU:
from datos_funciones import carga_datos, crea_sets
from lstm_funciones import crea_lstm, crea_optimizador_lstm, prepara_datos_lstm
from plots_funciones import training_plot
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
##### -------------------------------------------------- Selección GPU ---------------------------------------------- #####
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.callbacks import ModelCheckpoint
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
sys.path.append("../functions")
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
from data_functions import load_data_velocity, make_sets
from lstm_functions import make_lstm, make_optimizer_lstm, prepare_data_lstm
from plot_functions import training_plot
# ------------------------------------------------------------------------------------------------------------------- #
TIME_STEPS = 6 # 6 frames para alimentar al LSTM.
OUT_TIME_STEPS = 1 # Predicción de 1 frame por el LSTM.
LATENT_DIMENSION = 256 # 256 features en el codificado.
LATENT_DIMENSION = 256 # 512 features en el codificado.
NUM_FRAMES = 200
NUM_SIMS = 1900
NUM_INITIAL = 1000
NUM_SIMS = 1200
ENCODER_NEURONS = 256
DECODER_NEURONS = 512
......@@ -41,17 +47,42 @@ STATEFUL = False
# ------------------------------------------------------------------------------------------------------------------- #
densities = carga_datos(num_sims = NUM_SIMS, frames = NUM_FRAMES)
velocity_field = load_data_velocity(num_initial = NUM_INITIAL,
num_sims = NUM_SIMS,
frames = NUM_FRAMES)
input_shape = (velocity_field.shape[1],
velocity_field.shape[2],
velocity_field.shape[3])
train_data, vali_data = make_sets(field = velocity_field, input_shape = input_shape)
train_data, vali_data = crea_sets(densities)
encoder = load_model("../../modelos/encoder_single_vel_Jacobian_BS-256_LR-0.015_DO-0.0.h5")
encoder = load_model("../modelos/encoder_true.h5")
encoder.summary()
train_gen_samples, train_generator, vali_gen_samples, vali_generator = prepara_datos_lstm(encoder = encoder, train_data = train_data, vali_data = vali_data, batch_size = LSTM_BATCH_SIZE, time_steps = TIME_STEPS, out_time_steps = OUT_TIME_STEPS, frames = NUM_FRAMES)
train_gen_samples, train_generator, vali_gen_samples, vali_generator = prepare_data_lstm(encoder = encoder,
train_data = train_data,
vali_data = vali_data,
batch_size = LSTM_BATCH_SIZE,
time_steps = TIME_STEPS,
out_time_steps = OUT_TIME_STEPS,
frames = NUM_FRAMES)
optimizador = crea_optimizador_lstm()
optimizer = make_optimizer_lstm()
lstm = crea_lstm(time_steps = TIME_STEPS, out_time_steps = OUT_TIME_STEPS, latent_dimension = LATENT_DIMENSION, encoder_lstm_neurons = ENCODER_NEURONS, decoder_lstm_neurons = DECODER_NEURONS, activation = ACTIVATION, use_bias = USE_BIAS, dropout = DROPOUT, recurrent_dropout = RECURRENT_DROPOUT, stateful = STATEFUL, lstm_optimizer = optimizador, loss = LOSS)
lstm = make_lstm(time_steps = TIME_STEPS,
out_time_steps = OUT_TIME_STEPS,
latent_dimension = LATENT_DIMENSION,
encoder_lstm_neurons = ENCODER_NEURONS,
decoder_lstm_neurons = DECODER_NEURONS,
activation = ACTIVATION,
use_bias = USE_BIAS,
dropout = DROPOUT,
recurrent_dropout = RECURRENT_DROPOUT,
stateful = STATEFUL,
lstm_optimizer = optimizer,
loss = LOSS)
lstm_train = lstm.fit_generator(generator = train_generator,
steps_per_epoch = train_gen_samples,
......@@ -64,7 +95,15 @@ lstm_train = lstm.fit_generator(generator = train_generator,
workers = 1)
lstm.save("../modelos/lstm_true.h5")
lstm.save("../../modelos/LSTM_single_" + str(LOSS) + "_BS-" + str(LSTM_BATCH_SIZE) + "_LR-" + str("") + "_DO-" + str(DROPOUT) + ".h5")
training_plot(network_train = lstm_train, epochs = LSTM_EPOCHS, batch_size = LSTM_BATCH_SIZE, dropout = DROPOUT, identification = "lstm_true_single", loss = LOSS, metric = "mse")
training_plot(network_train = lstm_train,
epochs = LSTM_EPOCHS,
batch_size = LSTM_BATCH_SIZE,
dropout = DROPOUT,
learning_rate = str(),
identification = "LSTM_single",
loss_name = str(LOSS),
metric1 = "mse",
metric2 = "mae")
No preview for this file type
plots/comparativa.png

69.4 KB | W: | H:

plots/comparativa.png

82 KB | W: | H:

plots/comparativa.png
plots/comparativa.png
plots/comparativa.png
plots/comparativa.png
  • 2-up
  • Swipe
  • Onion skin
Instalación MantaFlow en Ubuntu:
* Instalar prerrequisitos:
sudo apt-get install cmake g++ git python3-dev qt5-qmake qt5-default
- cmake: Herramienta para construir, testear y empaquetar software (sistema de construcción que genera los ficheros de compilación).
- g++: GNU C++, conjunto libre de compiladores C++ para Linux.
- python3-dev: Cabeceras y ficheros de desarrollo de Python 3, necesarios para compilar módulos de extensión.
- qt5-qmake y qt5-default: QT5, crear GUIs multiplataforma.
* Instalar librerías para el environment TensorFlow:
pip install tensorflow-gpu keras numpy sklearn imageio scipy matplotlib h5py
- tensorflow-gpu: soporte GPU para TensorFlow.
- keras: Biblioteca OpenSource de Redes Neuronales escrita en Python (corre encima de TensorFlow). Útil para lanzar prototipos de manera rápida.
- numpy: Soporte para cálculo vectorial y matricial en Python.
- sklearn: Scikit-learn, biblioteca Machine Learning para Python.
- imageio: Leer y escribir una amplia variedad de imágenes, vídeos y formatos de datos científicos y volumétricos.
- scipy: Herramientas y algoritmos matemáticos para Python.
- matplotlib: Generación de gráficos a partir de datos para Python y Numpy.
- h5py: Interfaz Python para el formato de data binario HDF5.
*IMPORTANTE* Las versiones utilizadas para crear MantaFlow son: TensorFlow 1.7.0, CUDA V9.0.176, CUDNN 7.0.5, Keras 2.1.6
¿Será posible utilizar MantaFlow con versiones actualizadas de estas bibliotecas sin romper nada? Probar.
* Crear directorio MantaFlow y copiar repositorio mediante Git:
mkdir MantaFlow
cd MantaFlow
git clone https://bitbucket.org/mantaflow/manta.git
* Construir el proyecto utilizando CMake, establecer un directorio "build", y seleccionar las opciones de compilación de CMake.
mkdir manta/build
cd manta/build
cmake .. -DGUI=ON -DOPENMP=ON -DNUMPY=ON -DPYTHON_VERSION=3.6
make -j4
- Opciones de CMake:
> GUI: Construye la versión GUI de QT.
> OPENMP: Activa la aceleración multicore mediante OpenMP.
> NUMPY: Soporte para arrays de Numpy como tipo de datos nativos en Kernels Manta.
> PYTHON_VERSION: Selecciona una versión específica de Python. Importante si hay varias versiones de Python instaladas al mismo tiempo.
> OPENVDB: Soporte para volúmenes openvdb. El comando save('data.vdb') guarda un archivo OpenVDB Volume para ser leido por Blender. (Render 3D para simus).
Configurar PyCharm en Ubuntu:
* Paquete Snap:
sudo snap install pycharm-community --classic
<