Commit f168a76f authored by Perez Visaires, Jon

Cleanup of duplicates.

parent a1f486f5
# ------------------------------------------------------------------------------------------------------------------- #
import os
import sys
import numpy as np

sys.path.append("../tools")  # MantaFlow's own tools.
import uniio                 # Reader for .uni files.

def carga_datos(num_inicio, num_sims, frames):

    base_path = "../data"
    densities = []

    for sim in range(num_inicio, num_sims):
        if os.path.exists("%s/simSimple_%04d" % (base_path, sim)):  # Check that the folder exists (each one holds 200 frames of data).
            for i in range(0, frames):
                filename = "%s/simSimple_%04d/density_%04d.uni"  # Name of each (density) frame.
                uni_path = filename % (base_path, sim, i)        # 200 frames per sim; fill in the path parameters.
                header, content = uniio.readUni(uni_path)        # Returns a NumPy array [Z, Y, X, C].
                h = header["dimX"]
                w = header["dimY"]
                arr = content[:, ::-1, :, :]      # Reverse the Y axis.
                arr = np.reshape(arr, [w, h, 1])  # Drop the Z axis.
                densities.append(arr)

    load_num = len(densities)
    if load_num < 2 * frames:
        print("Error - Use at least two complete simulations")
        sys.exit(1)

    densities = np.reshape(densities, (len(densities), 64, 64, 1))  # Turn the list back into a NumPy array.
    print("Array shape: {}".format(densities.shape))
    print("Array dimensions: {}".format(densities.ndim))
    print("Total number of pixels: {}".format(densities.size))

    return densities
# ------------------------------------------------------------------------------------------------------------------- #
def carga_datos_velocity(num_inicio, num_sims, frames):

    base_path = "../data"
    velocities = []

    for sim in range(num_inicio, num_sims):
        if os.path.exists("%s/simSimple_%04d" % (base_path, sim)):  # Check that the folder exists (each one holds 200 frames of data).
            for i in range(0, frames):
                filename = "%s/simSimple_%04d/vel_%04d.uni"  # Name of each (velocity) frame.
                uni_path = filename % (base_path, sim, i)    # 200 frames per sim; fill in the path parameters.
                header, content = uniio.readUni(uni_path)    # Returns a NumPy array [Z, Y, X, C].
                h = header["dimX"]
                w = header["dimY"]
                arr = content[:, ::-1, :, :]      # Reverse the Y axis.
                arr = np.reshape(arr, [w, h, 3])  # Drop the Z axis; velocity grids store three components per cell, so keep C = 3.
                velocities.append(arr)

    load_num = len(velocities)
    if load_num < 2 * frames:
        print("Error - Use at least two complete simulations")
        sys.exit(1)

    velocities = np.reshape(velocities, (len(velocities), 64, 64, 3))  # Turn the list back into a NumPy array.
    print("Array shape: {}".format(velocities.shape))
    print("Array dimensions: {}".format(velocities.ndim))
    print("Total number of pixels: {}".format(velocities.size))

    return velocities
# ------------------------------------------------------------------------------------------------------------------- #
def crea_sets(densities):

    load_num = len(densities)
    vali_set_size = max(200, int(load_num * 0.1))  # At least one full sim, or 10% of the data, whichever is larger.

    vali_data = densities[load_num - vali_set_size : load_num, :]  # The last "vali_set_size" entries of "densities".
    train_data = densities[0 : load_num - vali_set_size, :]        # The rest of "densities".
    print("Split into {} training frames and {} validation frames.".format(train_data.shape[0], vali_data.shape[0]))

    train_data = np.reshape(train_data, (len(train_data), 64, 64, 1))  # Convert back to NumPy arrays.
    vali_data = np.reshape(vali_data, (len(vali_data), 64, 64, 1))
    print("Training set shape: {}".format(train_data.shape))
    print("Validation set shape: {}".format(vali_data.shape))

    return train_data, vali_data
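
# A minimal end-to-end sketch of this file's loaders. The sim range below is an
# assumption for illustration (any range with at least two complete sims under
# ../data works), not a value fixed by this commit.
if __name__ == "__main__":
    densities = carga_datos(num_inicio = 1000, num_sims = 1010, frames = 200)
    train_data, vali_data = crea_sets(densities)
    assert train_data.shape[1:] == (64, 64, 1)
    assert vali_data.shape[0] >= 200  # At least one full simulation held out.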
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
import tensorflow as tf
import numpy as np
from tensorflow.keras.layers import RepeatVector, LSTM, Conv1D, Reshape, Input, Flatten
from tensorflow.keras.losses import mean_absolute_error, mean_squared_error
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.models import Model
from math import floor
# ------------------------------------------------------------------------------------------------------------------- #
def prepara_datos_lstm(encoder, train_data, vali_data, batch_size, time_steps, out_time_steps, frames):

    encoded_train = encoder.predict(train_data)
    encoded_vali = encoder.predict(vali_data)

    def generator_count(encoded_data, batch_size, time_steps, out_time_steps, frames):
        scene_count = len(encoded_data) // frames
        sample_count = frames
        scene_iteration_count = floor((sample_count + 1 - (time_steps + out_time_steps)) / batch_size)
        return scene_count, sample_count, scene_iteration_count

    def generator_batch_samples(encoded_data, batch_size, time_steps, out_time_steps, frames):
        scene_count, sample_count, scene_iteration_count = generator_count(encoded_data, batch_size, time_steps, out_time_steps, frames)
        batch_samples = scene_count * scene_iteration_count
        return batch_samples

    def shuffle_in_unison(*np_arrays):
        # Shuffle all arrays with the same permutation by replaying the RNG state.
        rng = np.random.get_state()
        for array in np_arrays:
            np.random.set_state(rng)
            np.random.shuffle(array)

    def restructure_encoded_data(encoded_data, time_steps, out_time_steps, batch_size):
        content_shape = encoded_data[0].shape  # (256,)
        final_sample_count = encoded_data.shape[0] - time_steps - out_time_steps  # frames, frames - batch_size, frames - 2 * batch_size, ...
        final_sample_count = min(batch_size, final_sample_count)  # 8

        X_data = np.zeros((final_sample_count, time_steps) + content_shape)      # (8, 6, 256)
        y_data = np.zeros((final_sample_count, out_time_steps) + content_shape)  # (8, 1, 256)

        curTS = 0
        for z in range(time_steps, final_sample_count + time_steps):
            X_data[curTS] = np.array(encoded_data[curTS:z])
            y_data[curTS] = np.array(encoded_data[z:z + out_time_steps])
            curTS += 1

        return X_data, y_data

    def generator_scene(encoded_data, batch_size, time_steps, out_time_steps, frames):
        scene_count, sample_count, scene_iteration_count = generator_count(encoded_data, batch_size, time_steps, out_time_steps, frames)
        while True:
            for i in range(scene_count):
                scene = encoded_data[(i * frames):((i + 1) * frames)]  # Select one scene at a time.
                for j in range(scene_iteration_count):  # Number of batches that fit in a single scene.
                    start = j * batch_size
                    end = sample_count
                    data = scene[start:end]
                    X, Y = restructure_encoded_data(data, time_steps, out_time_steps, batch_size)
                    X = X.reshape(*X.shape[0:2], -1)
                    Y = np.squeeze(Y.reshape(Y.shape[0], out_time_steps, -1))
                    shuffle_in_unison(X, Y)
                    yield X, Y

    train_gen_samples = generator_batch_samples(encoded_train, batch_size, time_steps, out_time_steps, frames)
    print("Number of train batch samples per epoch: {}".format(train_gen_samples))
    train_generator = generator_scene(encoded_train, batch_size, time_steps, out_time_steps, frames)

    vali_gen_samples = generator_batch_samples(encoded_vali, batch_size, time_steps, out_time_steps, frames)
    print("Number of validation batch samples per epoch: {}".format(vali_gen_samples))
    vali_generator = generator_scene(encoded_vali, batch_size, time_steps, out_time_steps, frames)

    return train_gen_samples, train_generator, vali_gen_samples, vali_generator
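
# Quick shape check for the generators above. A sketch only: the Dense model is
# a stand-in for the trained convolutional encoder, and the random data replaces
# real simulation frames; both are assumptions for illustration.
if __name__ == "__main__":
    from tensorflow.keras.layers import Dense

    _inp = Input(shape = (64, 64, 1))
    _toy_encoder = Model(_inp, Dense(256)(Flatten()(_inp)))

    _train = np.random.rand(400, 64, 64, 1).astype(np.float32)  # Two fake 200-frame scenes.
    _vali = np.random.rand(200, 64, 64, 1).astype(np.float32)   # One fake scene.

    _n, _gen, _vn, _vgen = prepara_datos_lstm(_toy_encoder, _train, _vali,
                                              batch_size = 8, time_steps = 6,
                                              out_time_steps = 1, frames = 200)
    _X, _Y = next(_gen)
    print(_X.shape, _Y.shape)  # Expected: (8, 6, 256) (8, 256)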
def crea_lstm(time_steps, out_time_steps, latent_dimension, encoder_lstm_neurons, decoder_lstm_neurons, activation, use_bias, dropout, recurrent_dropout, stateful, lstm_optimizer, loss):

    lstm_input = Input(shape = (time_steps, latent_dimension))
    x = lstm_input

    # Encoder LSTM: reads the input sequence backwards and compresses it into a single state vector.
    x = LSTM(units = encoder_lstm_neurons,
             activation = activation,
             use_bias = use_bias,
             recurrent_activation = "hard_sigmoid",
             kernel_initializer = "glorot_uniform",
             recurrent_initializer = "orthogonal",
             bias_initializer = "zeros",
             unit_forget_bias = True,
             dropout = dropout,
             recurrent_dropout = recurrent_dropout,
             return_sequences = False,
             go_backwards = True,
             stateful = stateful)(x)

    # Repeat the state once per output time step, then decode it back into a sequence.
    x = RepeatVector(out_time_steps)(x)

    x = LSTM(units = decoder_lstm_neurons,
             activation = activation,
             use_bias = use_bias,
             recurrent_activation = "hard_sigmoid",
             kernel_initializer = "glorot_uniform",
             recurrent_initializer = "orthogonal",
             bias_initializer = "zeros",
             unit_forget_bias = True,
             dropout = dropout,
             recurrent_dropout = recurrent_dropout,
             return_sequences = True,
             go_backwards = False,
             stateful = stateful)(x)

    # Project each decoded step back to the latent dimension.
    x = Conv1D(filters = latent_dimension, kernel_size = 1)(x)
    x = Flatten()(x) if out_time_steps == 1 else x
    lstm_output = x

    lstm = Model(inputs = lstm_input, outputs = lstm_output)
    lstm.compile(loss = loss,
                 optimizer = lstm_optimizer,
                 metrics = ["mse"])

    return lstm
# ------------------------------------------------------------------------------------------------------------------- #
def crea_optimizador_lstm():

    # Note: "lr" and "decay" are the legacy Keras argument names; newer TF releases use "learning_rate".
    optimizer = RMSprop(lr = 0.000126,
                        rho = 0.9,
                        epsilon = 1e-08,
                        decay = 0.000334)

    return optimizer
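
# Building the model with illustrative hyperparameters. These particular values
# are assumptions for the sketch, not settings fixed by this commit.
if __name__ == "__main__":
    lstm = crea_lstm(time_steps = 6, out_time_steps = 1, latent_dimension = 256,
                     encoder_lstm_neurons = 256, decoder_lstm_neurons = 512,
                     activation = "tanh", use_bias = True, dropout = 0.0,
                     recurrent_dropout = 0.0, stateful = False,
                     lstm_optimizer = crea_optimizador_lstm(), loss = "mse")
    lstm.summary()  # Input (None, 6, 256) -> output (None, 256) when out_time_steps == 1.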
# ------------------------------------------------------------------------------------------------------------------- #
# ------------------------------------------------------------------------------------------------------------------- #
import matplotlib.pyplot as plt

def training_plot(network_train, epochs, batch_size, dropout, identification, loss, metric):

    plot_epochs = range(epochs)
    plot_loss = network_train.history["loss"]
    plot_val_loss = network_train.history["val_loss"]
    plot_metric = network_train.history[metric]
    plot_val_metric = network_train.history["val_" + metric]

    plt.figure(figsize = (15, 5))

    ax = plt.subplot(1, 2, 1)
    plt.plot(plot_epochs, plot_loss, label = loss.upper())
    plt.plot(plot_epochs, plot_val_loss, label = "Validation " + loss.upper())
    plt.legend()
    plt.xlabel("Epoch")
    plt.ylabel(loss.upper())

    ax = plt.subplot(1, 2, 2)
    plt.plot(plot_epochs, plot_metric, label = metric.upper())
    plt.plot(plot_epochs, plot_val_metric, label = "Validation " + metric.upper())
    plt.legend()
    plt.xlabel("Epoch")
    plt.ylabel(metric.upper())

    if dropout > 0.0:
        plt.savefig("../plots/model_" + identification + "_DO-" + str(dropout * 100) + "_BS-" + str(batch_size) + ".png")
    else:
        plt.savefig("../plots/model_" + identification + "_BS-" + str(batch_size) + ".png")
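
# Example call with a stand-in History object (a sketch: in real use,
# "network_train" is whatever model.fit returns, and ../plots must exist):
if __name__ == "__main__":
    class _FakeHistory:
        history = {"loss": [1.0, 0.5], "val_loss": [1.1, 0.6],
                   "mse": [1.0, 0.5], "val_mse": [1.1, 0.6]}

    training_plot(_FakeHistory(), epochs = 2, batch_size = 8, dropout = 0.0,
                  identification = "demo", loss = "mse", metric = "mse")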
# ------------------------------------------------------------------------------------------------------------------- #
import os
import tensorflow as tf
import numpy as np
import scipy.misc  # scipy.misc.toimage needs scipy < 1.2 (with Pillow installed); it was removed in later releases.
import matplotlib.pyplot as plt

from tensorflow.keras.models import load_model
from random import randrange
from datos_funciones import carga_datos

NUM_INICIO = 1900
NUM_SIMS = 2000
FRAMES = 200
TIME_STEPS = 6
OUT_TIME_STEPS = 1

os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "3"

densities = carga_datos(num_inicio = NUM_INICIO, num_sims = NUM_SIMS, frames = FRAMES)

autoencoder = load_model("../modelos/autoencoder_24e-5.h5")
encoder = load_model("../modelos/encoder_24e-5.h5")
decoder = load_model("../modelos/decoder_24e-5.h5")
lstm = load_model("../modelos/lstm_true.h5")

NUM_SCENES = densities.shape[0] // FRAMES
RAND_SCENE = randrange(0, NUM_SCENES)  # Pick one scene at random for the comparison.

scene = densities[RAND_SCENE * FRAMES : RAND_SCENE * FRAMES + FRAMES, :, :, :]
autoencoder_scene = autoencoder.predict(scene)
encoded_scene = encoder.predict(scene)
latent_dim = encoded_scene.shape[-1]

# Slide a TIME_STEPS-wide window over the encoded scene and decode each LSTM prediction.
lstm_scene = []
for i in range(FRAMES - TIME_STEPS + 1):
    time_frames = encoded_scene[i : i + TIME_STEPS]
    time_frames = time_frames.reshape(1, TIME_STEPS, latent_dim)
    lstm_prediction = lstm.predict(time_frames, batch_size = 1)
    decoded_frame = decoder.predict(lstm_prediction)
    lstm_scene.append(decoded_frame)
lstm_scene = np.reshape(lstm_scene, (len(lstm_scene), 64, 64, 1))

# Three rows: ground truth, autoencoder reconstruction, LSTM prediction.
n = 10
plt.figure(figsize = (10, 3))
for i in range(n):
    ax = plt.subplot(3, n, i + 1)
    plt.imshow(scene[i].reshape(64, 64))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    ax = plt.subplot(3, n, i + 1 + n)
    plt.imshow(autoencoder_scene[i].reshape(64, 64))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)

    ax = plt.subplot(3, n, i + 1 + n + n)
    plt.imshow(lstm_scene[i].reshape(64, 64))
    plt.gray()
    ax.get_xaxis().set_visible(False)
    ax.get_yaxis().set_visible(False)
plt.savefig("../plots/comparativa.png")

out_dir = "../imagenes"
if not os.path.exists(out_dir):
    os.makedirs(out_dir)

for i in range(TIME_STEPS, FRAMES):
    scipy.misc.toimage(np.reshape(scene[i - TIME_STEPS], [64, 64])).save("{}/in_{}.png".format(out_dir, i))
    scipy.misc.toimage(np.reshape(autoencoder_scene[i - TIME_STEPS], [64, 64])).save("{}/out_{}.png".format(out_dir, i))
    scipy.misc.toimage(np.reshape(lstm_scene[i - TIME_STEPS], [64, 64])).save("{}/pred_{}.png".format(out_dir, i))