Commit 1d122c70 authored by Perez Visaires, Jon's avatar Perez Visaires, Jon

Notebooks

parent fd6001f9
"""Assemble per-frame PNGs (input, ground truth, prediction) into animated
GIFs, plus a side-by-side "combi" GIF comparing all three per frame."""

import imageio
import PIL.Image  # `import PIL` alone does not load the Image submodule
import numpy as np

frames = 200      # total frames per scene
time_steps = 6    # predictions only exist from this frame onward

images_in = []
images_out = []
images_pred = []
images_combi = []

# Input and ground-truth GIFs cover every frame of the scene.
for number in range(frames):
    images_in.append(imageio.imread("in_" + str(number) + ".png"))
    images_out.append(imageio.imread("out_" + str(number) + ".png"))
imageio.mimsave("./in.gif", images_in)
imageio.mimsave("./out.gif", images_out)

# Prediction frames start at `time_steps` (the model needs a warm-up window).
for number in range(time_steps, frames):
    images_pred.append(imageio.imread("pred_" + str(number) + ".png"))
imageio.mimsave("./pred.gif", images_pred)

# Build per-frame comparison images: input | ground truth | prediction.
for number in range(time_steps, frames):
    list_im = ["in_" + str(number) + ".png", "out_" + str(number) + ".png", "pred_" + str(number) + ".png"]
    imgs = [PIL.Image.open(i) for i in list_im]
    # Resize all three to the smallest image so the horizontal stack aligns.
    min_shape = sorted([(np.sum(i.size), i.size) for i in imgs])[0][1]
    # Use a list comprehension: np.hstack requires a sequence, and passing a
    # generator raises an error in modern NumPy.
    imgs_comb = np.hstack([np.asarray(i.resize(min_shape)) for i in imgs])
    imgs_comb = PIL.Image.fromarray(imgs_comb)
    imgs_comb.save("combi_" + str(number) + ".png")

# Collect the comparison frames into the final combined GIF.
for number in range(time_steps, frames):
    images_combi.append(imageio.imread("combi_" + str(number) + ".png"))
imageio.mimsave("./combi.gif", images_combi)
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Librerías"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import os\n",
"import sys\n",
"import tensorflow as tf\n",
"import numpy as np\n",
"import scipy.misc\n",
"import matplotlib.pyplot as plt"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"sys.path.append(\"../tools\") # Herramientas propias de MantaFlow\n",
"import uniio # Lectura de ficheros .uni"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Hiperparámetros"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"num_sims = 2000 # num_sims - 1000 escenas. \n",
"frames = 200 # Frames por escena.\n",
"\n",
"epochs_autoencoder = 5\n",
"epochs_lstm = 50\n",
"epochs_pretraining = 1\n",
"\n",
"batch_size_autoencoder = 4\n",
"batch_size_lstm = 16\n",
"\n",
"time_steps_lstm = 6\n",
"out_time_steps_lstm = 1\n",
"\n",
"save_autoencoder = True\n",
"save_lstm = True"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Datos iniciales"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Inicializamos las seed para funciones random. Al ser inicializadas al mismo número, el resultado no cambiará en cada ejecución."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"np.random.seed(13)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Ruta a los datos de simulación, donde también se guardan los resultados."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"base_path = \"../data\""
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Carga de datos"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Podemos elegir el número de escenas y los frames de cada una, dependiendo de la configuración de los simuladores clásicos."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"print(\"Cargamos {} escenas, con {} frames cada una.\".format(num_sims-1000, frames))\n",
"print(\"Trabajamos con un total de {} frames.\".format((num_sims-1000) * frames))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Cargamos los datos desde los ficheros .uni en arrays de numpy. Los .uni son ficheros propios de MantaFlow, en los que se guarda los resultados de los simuladores clásicos. En este caso cargamos los datos de densidad de humo simulados previamente."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"densities = []\n",
"\n",
"for sim in range(1000, num_sims):\n",
" \n",
" if os.path.exists(\"%s/simSimple_%04d\" % (base_path, sim)): # Comprueba la existencia de las carpetas (cada una 100 frames de datos).\n",
" \n",
" for i in range(0, frames):\n",
" \n",
" filename = \"%s/simSimple_%04d/density_%04d.uni\" # Nombre de cada frame (densidad).\n",
" uni_path = filename % (base_path, sim, i) # 200 frames por sim, rellena parametros de la ruta.\n",
" header, content = uniio.readUni(uni_path) # Devuelve un array Numpy [Z, Y, X, C].\n",
" \n",
" h = header[\"dimX\"]\n",
" w = header[\"dimY\"]\n",
" \n",
" arr = content[:, ::-1, :, :] # Cambia el orden de Y.\n",
"            arr = np.reshape(arr, [w, h, 1]) # Desecha Z.\n",
" \n",
" densities.append(arr)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Devuelve los datos de cada frame (canal de grises, 0 a 255) en una lista de Python. En este caso las imágenes son de 64x64 píxeles. (64, 64, 1)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Necesitamos al menos 2 simulaciones para trabajar de manera adecuada."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"load_num = len(densities)\n",
"\n",
"if load_num < 2 * frames:\n",
" \n",
" print(\"Error - Usa al menos dos simulaciones completas\")\n",
" \n",
" exit(True)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Convertimos la lista \"densities\" en un array de Numpy."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"densities = np.reshape(densities, (len(densities), 64, 64, 1))\n",
"\n",
"print(\"Forma del array: {}\".format(densities.shape))\n",
"print(\"Dimensiones del array: {}\".format(densities.ndim))\n",
"print(\"Número de pixels en total: {}\".format(densities.size))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Creación del set de validación"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Con el fin de entrenar correctamente a los modelos Deep Learning, separamos los datos de densidad en un set de entrenamiento y otro de validación. Creamos el set de validación de entre los datos de simulación generados, al menos una simulación completa o el 10% de los datos (el que sea mayor de los dos)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"vali_set_size = max(200, int(load_num * 0.1)) # Al menos una simu completa o el 10% de los datos.\n",
"\n",
"vali_data = densities[load_num - vali_set_size : load_num, :] # \"load_num\" datos del final de \"densities\".\n",
"train_data = densities[0 : load_num - vali_set_size, :] # El resto de datos del principio de \"densities\".\n",
"\n",
"print(\"Separamos en {} frames de entrenamiento y {} frames de validación.\".format(train_data.shape[0], vali_data.shape[0]))"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"Convertimos los datos de entrenamiento y validación en arrays."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"train_data = np.reshape(train_data, (len(train_data), 64, 64, 1))\n",
"vali_data = np.reshape(vali_data, (len(vali_data), 64, 64, 1))\n",
"\n",
"print(\"Forma del set de entrenamiento: {}\".format(train_data.shape))\n",
"print(\"Forma del set de validación: {}\".format(vali_data.shape))"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.6.8"
},
"toc": {
"base_numbering": 1,
"nav_menu": {},
"number_sections": true,
"sideBar": true,
"skip_h1_title": false,
"title_cell": "Table of Contents",
"title_sidebar": "Contents",
"toc_cell": false,
"toc_position": {},
"toc_section_display": true,
"toc_window_display": false
}
},
"nbformat": 4,
"nbformat_minor": 2
}
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment