"""
Implements the API for dexi evaluation of KPIs.
"""
import json
import platform
import subprocess
import csv
import os
import logging
import random
import pandas as pd
import types
from typing import List, Dict
from jsonschema import validate
from flask import Blueprint, request
from flask import current_app as app
from pony.orm import db_session
from deepdiff import DeepDiff
from copy import deepcopy

from app import utils
from app.entities import Simulation

# Blueprint object for this module; the routes below attach directly via
# `app.route`, so `dss` is kept for registration by the application factory.
dss = Blueprint("dss", __name__)

# Configuration read once at import time from the application's dotenv mapping.
ts_url = app.dotenv["TS_URL"]  # NOTE(review): not referenced elsewhere in this module — confirm before removing
city_id = f"{app.dotenv['URBANITE_CITY']}"
data_dir = f"{app.dotenv['DATA_DIR']}/{city_id}"  # per-city data root directory
dexi_dir = app.dotenv["DEXI_DIR"]  # directory containing the DEXiEval binary
assets_dir = app.dotenv["ASSETS_DIR"]  # decision models and json schemas
# csv written by the plain dexi run; read back by /dss/general_recommendation.
general_dexi_output_dir = f"{data_dir}/temp/results.csv"


# Module-level logger; some functions below also log through app.logger.
logger = logging.getLogger(__name__)


@app.route("/dss/kpi_eval/<string:city_id>", methods=["POST"])
def dexi_eval_json(city_id=None):
    """
    Dexi evaluation of stored kpi files.

    Expects a JSON body with "baseline" and "compare" simulation ids, runs
    dexi on the discretized compare scenario and writes the evaluation into
    the baseline simulation's results directory.
    """
    if city_id is None:
        return {"message": "Error on frontend - city_id not specified"}

    # Parse the request body once (it was previously parsed twice).
    payload = request.get_json()
    baseline_id = payload["baseline"]
    compare_id = payload["compare"]
    compare_simulation_name = get_compare_simulation_name(compare_id)
    # Only the compare json feeds dexi here; the discretized baseline is unused.
    _, compare_json, _ = preprocess_kpi_jsons(baseline_id, compare_id)

    # running dexi here normally
    dexi_output, simulation_dirs = prepare_and_run_dexi(compare_json, city_id, compare_simulation_name,
                                                        baseline_id, compare_id, "temp")
    write_results(dexi_output, baseline_id, simulation_dirs[0])

    return {"success": True}


@app.route("/dss/recommendation_analysis/<string:city_id>", methods=["POST"])
def recommendation_analysis(city_id=None):
    """
    Run the +/- 1 and +/- 2 recommendation analyses for a compare scenario.

    For every KPI the generator nudges, dexi is re-run into the analysis
    folder for that step size and its results are diffed against the plain
    run in `temp/` to find KPIs the nudge influenced.
    """
    if city_id is None:
        return {"message": "Error on frontend - city_id not specified"}

    # Parse the request body once (it was previously parsed twice).
    payload = request.get_json()
    baseline_id = payload["baseline"]
    compare_id = payload["compare"]
    step_sizes_for_analysis = [1, 2]

    # Loop-invariant lookups hoisted out of the step-size loop: they depend
    # only on the two simulation ids, not on the step size.
    compare_simulation_name = get_compare_simulation_name(compare_id)
    _, compare_json, _ = preprocess_kpi_jsons(baseline_id, compare_id)

    for step_size in step_sizes_for_analysis:
        folder_name, analysis_limit = get_analysis_parameters(step_size)
        # The generator mutates the dict in place, so hand it a copy and keep
        # `compare_json` pristine for the DeepDiff comparison below.
        compare_json_copy = deepcopy(compare_json)
        for plus_one_json, changed_element in plus_one_kpi_change_generator(compare_json_copy, step_size, analysis_limit):

            # Only re-run dexi when the nudge actually changed something.
            if len(DeepDiff(plus_one_json, compare_json)) != 0:
                logger.error(f'changed element is: {changed_element}, diff {DeepDiff(compare_json, plus_one_json)} and thats what we will compare\n passed down changed JSON IS: {plus_one_json}')
                dexi_output, simulation_dirs = prepare_and_run_dexi(plus_one_json, city_id, compare_simulation_name,
                                                                    baseline_id, compare_id, folder_name)

                write_results(dexi_output, baseline_id, simulation_dirs[0])
                # Remark: first we need to run the normal dexi pipeline, so we can create the temp folder. Then later we need to call the recommendation analysis function
                compare_dexi_results(f'{data_dir}/temp/', f'{data_dir}/{folder_name}/', changed_element, folder_name, step_size)

    return {"success": True}

@app.route("/dss/general_recommendation", methods=["GET"])
def general_recommendation():
    """Return the overall dexi verdict parsed from the latest results csv."""
    with open(general_dexi_output_dir, 'r', encoding='utf-8') as results_file:
        content = results_file.readlines()
    # Second line, second column holds the root-attribute index; strip csv quoting.
    raw_value = content[1].split(',')[1]
    cleaned_value = raw_value.replace('"', '').replace("'", "").strip()
    return {"general_recommendation": create_general_recommendation_text(int(cleaned_value))}


def create_general_recommendation_text(index):
    """
    Translate the dexi root-attribute index into a human readable verdict.

    Index 1 means the simulated scenario beat the baseline, 3 means it was
    worse; every other value is reported as roughly equal. (The previous
    revision used f-strings with no placeholders — plain literals now.)
    """
    if index == 1:
        return 'According to the decision making system (DEXi), the simulation scenario is better than the baseline one.'
    if index == 3:
        return 'According to the decision making system (DEXi), the simulation scenario is worse than the baseline one.'
    return 'According to the decision making system (DEXi), the simulation scenario shows no significant improvements over the baseline one and is considered equal.'


def get_analysis_parameters(step_size):
    """
    Return (folder_name, analysis_limit) for a recommendation step size.

    Args:
        step_size: 1 or 2 — the +/- analysis granularity.

    Raises:
        ValueError: for any other step size (the previous revision failed
        with an UnboundLocalError in that case).
    """
    if step_size == 1:
        return "temp_recc_eng_plus_one", 5
    if step_size == 2:
        return "temp_recc_eng_plus_two", 4
    raise ValueError(f"Unsupported analysis step size: {step_size}")


def compare_dexi_results(original_results_path, recommender_engine_results_path, changed_element, folder_name, step_size):
    """
    Diff a recommendation-engine dexi run against the plain run.

    Reads `results.csv` from both directories; if any tracked top-level KPI
    differs, writes `influenced_kpis.txt` (per-KPI deltas plus a trailing
    "Caused by" line) into the recommendation-engine folder and generates
    the recommendation text from it. Otherwise returns without output.
    """
    original_df = pd.read_csv(original_results_path + 'results.csv')
    recc_eng_df = pd.read_csv(recommender_engine_results_path + 'results.csv')

    # Outer merge with indicator: "left_only" rows exist only in the plain
    # run's output, i.e. the dexi evaluation changed for them.
    diff_from_original_df = original_df.merge(recc_eng_df, how='outer', indicator=True).query(
        '_merge=="left_only"').drop('_merge', axis=1)
    if diff_from_original_df.empty:
        logger.debug("EMPTY")
        return  # if this is empty, neither will the other df have elements
    logger.debug("FOUND SOMETHING")
    # Mirror image: rows that appear only in the recommendation-engine run.
    diff_from_recc_eng_df = original_df.merge(recc_eng_df, how='outer', indicator=True).query(
        '_merge=="right_only"').drop('_merge', axis=1)

    # Index both frames by their first column (the KPI name) so the subtract
    # below aligns row-wise per KPI.
    diff_from_original_df.set_index(diff_from_original_df.columns[0], inplace=True)
    diff_from_recc_eng_df.set_index(diff_from_recc_eng_df.columns[0], inplace=True)

    difference_df = diff_from_recc_eng_df.subtract(diff_from_original_df)
    # we search for only these top level KPIs because there will be results where the system will just repeat itself,
    # ex: "in order to decrease CO2 levels by 10%, then you should decrease CO2 levels by 10%". we are only
    # interested if the parent of the KPI will be changed, for example if by changing CO2 levels do we change the
    # overall pollution in the city
    top_level_KPIs = ['Mobility Policy Quality', 'Local', 'Local public transport', 'Local trips share',
                      'Local Pollution', 'Local Emissions', 'Local Traffic', 'Local bike infrastructure',
                      'City-wide', 'City-wide Pollution', 'City-wide public transport', 'City-wide Traffic',
                      'City-wide Emissions', 'City-wide trips share', 'City-wide bike infrastructure']

    # for/else: the `else` fires only if no break happened, i.e. no tracked
    # top-level KPI was influenced — then there is nothing to recommend.
    for top_level_kpi in top_level_KPIs:
        if top_level_kpi.lower() in map(str.lower, difference_df.index):
            # Dump the full delta table, then append the cause marker line.
            difference_df.to_csv(recommender_engine_results_path + 'influenced_kpis.txt')
            with open(recommender_engine_results_path + 'influenced_kpis.txt', 'a', encoding='utf-8')as f:
                f.write('Caused by,' + changed_element)
            break
    else:
        return
    # Reached only via the break above: turn the influenced-KPI file into text.
    read_results_form_recc_eng_and_write_recc(recommender_engine_results_path + 'influenced_kpis.txt', changed_element, folder_name, step_size)
    logger.error(difference_df)


def read_results_form_recc_eng_and_write_recc(results_path, caused_by_element, folder_name, step_size):
    """
    Turn the influenced-KPI csv written by compare_dexi_results into a
    plain-text recommendation file for the given analysis folder.

    Args:
        results_path: path to `influenced_kpis.txt`.
        caused_by_element: the KPI that was nudged to cause the differences.
        folder_name: analysis folder under the data dir to write into.
        step_size: analysis step size, used for the percentage wording.
    """
    with open(results_path, 'r', encoding='utf-8') as f:
        results = f.readlines()
    # results[0] is the csv header and the last line is the "Caused by,..."
    # marker appended by compare_dexi_results — both are skipped.
    # (The previous revision also parsed an unused scenario name from the header.)
    result = ''
    for line in results[1:-1]:
        elements = line.split(',')
        kpi = elements[0]
        amount_changed = elements[1]
        result += create_recommendation_text(kpi, amount_changed, caused_by_element, step_size)
    with open(f"{data_dir}/{folder_name}/recommendation.txt", 'w', encoding='utf-8') as f:
        f.write(result)


def create_recommendation_text(kpi, amount_changed, caused_by, step_size):
    """
    Build one recommendation sentence, or return "" when the cause IS the
    KPI itself (directly, or because both refer to CO2).
    """
    normalized_kpi = kpi.replace(' ', '').lower()
    normalized_cause = caused_by.replace(' ', '').lower()
    if normalized_cause in normalized_kpi:
        return ""
    if 'co2' in normalized_cause and 'co2' in normalized_kpi:
        return ""
    return f'In order to improve the KPI of {kpi} by {int(amount_changed)*10}%, {caused_by} should be improved by {step_size*10}%\n'


def _read_recommendation_text(step_size):
    """Return the stored recommendation text for a step size, or "" if absent."""
    folder_name, _ = get_analysis_parameters(step_size)
    path = f"{data_dir}/{folder_name}/recommendation.txt"
    if not os.path.exists(path):
        return ""
    with open(path, 'r', encoding='utf-8') as f:
        return f.read()


@app.route("/dss/get_specific_recommendation/", methods=["GET"])
def get_recommendation():
    """
    Return the specific recommendations produced by the +/- 1 and +/- 2
    analyses, with a fallback message when an analysis produced nothing.
    """
    # Duplicated read-file logic factored into _read_recommendation_text.
    recommendation_for_plus_one_analysis = _read_recommendation_text(1)
    recommendation_for_plus_two_analysis = _read_recommendation_text(2)

    if recommendation_for_plus_one_analysis == "":
        recommendation_for_plus_one_analysis = "No recommendation could be made using the +/- 1 analysis of the decision making system (DEXi)"
    if recommendation_for_plus_two_analysis == "":
        recommendation_for_plus_two_analysis = "No recommendation could be made using the +/- 2 analysis of the decision making system (DEXi)"
    return {"specific_recommendation_plusminus_one": recommendation_for_plus_one_analysis, "specific_recommendation_plusminus_two": recommendation_for_plus_two_analysis}


def prepare_and_run_dexi(compare_json, city_id, compare_simulation_name, baseline_id, compare_id, temp_name):
    """
    Write dexi inputs for `compare_json`, run DEXiEval and parse its output.

    Args:
        compare_json: discretized KPI dict for the compare scenario.
        city_id: city key selecting the decision model and input layout.
        compare_simulation_name: one-element list with the simulation name.
        baseline_id, compare_id: simulation ids used to resolve result dirs.
        temp_name: working sub-directory (e.g. "temp") under the data dir.

    Returns:
        (dexi_output, simulation_dirs): parsed dexi results keyed by
        simulation name, and the result directory names for both ids.
    """
    json_inputs = [compare_json]

    # decision model path
    dexi_model_path = f"{assets_dir}/Urbanite_{city_id}_decision_model.dxi"

    # Create the working folder. makedirs(exist_ok=True) replaces the racy
    # check-then-mkdir; the previous second mkdir on the output file's
    # dirname was redundant (it is this same folder).
    temp_path = f"{data_dir}/{temp_name}"
    os.makedirs(temp_path, exist_ok=True)
    dexi_input_path = f"{temp_path}/inputs.tab"
    dexi_output_path = f"{temp_path}/results.csv"

    # prepare inputs, directory names
    dexi_input = create_dexi_input(city_id, json_inputs, compare_simulation_name)
    logger.debug("DEXI INPUT:\n%s", dexi_input)

    simulation_dirs = utils.get_sim_dir_names([baseline_id, compare_id])
    logger.debug("SIMULATION DIRS:\n%s", simulation_dirs)

    write_to_temporary_dir(dexi_input, dexi_input_path)
    debug_log_temporary_files(dexi_input_path)

    call_dexi(dexi_model_path, dexi_input_path, dexi_output_path)
    dexi_output = read_dexi_output(dexi_output_path, compare_simulation_name)
    logger.debug("DEXI_OUTPUT:\n%s", dexi_output)
    return dexi_output, simulation_dirs


def get_compare_simulation_name(c_id):
    """
    Look up the stored name of simulation `c_id`.

    Returns a one-element list, matching the header shape expected by the
    dexi input builders.
    """
    with db_session():
        simulation = Simulation.select(lambda sim: sim.id == c_id).get()
        return [simulation.name]


def preprocess_kpi_jsons(baseline_id, compare_id):
    """
    Read the kpi data, calculate relative to baseline, discretize for dexi.

    Returns the discretized baseline json, the discretized compare json and
    the simulation ids extracted from the raw files.
    """
    raw_baseline = read_kpi_jsons([baseline_id])[0]
    raw_compare = read_kpi_jsons([compare_id])[0]

    # Extracted from the raw files before any transformation.
    simulation_ids = get_simulation_ids([raw_baseline, raw_compare])

    relative_pair = calculate_relative_kpi(raw_baseline, raw_compare)
    discrete_baseline, discrete_compare = discretize_relative_kpi(*relative_pair)

    return discrete_baseline, discrete_compare, simulation_ids


def read_kpi_jsons(sim_ids):
    """
    Returns json object of kpi files for all simulations provided by ids.

    Each kpis.json is validated against the city's json schema.

    Raises:
        jsonschema.ValidationError: if a kpi file does not match the schema.
        FileNotFoundError: if a simulation has no kpis.json (yet).
    """
    logger.debug("read_kpi_jsons: %s", sim_ids)
    datetime_dirs = utils.get_sim_dir_names(sim_ids)
    kpis_data = []
    for sim_id, datetime_dir in zip(sim_ids, datetime_dirs):
        sim_path = f"{data_dir}/simulations/{sim_id}/results/{datetime_dir}/kpis.json"
        logger.warning("Reading file at: %s", sim_path)
        with open(sim_path, "r", encoding="utf-8") as fd:
            # json.load reads straight from the handle (was json.loads(fd.read())).
            kpis_data.append(json.load(fd))

    # validate json against schema
    schema_path = f"{assets_dir}/schemas/kpis_{city_id}.schema.json"
    with open(schema_path, "r", encoding="utf-8") as fd:
        schema = json.load(fd)
    for kpis_datum in kpis_data:
        validate(kpis_datum, schema)

    return kpis_data


def calculate_relative_kpi(baseline_json: Dict, compare_json: Dict):
    """
    Recursively relativize KPI values against the baseline.

    Every baseline leaf becomes 1 and the matching compare leaf becomes the
    ratio baseline / compare (so a SMALLER compare value yields a LARGER
    ratio — see the orientation notes in discretize_relative_kpi). A compare
    value of 0.0 maps to 0.0 to avoid division by zero.

    Returns:
        (baseline_result, compare_result): transformed copies of the inputs.
    """
    baseline_result = baseline_json.copy()
    compare_result = compare_json.copy()
    for b_k, b_v in baseline_json.items():
        if isinstance(b_v, dict):
            # Recurse into nested KPI groups.
            baseline_result[b_k], compare_result[b_k] = calculate_relative_kpi(
                b_v, compare_json[b_k]
            )
        else:
            try:
                compare_result[b_k] = (
                    0.0
                    if compare_json[b_k] == 0.0
                    else float(b_v) / float(compare_json[b_k])
                )
            except (TypeError, ValueError) as e:
                # float() raises ValueError/TypeError, never RuntimeError, so
                # the previous handler could never fire. Log and re-raise
                # instead of returning None, which broke tuple unpacking in
                # callers.
                app.logger.error("Error occured during relativizing KPIs.\n%s", e)
                raise
            baseline_result[b_k] = 1
    return baseline_result, compare_result


def is_kpi_good(kpi_name):
    """
    Return True when `kpi_name` is one of the KPIs treated as beneficial
    by the recommendation analysis (these are nudged in the opposite
    direction in plus_one_kpi_change_generator).

    Matching is case-insensitive against the concatenated lowercase names.
    """
    # Set literal for O(1) membership; direct return instead of if/else True/False.
    good_kpis = {'bikesafety', 'bikeability', 'averagespeedofpublictransport', 'numberofbiketrips', 'publictransportuse'}
    return kpi_name.lower() in good_kpis

def plus_one_kpi_change_generator(json_file: Dict, step_size: int, analysis_limit: int):
    """
    Yield (mutated_json, changed_key) pairs, nudging one KPI leaf at a time.

    The dict is mutated IN PLACE before each yield and restored right after,
    so consumers must inspect (or copy) the yielded dict immediately.
    "Good" KPIs (see is_kpi_good) are nudged downwards by `step_size`,
    all others upwards, bounded by `analysis_limit` / the lower limit.
    """
    for k, v in json_file.items():
        # Tracks whether this leaf was actually changed, so we only undo
        # a change we made.
        was_it_increased_flag = False
        if k == 'simulation_id':
            # Bookkeeping field, not a KPI.
            continue
        if isinstance(v, dict):
            # NOTE(review): json_file[k] is first rebound to the sub-generator
            # object, then overwritten with the (mutated) sub-dict on every
            # inner iteration — confirm this aliasing is intentional.
            json_file[k] = plus_one_kpi_change_generator(v, step_size, analysis_limit)
            for i in json_file[k]:
                json_file[k] = i[0]
                yield json_file, i[1]
        # NOTE(review): no elif — after the dict branch above, control falls
        # through here with json_file[k] being a dict, so the isinstance
        # checks below are False and the value is yielded unchanged.
        if is_kpi_good(k):
            # Lower bound below which a "good" KPI is not decreased further.
            # NOTE(review): unbound for step sizes other than 1 or 2.
            if step_size == 2:
                lower_analysis_limit = 2
            elif step_size == 1:
                lower_analysis_limit = 1
            if isinstance(json_file[k], int) and json_file[k] > lower_analysis_limit:
                json_file[k] -= step_size
                was_it_increased_flag = True
            yield json_file, k
            # Undo the nudge so the next leaf starts from the original state.
            if isinstance(json_file[k], int) and was_it_increased_flag:
                json_file[k] += step_size
        else:
            if isinstance(json_file[k], int) and json_file[k] < analysis_limit:
                json_file[k] += step_size
                was_it_increased_flag = True
            yield json_file, k
            # Undo the nudge so the next leaf starts from the original state.
            if isinstance(json_file[k], int) and was_it_increased_flag:
                json_file[k] -= step_size


def discretize_relative_kpi(baseline_json: Dict, compare_json: Dict):
    """
    Map relative KPI ratios onto the discrete 1..5 scale dexi expects.

    Baseline leaves always become 3 ("no change"). Compare leaves — the
    baseline/compare ratios produced by calculate_relative_kpi — are
    bucketed: < 0.85 -> 1, < 0.95 -> 2, < 1.05 -> 3, < 1.15 -> 4, else 5.
    Since the ratio is baseline/compare, a SMALLER compare KPI yields a
    HIGHER index, so attribute scales in the dexi models must be oriented
    accordingly (e.g. pollution decreasing, public transport use
    increasing). Index 0 is never produced because dexi rejects it.
    """
    discrete_baseline = baseline_json.copy()
    discrete_compare = compare_json.copy()
    # Bucket upper bounds paired with the dexi index they map onto.
    buckets = ((0.85, 1), (0.95, 2), (1.05, 3), (1.15, 4))
    for key, value in baseline_json.items():
        if isinstance(value, dict):
            # Recurse into nested KPI groups.
            discrete_baseline[key], discrete_compare[key] = discretize_relative_kpi(
                value, compare_json[key]
            )
            continue
        discrete_baseline[key] = 3  # baseline is the reference: always "no change"
        for upper_bound, dexi_index in buckets:
            if compare_json[key] < upper_bound:
                discrete_compare[key] = dexi_index
                break
        else:
            discrete_compare[key] = 5  # >= 1.15: strongest change
    return discrete_baseline, discrete_compare


def create_temp_folder():
    """
    Creates the temporary folder for dexi inputs.

    Returns:
        The path of the (now existing) temp folder.
    """
    temp_path = f"{data_dir}/temp"
    # exist_ok avoids the check-then-create race of the previous revision
    # and also creates any missing parent directories.
    os.makedirs(temp_path, exist_ok=True)
    return temp_path


def get_simulation_ids(json_inputs: List[Dict]) -> List[str]:
    """
    Returns a list of simulation ids extracted from the kpi jsons.

    Raises:
        KeyError: if an input json lacks a "simulation_id" key.
    """
    simulation_ids = [json_i["simulation_id"] for json_i in json_inputs]
    # Lazy %-style args so the message is only formatted when DEBUG is enabled.
    app.logger.debug("simulation ids: %s", simulation_ids)
    return simulation_ids


def write_to_temporary_dir(dexi_input, dexi_input_path) -> None:
    """
    Serialise the dexi input rows as a tab-separated file, one row per line,
    each line terminated by a newline.
    """
    with open(dexi_input_path, "w", encoding="utf-8") as fd:
        for row in dexi_input:
            fd.write("\t".join(str(cell) for cell in row))
            fd.write("\n")


def debug_log_temporary_files(dexi_input_path: str) -> None:
    """
    Debug only - logs the contents of the written dexi input file,
    line by line, between begin/end markers.
    """
    with open(dexi_input_path, "r", encoding="utf-8") as fd:
        content = fd.read().splitlines()
    app.logger.debug("*** DEXI_INPUT_FILE ***")
    for line in content:
        app.logger.debug(line.strip())
    app.logger.debug("[__ DEXI_INPUT_FILE __]")


def create_dexi_input(_city_id: str, json_inputs: List, compare_sim_name: List):
    """
    Builds the dexi input rows for the given city.

    Dispatches to the per-city builder; returns None (after logging an
    error) for an unknown city id.
    """
    app.logger.debug(json_inputs)
    logger.error("compare_sim_name in create_dexi_input:\n%s", compare_sim_name)
    city_builders = {
        "bilbao": create_dexi_input_bilbao,
        "messina": create_dexi_input_messina,
        "amsterdam": create_dexi_input_amsterdam,
        "helsinki": create_dexi_input_helsinki,
    }
    builder = city_builders.get(_city_id)
    if builder is None:
        app.logger.error(f"Illegal city_id provided: {_city_id}")
        return None
    return builder(json_inputs, compare_sim_name)


def create_dexi_input_amsterdam(json_inputs, compare_sim_name):
    """
    Returns the dexi inputs list for Amsterdam.

    One header row followed by one row per KPI; a missing traffic or
    bikeInfrastructure group falls back to the default value 2.
    """
    def _row(label, area, group, key):
        # Row = label followed by one value per compared simulation.
        return [label] + [
            json_inputs[i]["amsterdam"][area].get(group, {}).get(key, 2)
            for i, _ in enumerate(compare_sim_name)
        ]

    return [
        [""] + compare_sim_name,
        _row("Local bike intensity", "local", "traffic", "bikeIntensity"),
        _row("Local bike congestion", "local", "traffic", "bikeCongestion"),
        _row("Local bike safety", "local", "bikeInfrastructure", "bikeSafety"),
        _row("Local bikeability", "local", "bikeInfrastructure", "bikeability"),
        _row("City-wide bike intensity", "cityWide", "traffic", "bikeIntensity"),
        _row("City-wide bike congestion", "cityWide", "traffic", "bikeCongestion"),
        _row("City-wide bike safety", "cityWide", "bikeInfrastructure", "bikeSafety"),
        _row("City-wide bikeability", "cityWide", "bikeInfrastructure", "bikeability"),
    ]


def create_dexi_input_bilbao(json_inputs, compare_sim_name):
    """
    Returns the dexi inputs list for Bilbao.

    One header row followed by one row per KPI; missing values fall back to
    the default 2. Fixes vs. the previous revision:
    - "Local PM10" had no default and yielded None for a missing KPI;
    - two ``.get("local")`` calls had no ``{}`` default and raised
      AttributeError when the "local" section was absent;
    - the "City-wide Acoustic pollution" row read the key "pollution"
      instead of "accousticPollution" (copy/paste of the local row's key —
      spelling kept consistent with the local section).
    """
    def _row(label, area, group, key):
        # Row = label followed by one value per compared simulation.
        return [label] + [
            json_inputs[i]["bilbao"].get(area, {}).get(group, {}).get(key, 2)
            for i, _ in enumerate(compare_sim_name)
        ]

    return [
        [""] + compare_sim_name,
        _row("Local NOx", "local", "pollution", "NOx"),
        _row("Local PM10", "local", "pollution", "PM"),
        _row("Local CO2", "local", "pollution", "CO2_TOTAL"),
        _row("Local Acoustic pollution", "local", "pollution", "accousticPollution"),
        _row("Local Pedestrian travel time", "local", "traffic", "pedestrianTravelTime"),
        _row("Local Daily internal bike travels", "local", "traffic", "dailyInternalBikeTravels"),
        _row("City-wide NOx", "cityWide", "pollution", "NOx"),
        _row("City-wide PM10", "cityWide", "pollution", "PM"),
        _row("City-wide CO2", "cityWide", "pollution", "CO2_TOTAL"),
        _row("City-wide Acoustic pollution", "cityWide", "pollution", "accousticPollution"),
        _row("City-wide Pedestrian travel time", "cityWide", "traffic", "pedestrianTravelTime"),
        _row("City-wide Daily internal bike travels", "cityWide", "traffic", "dailyInternalBikeTravels"),
        # Top-level KPI stored directly under the "bilbao" key.
        ["Entry capacity to center"]
        + [
            json_inputs[i]["bilbao"].get("entryCapacityToCenter", 2)
            for i, _ in enumerate(compare_sim_name)
        ],
    ]


def create_dexi_input_helsinki(json_inputs, compare_sim_name):
    """
    Returns the dexi inputs list for Helsinki.

    One header row followed by one row per KPI; missing values fall back to
    the default 2. Fix vs. the previous revision: the "Harbour area traffic
    flow" row now reads the KPI from under the "helsinki" city key like
    every other row — it previously looked at the top level of the json and
    therefore always fell back to the default.
    """
    def _pollution_row(label, area, key):
        # Pollution KPIs live under <area>.pollution.<key>.
        return [label] + [
            json_inputs[i]["helsinki"][area].get("pollution", {}).get(key, 2)
            for i, _ in enumerate(compare_sim_name)
        ]

    def _area_row(label, area, key):
        # KPIs stored directly under the local/cityWide area dict.
        return [label] + [
            json_inputs[i]["helsinki"][area].get(key, 2)
            for i, _ in enumerate(compare_sim_name)
        ]

    return [
        [""] + compare_sim_name,
        _pollution_row("Local NOx", "local", "NOx"),
        _pollution_row("Local PM10", "local", "PM"),
        _pollution_row("Local CO2", "local", "CO2_TOTAL"),
        _pollution_row("Local Acoustic pollution", "local", "acousticPollution"),
        _area_row("Local congestions and bottlenecks", "local", "congestionsAndBottlenecks"),
        _pollution_row("City-wide NOx", "cityWide", "NOx"),
        _pollution_row("City-wide PM10", "cityWide", "PM"),
        _pollution_row("City-wide CO2", "cityWide", "CO2_TOTAL"),
        _pollution_row("City-wide Acoustic pollution", "cityWide", "acousticPollution"),
        _area_row("City-wide congestions and bottlenecks", "cityWide", "congestionsAndBottlenecks"),
        ["Harbour area traffic flow"]
        + [
            json_inputs[i]["helsinki"].get("harbourAreaTrafficFlow", 2)
            for i, _ in enumerate(compare_sim_name)
        ],
    ]


def create_dexi_input_messina(json_inputs, compare_sim_name):
    """
    Returns the dexi inputs list for Messina.

    One header row followed by one row per KPI; a missing publicTransport
    or shareOfTrips group falls back to the default value 2.
    """
    def _row(label, area, group, key):
        # Row = label followed by one value per compared simulation.
        return [label] + [
            json_inputs[i]["messina"][area].get(group, {}).get(key, 2)
            for i, _ in enumerate(compare_sim_name)
        ]

    return [
        [""] + compare_sim_name,
        _row("Local public transport use", "local", "publicTransport", "publicTransportUse"),
        _row("Local average speed of public transport", "local", "publicTransport", "averageSpeedOfPublicTransport"),
        _row("Local number of bike trips", "local", "publicTransport", "numberOfBikeTrips"),
        _row("Local share of public transport", "local", "shareOfTrips", "shareOfPublicTransport"),
        _row("Local share of car trips", "local", "shareOfTrips", "shareOfCarTrips"),
        _row("Local share of bicycles", "local", "shareOfTrips", "shareOfBicycles"),
        _row("City-wide public transport use", "cityWide", "publicTransport", "publicTransportUse"),
        _row("City-wide average speed of public transport", "cityWide", "publicTransport", "averageSpeedOfPublicTransport"),
        _row("City-wide number of bike trips", "cityWide", "publicTransport", "numberOfBikeTrips"),
        _row("City-wide share of public transport", "cityWide", "shareOfTrips", "shareOfPublicTransport"),
        _row("City-wide share of car trips", "cityWide", "shareOfTrips", "shareOfCarTrips"),
        _row("City-wide share of bicycles", "cityWide", "shareOfTrips", "shareOfBicycles"),
    ]


def call_dexi(dexi_model_path, dexi_input_path, dexi_output_path):
    """
    Calls dexi, logs dexi return status.

    Invokes the DEXiEval command line tool (native binary on Linux,
    DEXiEval.exe elsewhere) with "-noindent". The subprocess exit code is
    only logged — a non-zero status is NOT raised to the caller.
    """
    subprocess_status = 0
    if platform.system().lower() == "linux":
        # List form (shell=False): arguments are passed without shell quoting.
        subprocess_status = subprocess.call(
            [
                f"{dexi_dir}/DEXiEval",
                dexi_model_path,
                dexi_input_path,
                "-noindent",
                dexi_output_path,
            ]
        )
    else:
        # NOTE(review): unlike the Linux branch there is no "/" between
        # dexi_dir and the executable name — presumably DEXI_DIR carries a
        # trailing separator in the Windows configuration; confirm.
        subprocess_status = subprocess.call(
            [
                f"{dexi_dir}DEXiEval.exe",
                dexi_model_path,
                dexi_input_path,
                "-noindent",
                dexi_output_path,
            ]
        )
    app.logger.debug(f"DEXI subprocess status {subprocess_status}")


def read_dexi_output(dexi_output_path: str, simulation_names: List[str]) -> Dict:
    """
    Reads dexi output files and returns object.

    The csv's first (unnamed) column holds the attribute name and each
    simulation is a column; the result maps simulation name to
    {attribute: value}.
    """
    # Context manager closes the handle deterministically — the previous
    # revision opened the file inline and leaked the handle.
    with open(dexi_output_path, "r", encoding="utf-8") as fd:
        dexi_output = list(csv.DictReader(fd))
    return {s: {row[""]: row[s] for row in dexi_output} for s in simulation_names}


def write_results(dexi_output, sim_id, sim_dir):
    """
    Persist the dexi evaluation into the simulation's results directory
    as evaluated.json.
    """
    target = f"{data_dir}/simulations/{sim_id}/results/{sim_dir}/evaluated.json"
    with open(target, "w", encoding="utf-8") as out_file:
        out_file.write(json.dumps(dexi_output))