Commit b7b50ffb authored by b95debora

update parser with a complete and generic version

parent e4bf2262
Showing with 752 additions and 290 deletions
@@ -12,8 +12,8 @@ Installation
To get a functional ICG application, follow these steps:
- Download the full content of this repository
- Build the Docker image by running the following command: `docker build -t icg:0.1 .`
- Run the container: `docker run --name icg -d -p 5000:5000 icg:0.1`
- Build the Docker image by running the following command: `docker build -t icg:1.0.0 .`
- Run the container: `docker run --name icg -d -p 5000:5000 icg:1.0.0`
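Once the container is running, a quick way to exercise the service is to POST an intermediate representation to the `/infrastructure/files` endpoint from the API router in this commit (a minimal sketch, assuming the `requests` package; the payload shape and the archive-shaped response are taken from the sample `ir.json` and the DOML endpoint later in this commit):

```python
# Minimal smoke test (illustrative, not part of the repository).
# A real intermediate representation (like the sample ir.json in this commit)
# is needed for meaningful output; the empty "steps" list below only checks
# that the service answers.
import requests

ir = {"output_path": "output_files_generated/demo/", "steps": []}
resp = requests.post("http://localhost:5000/infrastructure/files", json=ir)
resp.raise_for_status()
with open("iac_files.tar.gz", "wb") as archive:
    archive.write(resp.content)  # assumed to be the generated IaC archive
```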
Usage
------------
......
import json
import logging
import json
import tarfile
import uuid
@@ -12,6 +12,7 @@ api_router = APIRouter()
base_compress_file_name = "iac_files_"
@api_router.post("/infrastructure/files")
def create_iac_from_intermediate_representation(intermediate_representation: dict = Body(...)):
logging.info("Received intermediate representation create_iac_from_intermediate_representation request")
@@ -29,19 +30,21 @@ def create_iac_from_doml(data: str = Body(..., media_type="application/xml")):
f = open(temp_model_file_path, "w")
f.write(data)
f.close()
ModelParser.parse_model(temp_model_file_path, False)
with open("input_file_generated/ir.json") as json_file:
data = json.load(json_file)
template_generated_folder = create_infrastructure_files(data)
intermediate_representation = ModelParser.parse_model(temp_model_file_path)
intermediate_representation = reorganize_info(intermediate_representation)
save(intermediate_representation, "input_file_generated/ir.json")
template_generated_folder = create_infrastructure_files(intermediate_representation)
compress_file_name = random_file_name_generation(base_compress_file_name)
compress_file_folder = compress_file(template_generated_folder, compress_file_name)
return FileResponse(compress_file_folder,
media_type='application/octet-stream',
filename=compress_file_name)
def random_file_name_generation(base_name):
return base_name + str(uuid.uuid4().hex) + ".tar.gz"
def compress_file(source_folder, dest_file_name):
# prefix_path = "/opt/"
prefix_path = ""
@@ -50,3 +53,21 @@ def compress_file(source_folder, dest_file_name):
with tarfile.open(prefix_path + dest_file_name, "w:gz") as tar:
tar.add(source_folder, arcname='.')
return prefix_path + dest_file_name
def save(data, file_path):
file = open(file_path, "w")
if isinstance(data, dict):
data = json.dumps(data, indent=2, sort_keys=True)
print(data)
file.write(data)
file.close()
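# The parser stores the security-group rules as a single dict keyed by rule name (plus a
# "name" entry); reorganize_info flattens those values into a plain list so each rule can
# be rendered on its own by the templates.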
def reorganize_info(intermediate_repr):
computing_group_list = []
groups = intermediate_repr["steps"][0]["data"]["computingGroup"][0]
for key in groups:
if not key == "name":
computing_group_list.append(groups[key])
intermediate_repr["steps"][0]["data"]["computingGroup"] = computing_group_list
return intermediate_repr
import logging
from pyecore.ecore import EOrderedSet, EEnumLiteral
TO_BE_PARSED_RESOURCES = {}
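# Resources that are referenced but not yet serialized, keyed by resource name; entries are
# added/removed by update_missing_parsed_resources() and read back via
# retrieve_missing_parsed_resources().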
def extract_value_from(ecore_object_value):
if isinstance(ecore_object_value, EOrderedSet):
value = list(ecore_object_value)
elif isinstance(ecore_object_value, EEnumLiteral):
value = ecore_object_value.name
else:
value = ecore_object_value
return value
def get_reference_list_if_exists(from_object, reference):
reference_from_object = from_object.eGet(reference.name)
if reference_from_object and isinstance(reference_from_object, EOrderedSet) and len(reference_from_object) > 0:
return reference_from_object
else:
return None
def save_annotations(from_object, to_object):
print(f'Saving annotation from {from_object.name}')
if not to_object:
to_object = {}
for annotation in from_object.annotations:
to_object[annotation.key] = annotation.value
return to_object
def save_attributes(from_object, to_object, skip_component_name=False):
print(f'Saving attributes from {from_object.name}')
if not to_object:
to_object = {}
for attribute in from_object.eClass.eAllAttributes():
if from_object.eGet(attribute.name):
key = attribute.name
if skip_component_name and attribute.name == "name":
key = "infra_element_name"
print(f'Renaming attribute {attribute.name} from {from_object.name} to {key}')
value = from_object.eGet(attribute.name)
if isinstance(value, EOrderedSet):
value = list(value)
if isinstance(value, EEnumLiteral):
value = value.name
to_object[key] = value
return to_object
def update_missing_parsed_resources(resource, reference, is_to_be_parsed):
resource_name = resource.name
if is_to_be_parsed and not (resource_name in TO_BE_PARSED_RESOURCES):
print(f'Adding {resource_name} as missing parsed resource')
TO_BE_PARSED_RESOURCES[resource_name] = {"resource": resource,
"reference": reference} ## TODO introdurre interfaccia
elif not is_to_be_parsed and (resource_name in TO_BE_PARSED_RESOURCES):
print(f'Removing {resource_name} to the missing parsed resource')
del TO_BE_PARSED_RESOURCES[resource_name]
else:
print(f'update_missing_parsed_resources: skipping {resource_name}')
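# For every non-empty single-valued reference, store the referenced object's name under the
# reference key and register the target in TO_BE_PARSED_RESOURCES so it is serialized later;
# multi-valued references are only logged for now (see the TODO below).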
def save_references_info(from_object, to_object): ## TODO refactoring
refs = from_object.eClass.eAllReferences()
for ref in refs:
if get_reference_list_if_exists(from_object, ref):
logging.info(f'{ref.name} is a list, skipping it')
## TODO: handle the list
elif from_object.eGet(ref.name):
logging.info(f'Adding reference "{ref.name}" location')
reference_object = from_object.eGet(ref.name)
to_object[ref.name] = reference_object.name
update_missing_parsed_resources(reference_object, reference=ref, is_to_be_parsed=True)
return to_object
def save_inner_components(from_object, to_object):
inner_components = from_object.eAllContents()
for obj in inner_components:
if not isinstance(obj, EOrderedSet): # TODO: expand info
print(f'Saving information from object {obj.name}')
inner_component = save_attributes(obj, {})
save_references_info(obj, inner_component)
to_object[obj.name] = inner_component
return to_object
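# Merge everything known about an infrastructure-layer element into to_object (attributes,
# single-valued references, contained objects) and mark the element as no longer missing.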
def add_infrastructure_information(infrastructure_element, to_object):
print(f'Saving infrastructure information from {infrastructure_element.name}')
update_missing_parsed_resources(infrastructure_element, is_to_be_parsed=False, reference=None)
save_attributes(infrastructure_element, to_object, skip_component_name=True)
save_references_info(infrastructure_element, to_object)
save_inner_components(infrastructure_element, to_object)
return to_object
def retrieve_missing_parsed_resources():
return TO_BE_PARSED_RESOURCES
@@ -11,218 +11,93 @@
# --single / --single_mmodel use the single (non-split) metamodel
# model the input model to be translated into the ICG intermediate representation
#
# Author: Lorenzo Blasi
# 23/2/2022 - created
# © Copyright 2022 Hewlett Packard Enterprise Development LP
# -------------------------------------------------------------------------
import logging
import sys
from pyecore.resources import ResourceSet, URI, global_registry
import pyecore.ecore as Ecore # This gets a reference to the Ecore metamodel implementation
# -------------------------------------------------------------------------
# Utility functions to printout the loaded model
# -------------------------------------------------------------------------
newline = "\n"
spaces = " "
comment = "#"
def write_to(outchannel, line):
# for now we just print on the console
if outchannel == "console":
print(line)
# if the channel is different we don't print at all
def print_obj(obj, level=0):
# for x in range(level):
# print(" ", end='')
class_name = obj.eClass.name
if class_name == 'Property':
# print('Class: {0}\t\t{1} = {2}'.format(class_name, obj.key, obj.value))
print(f'{comment}{level * spaces}Class: {class_name}\t\t{obj.key} = {obj.value}')
return False
if class_name == 'Deployment':
print(
f'{comment}{level * spaces}Class: {class_name}\t\tcomponent = {obj.component.eClass.name}/{obj.component.name} node = {obj.node.eClass.name}/{obj.node.name}')
return False
try:
obj_name = obj.name
print(f'{comment}{level * spaces}Class: {class_name}\t\tObject: {obj_name}')
return True
except Exception:
print(f'{comment}{level * spaces}Class: {class_name}\t\tObject: no name')
return False
def print_contents_recursive(obj, level=0):
if print_obj(obj, level):
for x in obj.eContents:
print_contents_recursive(x, level + 1)
# -------------------------------------------------------------------------
# Utility functions to produce the output Intermediate Language
# -------------------------------------------------------------------------
# --- Helpers
def extract_image_name(concretevm_obj):
# To find the VM image name you could search into the inverse relations of the abstract image generating its related abstract VM, looking for a concrete image object (whose class is VMImage) and extract the value from its contents
# concretevm_obj is a VirtualMachine (nginx-openstack_v2.doml:81, it should have been a OpenStackVM),
# concretevm_obj.maps is a VirtualMachine (the abstract one)
# concretevm_obj.maps.generatedFrom is a VMImage (the abstract one)
for x in concretevm_obj.maps.generatedFrom._inverse_rels:
if x[0].eClass.name == 'VMImage':
return x[0].eContents[0].value
def extract_concrete_network_name(abstractnet_obj):
for x in abstractnet_obj._inverse_rels:
if x[0].eClass.name == 'Network':
return x[0].eContents[0].value
# --- Handlers
def model_handler(obj, model_root, level, intermediate_repr):
# output prefix
append_in_file(intermediate_repr,
f'{level * spaces}{{{newline}{level * spaces}{spaces}"output_path": "output_files_generated/{obj.name}/",')
append_in_file(intermediate_repr, f'{level * spaces}{spaces}"steps": [')
# handle contents
for x in obj.eContents:
handle_obj(x, model_root, level + 2, intermediate_repr)
# output suffix
append_in_file(intermediate_repr, f'{level * spaces}{spaces}]')
append_in_file(intermediate_repr, f'{level * spaces}}}')
def concrete_infra_handler(obj, model_root, level, intermediate_repr):
# output prefix
append_in_file(intermediate_repr,
f'{level * spaces}{{{newline}{level * spaces}{spaces}"programming_language": "terraform",')
# handle contents
for x in obj.eContents:
handle_obj(x, model_root, level + 1, intermediate_repr)
# output suffix
append_in_file(intermediate_repr, f'{level * spaces}}}')
def network_handler(obj, model_root, level, intermediate_repr):
# ignore the concrete network, since its name has been extracted separately and included in the concrete VM
logging.warning('Ignoring Network')
def property_handler(obj, model_root, level, intermediate_repr):
key = obj.key
append_in_file(intermediate_repr, f'{level * spaces}"{key}" : "{obj.value}",')
from icgparser import DomlParserUtilities
from icgparser.DomlParserUtilities import get_reference_list_if_exists
def provider_handler(obj, model_root, level, intermediate_repr):
# output prefix
append_in_file(intermediate_repr,
f'{level * spaces}"data": {{{newline}{level * spaces}{spaces}"provider": "{obj.name}",')
# handle contents
for x in obj.eContents:
handle_obj(x, model_root, level + 1, intermediate_repr)
# output suffix
append_in_file(intermediate_repr, f'{level * spaces}}}')
OUTPUT_BASE_DIR_PATH = "output_files_generated/"
DOML_PATH = "icgparser/doml/doml.ecore"
def concrete_vm_handler(obj, model_root, level, intermediate_repr):
# output prefix
append_in_file(intermediate_repr, f'{level * spaces}"vm": [{{') # VMs can be more than one: I need an example...
level = level + 1
# print(f'{level * spaces}# maps {obj.maps.name}')
logging.warning(f"Ignoring map {obj.maps.name}")
# handle contents
for x in obj.eContents:
handle_obj(x, model_root, level, intermediate_repr)
# add other attributes defined elsewhere: image name, address, ...
append_in_file(intermediate_repr, f'{level * spaces}"image" : "{extract_image_name(obj)}",')
for iface in obj.maps.ifaces:
append_in_file(intermediate_repr, f'{level * spaces}"address" : "{iface.endPoint}",')
append_in_file(intermediate_repr,
f'{level * spaces}"network_name" : "{extract_concrete_network_name(iface.belongsTo)}"')
# output suffix
level = level - 1
append_in_file(intermediate_repr, f'{level * spaces}}}]')
def vm_image_handler(obj, model_root, level, intermediate_repr):
# ignore the concrete image, since its image name has been extracted separately and included in the concrete VM
logging.warning(f'Ignoring VMImage')
class_handler = {
"DOMLModel": model_handler,
"ConcreteInfrastructure": concrete_infra_handler,
"Network": network_handler,
"Property": property_handler,
"RuntimeProvider": provider_handler,
"VirtualMachine": concrete_vm_handler, # Warning: the class here might change to some concrete VM class
"VMImage": vm_image_handler
doml_layers = {
"active_infrastructure_layer": "activeInfrastructure",
}
def handle_obj(obj, model_root, level, intermediate_repr):
if obj.eClass.name in class_handler:
class_handler[obj.eClass.name](obj, model_root, level, intermediate_repr)
else:
logging.warning(f'Class {obj.eClass.name} has no handler')
# -------------------------------------------------------------------------
# Load each part of the DOML metamodel and register them
# -------------------------------------------------------------------------
def load_metamodel(load_split_model, doml_directory="icgparser/doml"):
def load_model(doml_path, model_path):
global_registry[Ecore.nsURI] = Ecore # Load the Ecore metamodel first
rset = ResourceSet()
if load_split_model:
mm_parts = ["doml", "commons", "application", "infrastructure", "concrete", "optimization"]
for mm_filename in mm_parts:
resource = rset.get_resource(URI(f"{doml_directory}/{mm_filename}.ecore"))
mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
rset.metamodel_registry[mm_root.nsURI] = mm_root
else:
resource = rset.get_resource(URI(f"{doml_directory}/doml.ecore"))
resource = rset.get_resource(URI(f"{doml_path}"))
mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
rset.metamodel_registry[mm_root.nsURI] = mm_root
for subp in mm_root.eSubpackages:
rset.metamodel_registry[subp.nsURI] = subp
return rset
# -------------------------------------------------------------------------
# Finally load the model and print it out
# -------------------------------------------------------------------------
def parse_model(model, load_split_model, doml_directory="icgparser/doml"):
rset = load_metamodel(load_split_model)
doml_model_resource = rset.get_resource(URI(model))
doml_model = doml_model_resource.contents[0]
single = "single-file (doml.ecore)"
split = "split"
dash = "-"
logging.info(f'{comment}{80 * dash}')
logging.info(f'{comment} Using {split if load_split_model else single} metamodel from directory {doml_directory}')
print(f'{comment} Model loaded from file {model}:')
print(f'{comment}{80 * dash}')
print_contents_recursive(doml_model)
print(f'{comment}{80 * dash}{newline}{comment} Generated Intermediate Representation follows:{newline}{comment}')
intermediate_repr_file_path = "input_file_generated/ir.json"
create_file("input_file_generated/ir.json")
handle_obj(doml_model, doml_model, 0, intermediate_repr_file_path)
def create_file(file_name):
f = open(file_name, "w")
f.write("")
f.close()
def append_in_file(file_name, data):
f = open(file_name, "a")
f.write(data)
f.write("\n")
f.close()
doml_model_resource = rset.get_resource(URI(model_path))
return doml_model_resource.contents[0]
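# Lower-case only the first character, e.g. an Ecore class name such as "ComputingGroup"
# becomes the IR key "computingGroup".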
def to_camel_case(content):
return content[0].lower() + content[1:]
def include_missing_objects_from_infrastructure_layer(to_step):
for obj_name in DomlParserUtilities.retrieve_missing_parsed_resources():
obj = DomlParserUtilities.retrieve_missing_parsed_resources()[obj_name]
infra_object_representation = {}
infra_object_representation = DomlParserUtilities.save_attributes(obj["resource"], infra_object_representation)
infra_object_representation = DomlParserUtilities.save_inner_components(obj["resource"],
infra_object_representation)
## TODO fix: careful, this overwrites existing entries
ir_key_name = to_camel_case(obj["reference"].eType.name)
to_step["data"][ir_key_name] = [infra_object_representation]
return to_step
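# For each multi-valued reference of the provider in the concrete layer, serialize every
# referenced object (annotations plus attributes), enrich it with the abstract element it
# maps to, and store the resulting list under the reference name in the step's "data".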
def include_infra_object_from_concrete_layer(provider, infra_object_step):
logging.info(f'Adding objects from concrete layer for provider {provider.name}')
for ref in provider.eClass.eReferences:
provider_object_list = get_reference_list_if_exists(provider, ref)
if provider_object_list:
logging.info(
f'Found {len(provider_object_list)} objects "{provider_object_list}" in "{provider.name}"')
object_list_representation = []
for object in provider_object_list:
object_representation = {}
object_representation = DomlParserUtilities.save_annotations(object, object_representation)
object_representation = DomlParserUtilities.save_attributes(object, object_representation)
object_representation = DomlParserUtilities.add_infrastructure_information(object.maps,
object_representation)
object_list_representation.append(object_representation)
infra_object_step["data"][ref.name] = object_list_representation
return infra_object_step
def parse_infrastructural_objects(doml_model):
infra_object_step = {"programming_language": "terraform"} ## TODO refactoring: generalize
concretization_layer = doml_model.eGet(doml_layers["active_infrastructure_layer"])
providers = concretization_layer.providers
for provider in providers:
logging.info(f'Searching objects to be generated for provider "{provider.name}"')
infra_object_step["data"] = {} ## TODO refactoring, fix (maybe list?): generalize
infra_object_step["data"]["provider"] = provider.name ## TODO refactoring: generalize
infra_object_step = include_infra_object_from_concrete_layer(provider, infra_object_step)
infra_object_step = include_missing_objects_from_infrastructure_layer(infra_object_step)
return infra_object_step
def parse_model(model_path):
doml_model = load_model(DOML_PATH, model_path)
model_name = doml_model.name
output_path = OUTPUT_BASE_DIR_PATH + model_name + "/"
intermediate_representation_steps = []
infra_object_step = parse_infrastructural_objects(doml_model)
intermediate_representation_steps.append(infra_object_step)
intermediate_representation = {
"output_path": output_path,
"steps": intermediate_representation_steps
}
return intermediate_representation
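# Example of how the API route earlier in this commit drives this module (illustrative path):
#   ir = parse_model("path/to/nginx-openstack_v2.doml")
#   ir["output_path"]                      # "output_files_generated/<model name>/"
#   ir["steps"][0]["data"]["provider"]     # e.g. "openstack"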
# # -------------------------------------------------------------------------
# # PIACERE ICG Parser
# #
# # This module has been tested with Python v3.7.4
# # To use it you must first install PyEcore
# # $ pip install pyecore
# #
# # Usage: python icgparser.py [-h] [-d dir] [-v] [--single] model
# # -h prints usage
# # -d dir loads metamodel from <dir>
# # --single / --single_mmodel use the single (non-split) metamodel
# # model the input model to be translated into the ICG intermediate representation
# #
# # Author: Lorenzo Blasi
# # 23/2/2022 - created
# # © Copyright 2022 Hewlett Packard Enterprise Development LP
# # -------------------------------------------------------------------------
# import logging
# import sys
# from pyecore.resources import ResourceSet, URI, global_registry
# import pyecore.ecore as Ecore # This gets a reference to the Ecore metamodel implementation
#
# # -------------------------------------------------------------------------
# # Utility functions to printout the loaded model
# # -------------------------------------------------------------------------
# newline = "\n"
# spaces = " "
# comment = "#"
#
#
# def write_to(outchannel, line):
# # for now we just print on the console
# if outchannel == "console":
# print(line)
# # if the channel is different we don't print at all
#
#
# def print_obj(obj, level=0):
# # for x in range(level):
# # print(" ", end='')
# class_name = obj.eClass.name
# if class_name == 'Property':
# # print('Class: {0}\t\t{1} = {2}'.format(class_name, obj.key, obj.value))
# print(f'{comment}{level * spaces}Class: {class_name}\t\t{obj.key} = {obj.value}')
# return False
# if class_name == 'Deployment':
# print(
# f'{comment}{level * spaces}Class: {class_name}\t\tcomponent = {obj.component.eClass.name}/{obj.component.name} node = {obj.node.eClass.name}/{obj.node.name}')
# return False
# try:
# obj_name = obj.name
# print(f'{comment}{level * spaces}Class: {class_name}\t\tObject: {obj_name}')
# return True
# except Exception:
# print(f'{comment}{level * spaces}Class: {class_name}\t\tObject: no name')
# return False
#
#
# def print_contents_recursive(obj, level=0):
# if print_obj(obj, level):
# for x in obj.eContents:
# print_contents_recursive(x, level + 1)
#
#
# # -------------------------------------------------------------------------
# # Utility functions to produce the output Intermediate Language
# # -------------------------------------------------------------------------
# # --- Helpers
# def extract_image_name(concretevm_obj):
# # To find the VM image name you could search into the inverse relations of the abstract image generating its related abstract VM, looking for a concrete image object (whose class is VMImage) and extract the value from its contents
# # concretevm_obj is a VirtualMachine (nginx-openstack_v2.doml:81, it should have been a OpenStackVM),
# # concretevm_obj.maps is a VirtualMachine (the abstract one)
# # concretevm_obj.maps.generatedFrom is a VMImage (the abstract one)
# for x in concretevm_obj.maps.generatedFrom._inverse_rels:
# if x[0].eClass.name == 'VMImage':
# return x[0].eContents[0].value
#
#
# def extract_concrete_network_name(abstractnet_obj):
# for x in abstractnet_obj._inverse_rels:
# if x[0].eClass.name == 'Network':
# return x[0].eContents[0].value
#
#
# # --- Handlers
# def model_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr,
# f'{level * spaces}{{{newline}{level * spaces}{spaces}"output_path": "output_files_generated/{obj.name}/",')
# append_in_file(intermediate_repr, f'{level * spaces}{spaces}"steps": [')
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level + 2, intermediate_repr)
# # output suffix
# append_in_file(intermediate_repr, f'{level * spaces}{spaces}]')
# append_in_file(intermediate_repr, f'{level * spaces}}}')
#
#
# def concrete_infra_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr,
# f'{level * spaces}{{{newline}{level * spaces}{spaces}"programming_language": "terraform",')
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level + 1, intermediate_repr)
# # output suffix
# append_in_file(intermediate_repr, f'{level * spaces}}}')
#
#
# def network_handler(obj, model_root, level, intermediate_repr):
# # ignore the concrete network, since its name has been extracted separately and included in the concrete VM
# logging.warning('Ignoring Network')
#
#
# def property_handler(obj, model_root, level, intermediate_repr):
# key = obj.key
# append_in_file(intermediate_repr, f'{level * spaces}"{key}" : "{obj.value}",')
#
#
# def provider_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr,
# f'{level * spaces}"data": {{{newline}{level * spaces}{spaces}"provider": "{obj.name}",')
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level + 1, intermediate_repr)
# # output suffix
# append_in_file(intermediate_repr, f'{level * spaces}}}')
#
#
# def concrete_vm_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr, f'{level * spaces}"vm": [{{') # VMs can be more than one: I need an example...
# level = level + 1
# # print(f'{level * spaces}# maps {obj.maps.name}')
# logging.warning(f"Ignoring map {obj.maps.name}")
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level, intermediate_repr)
# # add other attributes defined elsewhere: image name, address, ...
# append_in_file(intermediate_repr, f'{level * spaces}"image" : "{extract_image_name(obj)}",')
# for iface in obj.maps.ifaces:
# append_in_file(intermediate_repr, f'{level * spaces}"address" : "{iface.endPoint}",')
# append_in_file(intermediate_repr,
# f'{level * spaces}"network_name" : "{extract_concrete_network_name(iface.belongsTo)}"')
# # output suffix
# level = level - 1
# append_in_file(intermediate_repr, f'{level * spaces}}}]')
#
#
# def vm_image_handler(obj, model_root, level, intermediate_repr):
# # ignore the concrete image, since its image name has been extracted separately and included in the concrete VM
# logging.warning(f'Ignoring VMImage')
#
#
# class_handler = {
#
# "DOMLModel": model_handler,
# "ConcreteInfrastructure": concrete_infra_handler,
# "Network": network_handler,
# "Property": property_handler,
# "RuntimeProvider": provider_handler,
# "VirtualMachine": concrete_vm_handler, # Warning: the class here might change to some concrete VM class
# "VMImage": vm_image_handler
# }
#
#
# def handle_obj(obj, model_root, level, intermediate_repr):
# if obj.eClass.name in class_handler:
# class_handler[obj.eClass.name](obj, model_root, level, intermediate_repr)
# else:
# logging.warning(f'Class {obj.eClass.name} has no handler')
#
#
# # -------------------------------------------------------------------------
# # Load each part of the DOML metamodel and register them
# # -------------------------------------------------------------------------
# def load_metamodel(load_split_model, doml_directory="icgparser/doml"):
# global_registry[Ecore.nsURI] = Ecore # Load the Ecore metamodel first
# rset = ResourceSet()
# if load_split_model:
# mm_parts = ["doml", "commons", "application", "infrastructure", "concrete", "optimization"]
# for mm_filename in mm_parts:
# resource = rset.get_resource(URI(f"{doml_directory}/{mm_filename}.ecore"))
# mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
# rset.metamodel_registry[mm_root.nsURI] = mm_root
# else:
# resource = rset.get_resource(URI(f"{doml_directory}/doml.ecore"))
# mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
# rset.metamodel_registry[mm_root.nsURI] = mm_root
# for subp in mm_root.eSubpackages:
# rset.metamodel_registry[subp.nsURI] = subp
# return rset
#
#
# # -------------------------------------------------------------------------
# # Finally load the model and print it out
# # -------------------------------------------------------------------------
#
# def parse_model(model, load_split_model, doml_directory="icgparser/doml"):
# rset = load_metamodel(load_split_model)
# doml_model_resource = rset.get_resource(URI(model))
# doml_model = doml_model_resource.contents[0]
# single = "single-file (doml.ecore)"
# split = "split"
# dash = "-"
# logging.info(f'{comment}{80 * dash}')
# logging.info(f'{comment} Using {split if load_split_model else single} metamodel from directory {doml_directory}')
# print(f'{comment} Model loaded from file {model}:')
# print(f'{comment}{80 * dash}')
# print_contents_recursive(doml_model)
# print(f'{comment}{80 * dash}{newline}{comment} Generated Intermediate Representation follows:{newline}{comment}')
# intermediate_repr_file_path = "input_file_generated/ir.json"
# create_file("input_file_generated/ir.json")
# handle_obj(doml_model, doml_model, 0, intermediate_repr_file_path)
#
#
# def create_file(file_name):
# f = open(file_name, "w")
# f.write("")
# f.close()
#
#
# def append_in_file(file_name, data):
# f = open(file_name, "a")
# f.write(data)
# f.write("\n")
# f.close()
@@ -2,20 +2,89 @@
"output_path": "output_files_generated/nginx_openstack/",
"steps": [
{
"programming_language": "terraform",
"data": {
"computingGroup": [
{
"addressRanges": [
"0.0.0.0/0",
"::/0"
],
"kind": "EGRESS",
"name": "out_all",
"protocol": "-1"
},
{
"addressRanges": [
"0.0.0.0/0",
"::/0"
],
"fromPort": 80,
"kind": "INGRESS",
"name": "http",
"protocol": "tcp",
"toPort": 80
},
{
"addressRanges": [
"0.0.0.0/0",
"::/0"
],
"fromPort": 443,
"kind": "INGRESS",
"name": "https",
"protocol": "tcp",
"toPort": 443
},
{
"addressRanges": [
"0.0.0.0/0",
"::/0"
],
"fromPort": 22,
"kind": "INGRESS",
"name": "ssh",
"protocol": "tcp",
"toPort": 22
}
],
"networks": [
{
"addressRange": "16.0.0.0/24",
"infra_element_name": "net1",
"name": "concrete_net",
"protocol": "tcp/ip"
}
],
"provider": "openstack",
"vm": [{
"vm_name" : "nginx-host",
"vmImages": [
{
"infra_element_name": "v_img",
"kind": "SCRIPT",
"name": "concrete_vm_image"
}
],
"vms": [
{
"credentials": "ssh_key",
"generatedFrom": "v_img",
"group": "sg",
"i1": {
"associated": "sg",
"belongsTo": "net1",
"endPoint": "16.0.0.1",
"name": "i1"
},
"infra_element_name": "vm1",
"name": "concrete_vm",
"ssh_key_file": "/home/user1/.ssh/openstack.key",
"ssh_user": "ubuntu",
"vm_flavor": "small",
"vm_key_name": "user1",
"ssh_user" : "ubuntu",
"ssh_key_file" : "/home/user1/.ssh/openstack.key",
"image" : "ubuntu-20.04.3",
"address" : "16.0.0.1",
"network_name" : "ostack2"
}]
"vm_name": "nginx-host"
}
]
},
"programming_language": "terraform"
}
]
}
\ No newline at end of file
@@ -10,32 +10,46 @@ required_version = ">= 0.14.0"
# Configure the OpenStack Provider
provider "openstack" {
user_name = "{{ user }}" #admin
tenant_name = "{{ tenant }}" #test
password = "{{ password }}" #test
auth_url = "{{ url }}" #https://127.0.0.1:5000/v3
user_name = var.username
tenant_name = "admin"
password = var.password
auth_url = var.auth_url
insecure = true
}
resource "openstack_compute_keypair_v2" "user_key" {
name = "user1"
public_key = "{{ ssh_key }}" #ssh-rsa xxxx
public_key = var.ssh_key
}
# Retrieve data
data "openstack_networking_network_v2" "external" {
name = "external"
}
data "openstack_identity_project_v3" "test_tenant" {
name = "admin"
}
data "openstack_networking_secgroup_v2" "default" {
name = "default"
tenant_id = data.openstack_identity_project_v3.test_tenant.id
}
# Router creation. UUID external gateway
resource "openstack_networking_router_v2" "generic" {
name = "router-generic"
external_network_id = "${openstack_networking_network_v2.external.id}" #External network id
external_network_id = data.openstack_networking_network_v2.external.id #External network id
}
# Network creation
resource "openstack_networking_network_v2" "generic" {
name = "ostack2"
name = " "
}
#### HTTP SUBNET ####
# Subnet http configuration
# Subnet configuration
resource "openstack_networking_subnet_v2" "nginx" {
name = "subnet-nginx"
network_id = openstack_networking_network_v2.generic.id
@@ -90,7 +104,7 @@ resource "openstack_networking_port_v2" "nginx" {
network_id = openstack_networking_network_v2.generic.id
admin_state_up = true
security_group_ids = [
"${openstack_compute_flavor_v2.default.id}" #default flavour id
data.openstack_networking_secgroup_v2.default.id #default flavour id
]
fixed_ip {
subnet_id = openstack_networking_subnet_v2.nginx.id
@@ -99,10 +113,7 @@ resource "openstack_networking_port_v2" "nginx" {
# Create floating ip
resource "openstack_networking_floatingip_v2" "nginx" {
# pool = "ostack2"
# port_id = openstack_networking_port_v2.nginx.id
pool = "external"
# fixed_ip = "16.0.0.1"
}
# Attach floating ip to instance
......
@@ -10,38 +10,163 @@ required_version = ">= 0.14.0"
# Configure the OpenStack Provider
provider "openstack" {
user_name = "admin"
tenant_name = "test"
password = "wRpuXgVqBzQqGwx8Bu0sylEeb8FgjSYG"
auth_url = "https://127.0.0.1:5000/v3"
user_name = var.username
tenant_name = "admin"
password = var.password
auth_url = var.auth_url
insecure = true
}
resource "openstack_compute_keypair_v2" "user_key" {
name = "user1"
public_key = var.ssh_key
}
# Retrieve data
data "openstack_networking_network_v2" "external" {
name = "external"
}
data "openstack_identity_project_v3" "test_tenant" {
name = "admin"
}
data "openstack_networking_secgroup_v2" "default" {
name = "default"
tenant_id = data.openstack_identity_project_v3.test_tenant.id
}
# Create router
resource "openstack_networking_router_v2" "net1_router" {
name = "net1_router"
external_network_id = data.openstack_networking_network_v2.external.id #External network id
}
# Create virtual machine
resource "openstack_compute_instance_v2" "nginx-host" {
resource "openstack_compute_instance_v2" "vm1" {
name = "nginx-host"
image_name = "ubuntu-20.04.3"
image_name = "i1"
flavor_name = "small"
key_pair = openstack_compute_keypair_v2.nginx-host_ssh_key.name
key_pair = openstack_compute_keypair_v2.ssh_key.name
network {
port = openstack_networking_port_v2.ostack2.id
port = openstack_networking_port_v2.net1.id
}
}
# Create ssh keys
resource "openstack_compute_keypair_v2" "nginx-host_ssh_key" {
resource "openstack_compute_keypair_v2" "ssh_key" {
name = "ubuntu"
public_key = "/home/user1/.ssh/openstack.key"
}
# Create floating ip
resource "openstack_networking_floatingip_v2" "nginx-host_floating_ip" {
resource "openstack_networking_floatingip_v2" "vm1_floating_ip" {
pool = "external"
# fixed_ip = "16.0.0.1"
# fixed_ip = ""
}
# Attach floating ip to instance
resource "openstack_compute_floatingip_associate_v2" "nginx-host_floating_ip_association" {
floating_ip = openstack_networking_floatingip_v2.nginx-host_floating_ip.address
instance_id = openstack_compute_instance_v2.nginx-host.id
resource "openstack_compute_floatingip_associate_v2" "vm1_floating_ip_association" {
floating_ip = openstack_networking_floatingip_v2.vm1_floating_ip.address
instance_id = openstack_compute_instance_v2.vm1.id
}
## Network
# Create Network
resource "openstack_networking_network_v2" "net1" {
name = "concrete_net"
}
# Create Subnet
resource "openstack_networking_subnet_v2" "net1_subnet" {
name = "concrete_net_subnet"
network_id = openstack_networking_network_v2.net1.id
cidr = "16.0.0.0/24"
dns_nameservers = ["8.8.8.8", "8.8.8.4"]
}
# Attach networking port
resource "openstack_networking_port_v2" "net1" {
name = "concrete_net"
network_id = openstack_networking_network_v2.net1.id
admin_state_up = true
security_group_ids = [
data.openstack_networking_secgroup_v2.default.id #default flavour id
]
fixed_ip {
subnet_id = openstack_networking_subnet_v2.net1_subnet.id
}
}
# Router interface configuration
resource "openstack_networking_router_interface_v2" "net1_router_interface" {
router_id = openstack_networking_router_v2.net1_router.id
subnet_id = openstack_networking_subnet_v2.net1_subnet.id
}
resource "openstack_compute_secgroup_v2" "out_all" {
name = "out_all"
description = "Security group rule for port -"
rule {
from_port =
to_port =
ip_protocol = "-1"
cidr = [
0.0.0.0/0,
::/0,
]
}
}
resource "openstack_compute_secgroup_v2" "http" {
name = "http"
description = "Security group rule for port -"
rule {
from_port = 80
to_port = 80
ip_protocol = "tcp"
cidr = [
0.0.0.0/0,
::/0,
]
}
}
resource "openstack_compute_secgroup_v2" "https" {
name = "https"
description = "Security group rule for port -"
rule {
from_port = 443
to_port = 443
ip_protocol = "tcp"
cidr = [
0.0.0.0/0,
::/0,
]
}
}
resource "openstack_compute_secgroup_v2" "ssh" {
name = "ssh"
description = "Security group rule for port -"
rule {
from_port = 22
to_port = 22
ip_protocol = "tcp"
cidr = [
0.0.0.0/0,
::/0,
]
}
}
[terraform.openstack]
init = templates/terraform/open_stack/init.tpl
vm = templates/terraform/open_stack/virtual_machine.tpl
net = templates/terraform/open_stack/network.tpl
sg = templates/terraform/open_stack/port_rule.tpl
vms = templates/terraform/open_stack/virtual_machine.tpl
networks = templates/terraform/open_stack/network.tpl
computingGroup = templates/terraform/open_stack/port_rule.tpl
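# each key mirrors a key of the IR "data" section (vms, networks, computingGroup) and points
# to the Jinja template used to render those objects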
[terraform.azure]
init = templates/terraform/azure/init.tpl
......
@@ -10,9 +10,28 @@ required_version = ">= 0.14.0"
# Configure the OpenStack Provider
provider "openstack" {
user_name = "admin"
tenant_name = "test"
password = "wRpuXgVqBzQqGwx8Bu0sylEeb8FgjSYG"
auth_url = "https://127.0.0.1:5000/v3"
user_name = var.username
tenant_name = "admin"
password = var.password
auth_url = var.auth_url
insecure = true
}
resource "openstack_compute_keypair_v2" "user_key" {
name = "user1"
public_key = var.ssh_key
}
# Retrieve data
data "openstack_networking_network_v2" "external" {
name = "external"
}
data "openstack_identity_project_v3" "test_tenant" {
name = "admin"
}
data "openstack_networking_secgroup_v2" "default" {
name = "default"
tenant_id = data.openstack_identity_project_v3.test_tenant.id
}
\ No newline at end of file
## Network
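{# Context: one entry of the IR "networks" list (infra_element_name, name, addressRange); see the sample ir.json in this commit. #}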
# Create Network
resource "openstack_networking_network_v2" "{{ name }}" {
resource "openstack_networking_network_v2" "{{ infra_element_name }}" {
name = "{{ name }}"
}
# Create Subnet
resource "openstack_networking_subnet_v2" "{{ name ~ "_subnet" }}" {
resource "openstack_networking_subnet_v2" "{{ infra_element_name ~ "_subnet" }}" {
name = "{{ name ~ "_subnet" }}"
network_id = openstack_networking_network_v2.{{ name }}.id
cidr = "{{ address }}"
network_id = openstack_networking_network_v2.{{ infra_element_name }}.id
cidr = "{{ addressRange }}"
dns_nameservers = ["8.8.8.8", "8.8.8.4"]
}
# Attach networking port
resource "openstack_networking_port_v2" "{{ name }}" {
resource "openstack_networking_port_v2" "{{ infra_element_name }}" {
name = "{{ name }}"
network_id = openstack_networking_network_v2.{{ name }}.id
network_id = openstack_networking_network_v2.{{ infra_element_name }}.id
admin_state_up = true
security_group_ids = [
{% for rule_name in rules_name %}
openstack_compute_secgroup_v2.{{ rule_name ~ "_secgroup" }}.id,
{% endfor %}
data.openstack_networking_secgroup_v2.default.id #default flavour id
]
fixed_ip {
subnet_id = openstack_networking_subnet_v2.{{ name ~ "_subnet" }}.id
subnet_id = openstack_networking_subnet_v2.{{ infra_element_name ~ "_subnet" }}.id
}
}
# Create router
resource "openstack_networking_router_v2" "{{ infra_element_name ~ "_router" }}" {
name = "{{ infra_element_name ~ "_router" }}"
external_network_id = data.openstack_networking_network_v2.external.id #External network id
}
# Router interface configuration
resource "openstack_networking_router_interface_v2" "{{ infra_element_name ~ "_router_interface" }}" {
router_id = openstack_networking_router_v2.{{ infra_element_name ~ "_router" }}.id
subnet_id = openstack_networking_subnet_v2.{{ infra_element_name ~ "_subnet" }}.id
}
\ No newline at end of file
resource "openstack_compute_secgroup_v2" "{{ name ~ "_secgroup" }}" {
resource "openstack_compute_secgroup_v2" "{{ name }}" {
name = "{{ name }}"
description = "Security group rule for port {{ from_port }}-{{ to_port }}"
rule {
from_port = {{ from_port }}
to_port = {{ to_port }}
ip_protocol = "{{ ip_protocol }}"
cidr = "{{ ipv6_cidr_blocks }}"
from_port = {{ fromPort }}
to_port = {{ toPort }}
ip_protocol = "{{ protocol }}"
cidr = [
{% for range in addressRanges %}
{{ range }},
{% endfor %}
]
}
}
\ No newline at end of file
# Create virtual machine
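{# Context: one entry of the IR "vms" list (infra_element_name, vm_name, vm_flavor, credentials, ssh_user, ssh_key_file); the network interface is read from the hard-coded "i1" key, see the sample ir.json in this commit. #}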
resource "openstack_compute_instance_v2" "{{ vm_name }}" {
resource "openstack_compute_instance_v2" "{{ infra_element_name }}" {
name = "{{ vm_name }}"
image_name = "{{ image }}"
image_name = "{{ i1.name }}"
flavor_name = "{{ vm_flavor }}"
key_pair = openstack_compute_keypair_v2.{{ vm_name ~ "_ssh_key" }}.name
key_pair = openstack_compute_keypair_v2.{{ credentials }}.name
network {
port = openstack_networking_port_v2.{{ network_name }}.id
port = openstack_networking_port_v2.{{ i1.belongsTo }}.id
}
}
# Create ssh keys
resource "openstack_compute_keypair_v2" "{{ vm_name ~ "_ssh_key" }}" {
resource "openstack_compute_keypair_v2" "{{ credentials }}" {
name = "{{ ssh_user }}"
public_key = "{{ ssh_key_file }}"
}
# Create floating ip
resource "openstack_networking_floatingip_v2" "{{vm_name ~ "_floating_ip"}}" {
resource "openstack_networking_floatingip_v2" "{{infra_element_name ~ "_floating_ip"}}" {
pool = "external"
# fixed_ip = "{{ address }}"
}
# Attach floating ip to instance
resource "openstack_compute_floatingip_associate_v2" "{{ vm_name ~ "_floating_ip_association" }}" {
floating_ip = openstack_networking_floatingip_v2.{{ vm_name ~ "_floating_ip" }}.address
instance_id = openstack_compute_instance_v2.{{ vm_name }}.id
resource "openstack_compute_floatingip_associate_v2" "{{ infra_element_name ~ "_floating_ip_association" }}" {
floating_ip = openstack_networking_floatingip_v2.{{ infra_element_name ~ "_floating_ip" }}.address
instance_id = openstack_compute_instance_v2.{{ infra_element_name }}.id
}