Commit 121bdd0e authored by Debora Benedetto

add ModelPrinter to the parser and refactor model loading

parent 9a158774
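For orientation, a minimal Python sketch (not part of the commit itself) of how the refactored entry points introduced below are invoked; "my_model.domlx" is a placeholder model path:

from icgparser import ModelParser, ModelPrinter

# Print the loaded DOML model to the console using the new ModelPrinter
ModelPrinter.print_model(model_path="my_model.domlx",
                         is_multiecore_metamodel=False,
                         metamodel_directory="icgparser/doml")

# Build the intermediate representation (a dict with "output_path" and "steps")
intermediate_representation = ModelParser.parse_model(model_path="my_model.domlx",
                                                      is_multiecore_metamodel=False,
                                                      metamodel_directory="icgparser/doml")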
@@ -30,7 +30,7 @@ def create_iac_from_doml(data: str = Body(..., media_type="application/xml")):
f = open(temp_model_file_path, "w")
f.write(data)
f.close()
intermediate_representation = ModelParser.parse_model(temp_model_file_path)
intermediate_representation = ModelParser.parse_model(model_path=temp_model_file_path)
intermediate_representation = reorganize_info(intermediate_representation)
save(intermediate_representation, "input_file_generated/ir.json")
template_generated_folder = create_infrastructure_files(intermediate_representation)
import logging
from pyecore.ecore import EOrderedSet, EEnumLiteral
from pyecore.resources import ResourceSet, URI, global_registry
import pyecore.ecore as Ecore # This gets a reference to the Ecore metamodel implementation
TO_BE_PARSED_RESOURCES = {}
METAMODEL_SECTIONS = ["doml", "commons", "application", "infrastructure", "concrete", "optimization"]
METAMODEL_DIRECTORY = "icgparser/doml"
def extract_value_from(ecore_object_value):
if isinstance(ecore_object_value, EOrderedSet):
@@ -99,3 +104,27 @@ def add_infrastructure_information(infrastructure_element, to_object):
def retrieve_missing_parsed_resources():
return TO_BE_PARSED_RESOURCES
def load_metamodel(metamodel_directory=METAMODEL_DIRECTORY, is_multiecore=False):
global_registry[Ecore.nsURI] = Ecore
rset = ResourceSet()
if is_multiecore:
logging.info(f"Loading multiecore metamodel from {metamodel_directory}")
for mm_filename in METAMODEL_SECTIONS:
resource = rset.get_resource(URI(f"{metamodel_directory}/{mm_filename}.ecore"))
mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
rset.metamodel_registry[mm_root.nsURI] = mm_root
else:
        logging.info(f"Loading single-file metamodel from {metamodel_directory}/doml.ecore")
resource = rset.get_resource(URI(f"{metamodel_directory}/doml.ecore"))
mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
rset.metamodel_registry[mm_root.nsURI] = mm_root
for subp in mm_root.eSubpackages:
rset.metamodel_registry[subp.nsURI] = subp
return rset
def load_model(model_path, rset):
doml_model_resource = rset.get_resource(URI(model_path))
return doml_model_resource.contents[0]
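A minimal usage sketch for the two loading modes above (not part of the commit; "my_model.domlx" is a placeholder model path):

from icgparser import DomlParserUtilities

# Split metamodel: every section in METAMODEL_SECTIONS is loaded and registered separately
rset = DomlParserUtilities.load_metamodel(is_multiecore=True)

# Single-file metamodel: doml.ecore is registered together with all of its subpackages
# rset = DomlParserUtilities.load_metamodel(is_multiecore=False)

doml_model = DomlParserUtilities.load_model("my_model.domlx", rset)  # placeholder path
print(doml_model.name)  # the model root exposes a name attribute, used later by the parser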
@@ -14,33 +14,14 @@
# © Copyright 2022 Hewlett Packard Enterprise Development LP
# -------------------------------------------------------------------------
import logging
from pyecore.resources import ResourceSet, URI, global_registry
import pyecore.ecore as Ecore # This gets a reference to the Ecore metamodel implementation
from icgparser import DomlParserUtilities
from icgparser.DomlParserUtilities import get_reference_list_if_exists
OUTPUT_BASE_DIR_PATH = "output_files_generated/"
DOML_PATH = "icgparser/doml/doml.ecore"
doml_layers = {
"active_infrastructure_layer": "activeInfrastructure",
}
def load_model(doml_path, model_path):
global_registry[Ecore.nsURI] = Ecore # Load the Ecore metamodel first
rset = ResourceSet()
resource = rset.get_resource(URI(f"{doml_path}"))
mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
rset.metamodel_registry[mm_root.nsURI] = mm_root
for subp in mm_root.eSubpackages:
rset.metamodel_registry[subp.nsURI] = subp
doml_model_resource = rset.get_resource(URI(model_path))
return doml_model_resource.contents[0]
def to_camel_case(content):
return content[0].lower() + content[1:]
@@ -89,15 +70,22 @@ def parse_infrastructural_objects(doml_model):
infra_object_step = include_missing_objects_from_infrastructure_layer(infra_object_step)
return infra_object_step
def parse_model(model_path):
doml_model = load_model(DOML_PATH, model_path)
model_name = doml_model.name
def create_intermediate_representation(model_loaded):
model_name = model_loaded.name
output_path = OUTPUT_BASE_DIR_PATH + model_name + "/"
intermediate_representation_steps = []
infra_object_step = parse_infrastructural_objects(doml_model)
infra_object_step = parse_infrastructural_objects(model_loaded)
intermediate_representation_steps.append(infra_object_step)
intermediate_representation = {
"output_path": output_path,
"steps": intermediate_representation_steps
}
return intermediate_representation
def parse_model(model_path, is_multiecore_metamodel=False,
                metamodel_directory=DomlParserUtilities.METAMODEL_DIRECTORY):
    rset = DomlParserUtilities.load_metamodel(metamodel_directory=metamodel_directory,
                                              is_multiecore=is_multiecore_metamodel)
    doml_model = DomlParserUtilities.load_model(model_path, rset)
    return create_intermediate_representation(doml_model)
# # -------------------------------------------------------------------------
# # PIACERE ICG Parser
# #
# # This module has been tested with Python v3.7.4
# # To use it you must first install PyEcore
# # $ pip install pyecore
# #
# # Usage: python icgparser.py [-h] [-d dir] [-v] [--single] model
# # -h prints usage
# # -d dir loads metamodel from <dir>
# # --single / --single_mmodel use the single (non-split) metamodel
# # model the input model to be translated into the ICG intermediate representation
# #
# # Author: Lorenzo Blasi
# # 23/2/2022 - created
# # © Copyright 2022 Hewlett Packard Enterprise Development LP
# # -------------------------------------------------------------------------
# import logging
# import sys
# from pyecore.resources import ResourceSet, URI, global_registry
# import pyecore.ecore as Ecore # This gets a reference to the Ecore metamodel implementation
#
# # -------------------------------------------------------------------------
# # Utility functions to printout the loaded model
# # -------------------------------------------------------------------------
# newline = "\n"
# spaces = " "
# comment = "#"
#
#
# def write_to(outchannel, line):
# # for now we just print on the console
# if outchannel == "console":
# print(line)
# # if the channel is different we don't print at all
#
#
# def print_obj(obj, level=0):
# # for x in range(level):
# # print(" ", end='')
# class_name = obj.eClass.name
# if class_name == 'Property':
# # print('Class: {0}\t\t{1} = {2}'.format(class_name, obj.key, obj.value))
# print(f'{comment}{level * spaces}Class: {class_name}\t\t{obj.key} = {obj.value}')
# return False
# if class_name == 'Deployment':
# print(
# f'{comment}{level * spaces}Class: {class_name}\t\tcomponent = {obj.component.eClass.name}/{obj.component.name} node = {obj.node.eClass.name}/{obj.node.name}')
# return False
# try:
# obj_name = obj.name
# print(f'{comment}{level * spaces}Class: {class_name}\t\tObject: {obj_name}')
# return True
# except Exception:
# print(f'{comment}{level * spaces}Class: {class_name}\t\tObject: no name')
# return False
#
#
# def print_contents_recursive(obj, level=0):
# if print_obj(obj, level):
# for x in obj.eContents:
# print_contents_recursive(x, level + 1)
#
#
# # -------------------------------------------------------------------------
# # Utility functions to produce the output Intermediate Language
# # -------------------------------------------------------------------------
# # --- Helpers
# def extract_image_name(concretevm_obj):
# # To find the VM image name you could search into the inverse relations of the abstract image generating its related abstract VM, looking for a concrete image object (whose class is VMImage) and extract the value from its contents
# # concretevm_obj is a VirtualMachine (nginx-openstack_v2.doml:81, it should have been an OpenStackVM),
# # concretevm_obj.maps is a VirtualMachine (the abstract one)
# # concretevm_obj.maps.generatedFrom is a VMImage (the abstract one)
# for x in concretevm_obj.maps.generatedFrom._inverse_rels:
# if x[0].eClass.name == 'VMImage':
# return x[0].eContents[0].value
#
#
# def extract_concrete_network_name(abstractnet_obj):
# for x in abstractnet_obj._inverse_rels:
# if x[0].eClass.name == 'Network':
# return x[0].eContents[0].value
#
#
# # --- Handlers
# def model_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr,
# f'{level * spaces}{{{newline}{level * spaces}{spaces}"output_path": "output_files_generated/{obj.name}/",')
# append_in_file(intermediate_repr, f'{level * spaces}{spaces}"steps": [')
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level + 2, intermediate_repr)
# # output suffix
# append_in_file(intermediate_repr, f'{level * spaces}{spaces}]')
# append_in_file(intermediate_repr, f'{level * spaces}}}')
#
#
# def concrete_infra_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr,
# f'{level * spaces}{{{newline}{level * spaces}{spaces}"programming_language": "terraform",')
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level + 1, intermediate_repr)
# # output suffix
# append_in_file(intermediate_repr, f'{level * spaces}}}')
#
#
# def network_handler(obj, model_root, level, intermediate_repr):
# # ignore the concrete network, since its name has been extracted separately and included in the concrete VM
# logging.warning('Ignoring Network')
#
#
# def property_handler(obj, model_root, level, intermediate_repr):
# key = obj.key
# append_in_file(intermediate_repr, f'{level * spaces}"{key}" : "{obj.value}",')
#
#
# def provider_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr,
# f'{level * spaces}"data": {{{newline}{level * spaces}{spaces}"provider": "{obj.name}",')
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level + 1, intermediate_repr)
# # output suffix
# append_in_file(intermediate_repr, f'{level * spaces}}}')
#
#
# def concrete_vm_handler(obj, model_root, level, intermediate_repr):
# # output prefix
# append_in_file(intermediate_repr, f'{level * spaces}"vm": [{{') # VMs can be more than one: I need an example...
# level = level + 1
# # print(f'{level * spaces}# maps {obj.maps.name}')
# logging.warning(f"Ignoring map {obj.maps.name}")
# # handle contents
# for x in obj.eContents:
# handle_obj(x, model_root, level, intermediate_repr)
# # add other attributes defined elsewhere: image name, address, ...
# append_in_file(intermediate_repr, f'{level * spaces}"image" : "{extract_image_name(obj)}",')
# for iface in obj.maps.ifaces:
# append_in_file(intermediate_repr, f'{level * spaces}"address" : "{iface.endPoint}",')
# append_in_file(intermediate_repr,
# f'{level * spaces}"network_name" : "{extract_concrete_network_name(iface.belongsTo)}"')
# # output suffix
# level = level - 1
# append_in_file(intermediate_repr, f'{level * spaces}}}]')
#
#
# def vm_image_handler(obj, model_root, level, intermediate_repr):
# # ignore the concrete image, since its image name has been extracted separately and included in the concrete VM
# logging.warning(f'Ignoring VMImage')
#
#
# class_handler = {
#
# "DOMLModel": model_handler,
# "ConcreteInfrastructure": concrete_infra_handler,
# "Network": network_handler,
# "Property": property_handler,
# "RuntimeProvider": provider_handler,
# "VirtualMachine": concrete_vm_handler, # Warning: the class here might change to some concrete VM class
# "VMImage": vm_image_handler
# }
#
#
# def handle_obj(obj, model_root, level, intermediate_repr):
# if obj.eClass.name in class_handler:
# class_handler[obj.eClass.name](obj, model_root, level, intermediate_repr)
# else:
# logging.warning(f'Class {obj.eClass.name} has no handler')
#
#
# # -------------------------------------------------------------------------
# # Load each part of the DOML metamodel and register them
# # -------------------------------------------------------------------------
# def load_metamodel(load_split_model, doml_directory="icgparser/doml"):
# global_registry[Ecore.nsURI] = Ecore # Load the Ecore metamodel first
# rset = ResourceSet()
# if load_split_model:
# mm_parts = ["doml", "commons", "application", "infrastructure", "concrete", "optimization"]
# for mm_filename in mm_parts:
# resource = rset.get_resource(URI(f"{doml_directory}/{mm_filename}.ecore"))
# mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
# rset.metamodel_registry[mm_root.nsURI] = mm_root
# else:
# resource = rset.get_resource(URI(f"{doml_directory}/doml.ecore"))
# mm_root = resource.contents[0] # Get the root of the MetaModel (EPackage)
# rset.metamodel_registry[mm_root.nsURI] = mm_root
# for subp in mm_root.eSubpackages:
# rset.metamodel_registry[subp.nsURI] = subp
# return rset
#
#
# # -------------------------------------------------------------------------
# # Finally load the model and print it out
# # -------------------------------------------------------------------------
#
# def parse_model(model, load_split_model, doml_directory="icgparser/doml"):
# rset = load_metamodel(load_split_model)
# doml_model_resource = rset.get_resource(URI(model))
# doml_model = doml_model_resource.contents[0]
# single = "single-file (doml.ecore)"
# split = "split"
# dash = "-"
# logging.info(f'{comment}{80 * dash}')
# logging.info(f'{comment} Using {split if load_split_model else single} metamodel from directory {doml_directory}')
# print(f'{comment} Model loaded from file {model}:')
# print(f'{comment}{80 * dash}')
# print_contents_recursive(doml_model)
# print(f'{comment}{80 * dash}{newline}{comment} Generated Intermediate Representation follows:{newline}{comment}')
# intermediate_repr_file_path = "input_file_generated/ir.json"
# create_file("input_file_generated/ir.json")
# handle_obj(doml_model, doml_model, 0, intermediate_repr_file_path)
#
#
# def create_file(file_name):
# f = open(file_name, "w")
# f.write("")
# f.close()
#
#
# def append_in_file(file_name, data):
# f = open(file_name, "a")
# f.write(data)
# f.write("\n")
# f.close()
import logging
import sys
from pyecore.resources import ResourceSet, URI, global_registry
import pyecore.ecore as Ecore # This gets a reference to the Ecore metamodel implementation
#-------------------------------------------------------------------------
# Utility functions to print out the loaded model
#-------------------------------------------------------------------------
from icgparser import DomlParserUtilities
def print_obj(obj, level=0):
for x in range(level):
print(" ", end='')
class_name = obj.eClass.name
if class_name == 'Property':
print('Class: {0}\t\t{1} = {2}'.format(class_name, obj.key, obj.value))
return False
if class_name == 'Deployment':
print('Class: {0}\t\tcomponent = {1}/{2} node = {3}/{4}'.format(class_name,
obj.component.eClass.name,
obj.component.name,
obj.node.eClass.name,
obj.node.name
))
return False
try:
obj_name = obj.name
print('Class: {0}\t\tObject: {1}'.format(class_name, obj_name))
return True
except Exception:
print('Class: {0}\t\tObject: no name'.format(class_name))
return False
def print_contents_recursive(obj, level=0):
if print_obj(obj, level):
for x in obj.eContents:
print_contents_recursive(x, level+1)
def print_model(model_path, is_multiecore_metamodel, metamodel_directory):
rset = DomlParserUtilities.load_metamodel(metamodel_directory=metamodel_directory,
is_multiecore=is_multiecore_metamodel)
doml_model = DomlParserUtilities.load_model(model_path, rset)
logging.info("Printing model")
print_contents_recursive(doml_model)
@@ -7,7 +7,7 @@ from fastapi import FastAPI
import api.InfrastructureTemplateController
from api.InfrastructureTemplateController import compress_file
from controller.PluginOrchestrator import create_infrastructure_files
from icgparser import ModelParser
from icgparser import ModelParser, ModelPrinter
fast_api = FastAPI()
@@ -56,8 +56,11 @@ for i, param in enumerate(paramlist):
model_filename = param
if __name__ == '__main__':
ModelParser.parse_model(model_filename, load_split_model, doml_directory)
with open("input_file_generated/ir.json") as json_file:
data = json.load(json_file)
template_generated_folder = create_infrastructure_files(data)
compress_file_folder = compress_file(template_generated_folder, output_file_name)
ModelPrinter.print_model(model_path=model_filename, is_multiecore_metamodel=load_split_model,
metamodel_directory=doml_directory)
# ModelParser.parse_model(model_path=model_filename, is_multiecore_metamodel=load_split_model,
# metamodel_directory=doml_directory)
# with open("input_file_generated/ir.json") as json_file:
# data = json.load(json_file)
# template_generated_folder = create_infrastructure_files(data)
# compress_file_folder = compress_file(template_generated_folder, output_file_name)
@@ -35,73 +35,6 @@ data "openstack_networking_secgroup_v2" "default" {
name = "default"
tenant_id = data.openstack_identity_project_v3.test_tenant.id
}
# Create router
resource "openstack_networking_router_v2" "net1_router" {
name = "net1_router"
external_network_id = data.openstack_networking_network_v2.external.id #External network id
}
# Create virtual machine
resource "openstack_compute_instance_v2" "vm1" {
name = "nginx-host"
image_name = "i1"
flavor_name = "small"
key_pair = openstack_compute_keypair_v2.ssh_key.name
network {
port = openstack_networking_port_v2.net1.id
}
}
# Create ssh keys
resource "openstack_compute_keypair_v2" "ssh_key" {
name = "ubuntu"
public_key = "/home/user1/.ssh/openstack.key"
}
# Create floating ip
resource "openstack_networking_floatingip_v2" "vm1_floating_ip" {
pool = "external"
# fixed_ip = ""
}
# Attach floating ip to instance
resource "openstack_compute_floatingip_associate_v2" "vm1_floating_ip_association" {
floating_ip = openstack_networking_floatingip_v2.vm1_floating_ip.address
instance_id = openstack_compute_instance_v2.vm1.id
}
## Network
# Create Network
resource "openstack_networking_network_v2" "net1" {
name = "concrete_net"
}
# Create Subnet
resource "openstack_networking_subnet_v2" "net1_subnet" {
name = "concrete_net_subnet"
network_id = openstack_networking_network_v2.net1.id
cidr = "16.0.0.0/24"
dns_nameservers = ["8.8.8.8", "8.8.8.4"]
}
# Attach networking port
resource "openstack_networking_port_v2" "net1" {
name = "concrete_net"
network_id = openstack_networking_network_v2.net1.id
admin_state_up = true
security_group_ids = [
    data.openstack_networking_secgroup_v2.default.id # default security group id
]
fixed_ip {
subnet_id = openstack_networking_subnet_v2.net1_subnet.id
}
}
# Router interface configuration
resource "openstack_networking_router_interface_v2" "net1_router_interface" {
router_id = openstack_networking_router_v2.net1_router.id
subnet_id = openstack_networking_subnet_v2.net1_subnet.id
}
resource "openstack_compute_secgroup_v2" "out_all" {
name = "out_all"
description = "Security group rule for port -"
@@ -170,3 +103,71 @@ resource "openstack_compute_secgroup_v2" "ssh" {
}
}
## Network
# Create Network
resource "openstack_networking_network_v2" "net1" {
name = "concrete_net"
}
# Create Subnet
resource "openstack_networking_subnet_v2" "net1_subnet" {
name = "concrete_net_subnet"
network_id = openstack_networking_network_v2.net1.id
cidr = "16.0.0.0/24"
dns_nameservers = ["8.8.8.8", "8.8.8.4"]
}
# Attach networking port
resource "openstack_networking_port_v2" "net1" {
name = "concrete_net"
network_id = openstack_networking_network_v2.net1.id
admin_state_up = true
security_group_ids = [
    data.openstack_networking_secgroup_v2.default.id # default security group id
]
fixed_ip {
subnet_id = openstack_networking_subnet_v2.net1_subnet.id
}
}
# Create router
resource "openstack_networking_router_v2" "net1_router" {
name = "net1_router"
external_network_id = data.openstack_networking_network_v2.external.id #External network id
}
# Router interface configuration
resource "openstack_networking_router_interface_v2" "net1_router_interface" {
router_id = openstack_networking_router_v2.net1_router.id
subnet_id = openstack_networking_subnet_v2.net1_subnet.id
}
# Create virtual machine
resource "openstack_compute_instance_v2" "vm1" {
name = "nginx-host"
image_name = "i1"
flavor_name = "small"
key_pair = openstack_compute_keypair_v2.ssh_key.name
network {
port = openstack_networking_port_v2.net1.id
}
}
# Create ssh keys
resource "openstack_compute_keypair_v2" "ssh_key" {
name = "ubuntu"
public_key = "/home/user1/.ssh/openstack.key"
}
# Create floating ip
resource "openstack_networking_floatingip_v2" "vm1_floating_ip" {
pool = "external"
# fixed_ip = ""
}
# Attach floating ip to instance
resource "openstack_compute_floatingip_associate_v2" "vm1_floating_ip_association" {
floating_ip = openstack_networking_floatingip_v2.vm1_floating_ip.address
instance_id = openstack_compute_instance_v2.vm1.id
}