Commit 6352cd7f authored by Benedetto Debora

Refactoring: move to FastAPI, create template directory, update Dockerfile

parent e340a454
Showing with 250 additions and 396 deletions
.idea
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# PEP 582; used by e.g. github.com/David-OConnor/pyflow
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
\ No newline at end of file
resource "aws_db_instance" "{{ identifier }}" {
identifier = "{{ identifier }}"
instance_class = "{{ instance }}"
allocated_storage = {{ storage }}
engine = "{{ engine }}"
engine_version = "{{ version }}"
username = "{{ username }}"
password = {{ password }}
db_subnet_group_name = {{ subnet }}
vpc_security_group_ids = {{ security }}
parameter_group_name = {{ parameter }}
publicly_accessible = {{ accessible }}
skip_final_snapshot = {{ skip }}
}
\ No newline at end of file
resource "azurerm_sql_database" "test2" {
name = "{{ name }}"
resource_group_name = "{{ group_name}}"
location = "{{ location }}"
server_name = "{{ server_name }}"
threat_detection_policy {
state = "{{ state }}"
email_addresses = {{ email }}
retention_days = "{{ days }}"
storage_account_access_key = "{{ access_key }}"
storage_endpoint = "{{ endpoint }}"
use_server_default = "{{ default }}"
}
}
\ No newline at end of file
resource "google_sql_database" "database" {
name = "{{ name }}"
instance = {{ instance }}
}
resource "google_sql_database_instance" "instance" {
name = "{{ instance_name }}"
region = "{{ region }}"
settings {
tier = "{{ tier }}"
}
deletion_protection = "{{ deletion_protection }}"
}
\ No newline at end of file
resource "postgresql_database" {{ name }} {
name = {{ name }}
owner = {{ owner }}
template = {{ template }}
lc_collate = {{ lc_collate }}
connection_limit = {{ connection_limit }}
allow_connections = {{ allow_connections }}
}
\ No newline at end of file
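The four .tf files above are Jinja-style templates: every {{ placeholder }} is filled in by the generator before the Terraform code is written out. As a minimal sketch of that step (assuming Jinja2 as the rendering engine and a hypothetical templates/aws_db_instance.tf.j2 path; neither is confirmed by this commit):

# Rendering sketch only; the template file name and all variable values are illustrative assumptions.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("templates"))
template = env.get_template("aws_db_instance.tf.j2")  # hypothetical file name

rendered = template.render(
    identifier="example-db",
    instance="db.t3.micro",
    storage=20,
    engine="postgres",
    version="13.4",
    username="admin",
    password='"changeme"',            # the template leaves this unquoted, so quotes go in the value
    subnet='"example-subnet-group"',
    security='["sg-12345678"]',
    parameter='"default.postgres13"',
    accessible="false",
    skip="true",
)

with open("main.tf", "w") as f:
    f.write(rendered)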
@@ -5,4 +5,4 @@ COPY . /opt/
RUN pip install -r requirements.txt
CMD [ "python3", "/opt/app.py"]
\ No newline at end of file
CMD ["uvicorn", "app.main:fast_api", "--host", "0.0.0.0", "--port", "8080"]
\ No newline at end of file
from ansibleBuilder import *
from terraformBuilder import *
import json, sys, os


def ICG_call(parameters):
    os.system('rm -f /opt/Output-code/*')
    for step in parameters["steps"]:
        if step["programming_language"] == "ansible":
            input_data = InputData(app_type=step["type"], code_path=step["output_path"], template_type=step["info"]["name"], template_path=step["info"]["template_path"], template_data=step["data"])
            icg = AnsibleICG()
            icg.generate_code(input_data)
        elif step["programming_language"] == "terraform":
            input_data = step["data"]
            TerraformICG(input_data)


if __name__ == '__main__':
    arg_len = len(sys.argv)
    if arg_len > 1:
        file_name = sys.argv[1]
    else:
        print("Add parameters file name")
        sys.exit()
    input_file = open(file_name, "r")
    parameters = json.load(input_file)
    ICG_call(parameters)
\ No newline at end of file
terraform {
  required_version = ">= 0.14.0"

  required_providers {
    openstack = {
      source  = "terraform-provider-openstack/openstack"
      version = "~> 1.35.0"
    }
  }
}

# Configure the OpenStack Provider
provider "openstack" {
  user_name   = "admin"
  tenant_name = "admin"
  password    = "pwd"
  auth_url    = "http://myauthurl:5000/v2.0"
  region      = "RegionOne"
}

resource "openstack_compute_keypair_v2" "user_key" {
  name       = "user1"
  public_key = "ssh-rsa XXXXXX"
}

# Router creation. UUID external gateway
resource "openstack_networking_router_v2" "generic" {
  name                = "router-generic"
  external_network_id = "f67f0d72-0ddf-11e4-9d95-e1f29f417e2f"
}

# Network creation
resource "openstack_networking_network_v2" "generic" {
  name = "ostack2"
}

#### HTTP SUBNET ####

# Subnet http configuration
resource "openstack_networking_subnet_v2" "nginx" {
  name            = "subnet-nginx"
  network_id      = openstack_networking_network_v2.generic.id
  cidr            = "16.0.0.0/24"
  dns_nameservers = ["8.8.8.8", "8.8.8.4"]
}

# Router interface configuration
resource "openstack_networking_router_interface_v2" "nginx" {
  router_id = openstack_networking_router_v2.generic.id
  subnet_id = openstack_networking_subnet_v2.nginx.id
}

resource "openstack_compute_secgroup_v2" "http" {
  name        = "http"
  description = "Open input http port"

  rule {
    from_port   = 80
    to_port     = 80
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }
}

resource "openstack_compute_secgroup_v2" "ssh" {
  name        = "ssh"
  description = "Open input ssh port"

  rule {
    from_port   = 22
    to_port     = 22
    ip_protocol = "tcp"
    cidr        = "0.0.0.0/0"
  }
}

#
# Create instance
#
resource "openstack_compute_instance_v2" "nginx" {
  name        = "nginx-host"
  image_name  = "ubuntu-20.04.3"
  flavor_name = "t2.small"
  key_pair    = openstack_compute_keypair_v2.user_key.name

  network {
    port = openstack_networking_port_v2.nginx.id
  }
}

# Create network port
resource "openstack_networking_port_v2" "nginx" {
  name           = "nginx"
  network_id     = openstack_networking_network_v2.generic.id
  admin_state_up = true
  security_group_ids = [
    openstack_compute_secgroup_v2.ssh.id,
    openstack_compute_secgroup_v2.http.id,
  ]

  fixed_ip {
    subnet_id = openstack_networking_subnet_v2.nginx.id
  }
}

# Create floating ip
resource "openstack_networking_floatingip_v2" "nginx" {
  pool     = "external-network"
  fixed_ip = "16.0.0.1"
}

# Attach floating ip to instance
resource "openstack_compute_floatingip_associate_v2" "nginx" {
  floating_ip = openstack_networking_floatingip_v2.nginx.address
  instance_id = openstack_compute_instance_v2.nginx.id
}
\ No newline at end of file
data "{{ vm }}" "ami{{ id }}" {
#executable_users = {{ executable_users }}
most_recent = {{ mostrecent }}
name_regex = "{{ name_regex }}"
#owners = {{ owners }}
{{ filters }}
owners = ["099720109477"] # Canonical
}
resource "aws_instance" "instance{{ id }}" {
ami = data.aws_ami.ami{{ id }}.id
instance_type = "{{ instance_type }}"
tags = {
Name = "{{ name }}"
}
}
File deleted
File deleted
File deleted
import logging
import tarfile

from fastapi import APIRouter, Body
from fastapi.responses import FileResponse

from plugin import TerraformPlugin
from plugin import AnsiblePlugin

api_router = APIRouter()


@api_router.post("/infrastructure/files")
def create_infrastructure_files(intermediate_representation: dict = Body(...)):
    logging.info("Received intermediate representation create_infrastructure_files request")
    choose_plugin(intermediate_representation)
    logging.info("Creating compress folder with iac files")
    output_template_folder = intermediate_representation["output_path"]
    compress_file_name = "outputIaC.tar.gz"
    compress_file_path = compress_file(output_template_folder, compress_file_name)
    return FileResponse(compress_file_path, media_type='application/octet-stream', filename=compress_file_name)


def choose_plugin(parameters):
    # os.system('rm -f /opt/Output-code/*')
    for step in parameters["steps"]:
        if step["programming_language"] == "ansible":
            input_data = step["data"]
            AnsiblePlugin.create_files(input_data, parameters["output_path"])
        elif step["programming_language"] == "terraform":
            input_data = step["data"]
            TerraformPlugin.create_files(input_data, parameters["output_path"])


def compress_file(source_folder, dest_file_name):
    # prefix_path = "/opt/"
    prefix_path = ""
    logging.info("Compressing folder %s into destination %s", prefix_path + source_folder,
                 prefix_path + dest_file_name)
    with tarfile.open(prefix_path + dest_file_name, "w:gz") as tar:
        tar.add(source_folder, arcname='.')
    return prefix_path + dest_file_name
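The new Dockerfile CMD starts app.main:fast_api with uvicorn, so the api_router defined above presumably gets mounted on a FastAPI instance named fast_api. A minimal wiring sketch, where only fast_api (from the Dockerfile) and api_router (from the file above) come from the commit and every other name is assumed:

# app/main.py -- wiring sketch; the module layout and import path are assumptions.
import logging

from fastapi import FastAPI

from api import api_router  # hypothetical module name for the router file above

logging.basicConfig(level=logging.INFO)

fast_api = FastAPI(title="Infrastructure Code Generator")
fast_api.include_router(api_router)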
from ICG import *
from flask import Flask, request, send_file
import tarfile
import os.path


def create_app(test_config=None):
    app = Flask(__name__)
    app.config["CLIENT_IMAGES"] = "/opt"

    @app.post("/")
    def ICG():
        if request.is_json:
            parameters = request.get_json()
            ICG_call(parameters)
            with tarfile.open("/opt/outputIaC.tar.gz", "w:gz") as tar:
                tar.add("/opt/Output-code", arcname=os.path.basename("/opt/Output-code"))
            file_name = "/opt/outputIaC.tar.gz"
            return send_file(file_name, attachment_filename='outputIaC.tar.gz'), 201
        return {"error": "Request must be JSON"}, 415

    return app


APP = create_app()

if __name__ == '__main__':
    APP.run(host='0.0.0.0', port=5000, debug=True)
\ No newline at end of file
def vmcatalog1(vcpu, gib, sigla):
    # Map a (vCPU, GiB RAM, family prefix) triple to an AWS instance type name.
    vcpu = str(vcpu)
    gib = str(gib)
    if sigla == "mac" and vcpu == '12' and gib == '32':
        return "mac1.metal"
    elif sigla == "t2":
        if vcpu == '1':
            if gib == '0.5':
                return "t2.nano"
            elif gib == '1':
                return "t2.micro"
            elif gib == '2':
                return "t2.small"
        elif vcpu == '2':
            if gib == '4':
                return "t2.medium"
            elif gib == '8':
                return "t2.large"
        elif vcpu == '4' and gib == '16':
            return "t2.xlarge"
        elif vcpu == '8' and gib == '32':
            return "t2.2xlarge"
    elif sigla == "m6g" or sigla == "m6gd":
        if vcpu == '1' and gib == '4':
            return sigla + ".medium"
        if vcpu == '2' and gib == '8':
            return sigla + ".large"
        if vcpu == '4' and gib == '16':
            return sigla + ".xlarge"
        if vcpu == '8' and gib == '32':
            return sigla + ".2xlarge"
        if vcpu == '16' and gib == '64':
            return sigla + ".4xlarge"
        if vcpu == '32' and gib == '128':
            return sigla + ".8xlarge"
        if vcpu == '48' and gib == '192':
            return sigla + ".12xlarge"
        if vcpu == '64' and gib == '256':
            return sigla + ".16xlarge"
    elif "m5" in sigla or sigla == "m4":
        if vcpu == '2' and gib == '8':
            return sigla + ".large"
        if vcpu == '4' and gib == '16':
            return sigla + ".xlarge"
        if vcpu == '8' and gib == '32':
            return sigla + ".2xlarge"
        if vcpu == '16' and gib == '64':
            return sigla + ".4xlarge"
        if vcpu == '32' and gib == '128':
            return sigla + ".8xlarge"
        if vcpu == '48' and gib == '192':
            return sigla + ".12xlarge"
        if vcpu == '64' and gib == '256':
            return sigla + ".16xlarge"
        if vcpu == '96' and gib == '384':
            return sigla + ".24xlarge"
    elif sigla == "a1":
        if vcpu == '1' and gib == '2':
            return "a1.medium"
        if vcpu == '2' and gib == '4':
            return "a1.large"
        if vcpu == '4' and gib == '8':
            return "a1.xlarge"
        if vcpu == '8' and gib == '16':
            return "a1.2xlarge"
        if vcpu == '16' and gib == '32':
            return "a1.4xlarge"
    elif vcpu == '2':
        if gib == '0.5':
            return sigla + ".nano"
        elif gib == '1':
            return sigla + ".micro"
        elif gib == '2':
            return sigla + ".small"
        elif gib == '4':
            return sigla + ".medium"
        elif gib == '8':
            return sigla + ".large"
    elif vcpu == '4' and gib == '16':
        return sigla + ".xlarge"
    elif vcpu == '8' and gib == '32':
        return sigla + ".2xlarge"
    return "t3.micro"
def vmcatalog(vcpu, gib):
    # Map a (vCPU, GiB RAM) pair to an AWS instance type name.
    vcpu = str(vcpu)
    gib = str(gib)
    if vcpu == '12' and gib == '32':
        return "mac1.metal"
    if vcpu == '1':
        if gib == '0.5':
            return "t2.nano"
        if gib == '1':
            return "t2.micro"
        if gib == '2':
            return "t2.small"
    if vcpu == '2':
        if gib == '4':
            return "t2.medium"
        if gib == '8':
            return "t2.large"
    if vcpu == '4' and gib == '16':
        return "t2.xlarge"
    if vcpu == '8' and gib == '32':
        return "t2.2xlarge"
    # Note: several of the checks below repeat earlier conditions and are therefore unreachable.
    if vcpu == '1' and gib == '4':
        return "m6g.medium"
    if vcpu == '2' and gib == '8':
        return "m6g.large"
    if vcpu == '4' and gib == '16':
        return "m6g.xlarge"
    if vcpu == '8' and gib == '32':
        return "m6g.2xlarge"
    if vcpu == '16' and gib == '64':
        return "m6g.4xlarge"
    if vcpu == '32' and gib == '128':
        return "m6g.8xlarge"
    if vcpu == '48' and gib == '192':
        return "m6g.12xlarge"
    if vcpu == '64' and gib == '256':
        return "m6g.16xlarge"
    if vcpu == '2' and gib == '8':
        return "m4.large"
    if vcpu == '4' and gib == '16':
        return "m4.xlarge"
    if vcpu == '8' and gib == '32':
        return "m4.2xlarge"
    if vcpu == '16' and gib == '64':
        return "m4.4xlarge"
    if vcpu == '32' and gib == '128':
        return "m4.8xlarge"
    if vcpu == '48' and gib == '192':
        return "m4.12xlarge"
    if vcpu == '64' and gib == '256':
        return "m4.16xlarge"
    if vcpu == '96' and gib == '384':
        return "m4.24xlarge"
    if vcpu == '1' and gib == '2':
        return "a1.medium"
    if vcpu == '2' and gib == '4':
        return "a1.large"
    if vcpu == '4' and gib == '8':
        return "a1.xlarge"
    if vcpu == '8' and gib == '16':
        return "a1.2xlarge"
    if vcpu == '16' and gib == '32':
        return "a1.4xlarge"
    if vcpu == '2':
        if gib == '0.5':
            return "t3.nano"
        if gib == '1':
            return "t3.micro"
        if gib == '2':
            return "t3.small"
        if gib == '4':
            return "t3.medium"
        if gib == '8':
            return "t3.large"
    if vcpu == '4' and gib == '16':
        return "t3.xlarge"
    if vcpu == '8' and gib == '32':
        return "t3.2xlarge"
    return "t2.micro"
# Azure specific functionalities
\ No newline at end of file
# Google Cloud Platform specific functionalities
\ No newline at end of file
{
  "output_path": "output_file_example/nginx_openstack/",
  "steps": [
    {
      "programming_language": "terraform",
      "data": {
        "provider": "openstack",
        "vm": [{
          "name": "nginx-host",
          "flavor": "small",
          "vm_security_groups": "default",
          "ssh_user": "ubuntu",
          "ssh_key_file": "/home/user1/.ssh/openstack.key",
          "address": "16.0.0.1",
          "image": "ubuntu-20.04.3",
          "network_name": "ostack2"
        }],
        "net": [{
          "name": "ostack2",
          "address": "16.0.0.0/24",
          "protocol": "tcp/ip",
          "rules_name": ["rule_1", "rule_2"]
        }],
        "sg": [{
          "name": "rule_1",
          "from_port": 80,
          "to_port": 80,
          "ip_protocol": "tcp",
          "ipv6_cidr_blocks": "0.0.0.0/0"
        }, {
          "name": "rule_2",
          "from_port": 22,
          "to_port": 22,
          "ip_protocol": "tcp",
          "ipv6_cidr_blocks": "0.0.0.0/0"
        }]
      }
    },
    {
      "programming_language": "ansible",
      "data": {
        "operating_system": "ubuntu",
        "nginx": {
          "ssh_user": "ubuntu",
          "ssh_key_file": "/home/user1/.ssh/openstack.key",
          "address": "16.0.0.1",
          "source_code": [
            "/var/www/html/index.html",
            "/usr/share/nginx/html/index.html"
          ]
        }
      }
    }
  ]
}
\ No newline at end of file
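With the container from this commit running (uvicorn listens on port 8080), the example parameters above can be posted to the new endpoint roughly as follows; the base URL and the local file names are illustrative assumptions:

# Client sketch: POST the example intermediate representation and save the returned archive.
# The base URL and local file names are assumptions, not part of the commit.
import json

import requests

with open("nginx_openstack.json") as f:          # the example parameters file above
    intermediate_representation = json.load(f)

response = requests.post(
    "http://localhost:8080/infrastructure/files",
    json=intermediate_representation,
)
response.raise_for_status()

with open("outputIaC.tar.gz", "wb") as f:        # file name matches the FileResponse above
    f.write(response.content)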