From 54f18aa91b6c7ccec3ba1b75fc5f7558960237a3 Mon Sep 17 00:00:00 2001 From: Ruben Vasconcelos Date: Sun, 23 Sep 2018 16:57:05 +0100 Subject: [PATCH] Inital commit. --- .dockerignore | 21 + .gitignore | 19 + Dockerfile | 20 + Dockerfile.binary | 20 + README.md | 40 + paaspure/__init__.py | 3 + paaspure/__main__.py | 88 +++ paaspure/abstract/__init__.py | 7 + paaspure/abstract/argparser.py | 17 + paaspure/abstract/component.py | 22 + paaspure/abstract/module.py | 75 ++ paaspure/argparser/README.md | 61 ++ paaspure/argparser/__init__.py | 5 + paaspure/argparser/parser.py | 71 ++ paaspure/deployer/README.md | 23 + paaspure/deployer/__init__.py | 5 + paaspure/deployer/argparser.py | 39 + paaspure/deployer/gitlab-runner/.gitignore | 2 + paaspure/deployer/gitlab-runner/Dockerfile | 14 + paaspure/deployer/gitlab-runner/README.md | 38 + .../gitlab-runner/config/config.toml.template | 9 + .../deployer/gitlab-runner/docker-compose.yml | 8 + paaspure/deployer/jenkins/README.md | 15 + paaspure/deployer/jenkins/__init__.py | 5 + paaspure/deployer/jenkins/component.py | 25 + paaspure/deployer/jenkins/docker-compose.yml | 26 + paaspure/deployer/module.py | 19 + paaspure/deployer/portainer/README.md | 15 + paaspure/deployer/portainer/__init__.py | 5 + paaspure/deployer/portainer/component.py | 28 + .../deployer/portainer/docker-compose.yml | 41 + paaspure/deployer/registry/README.md | 33 + paaspure/deployer/registry/__init__.py | 5 + paaspure/deployer/registry/component.py | 25 + paaspure/deployer/registry/docker-compose.yml | 103 +++ paaspure/deployer/visualizer/README.md | 15 + paaspure/deployer/visualizer/__init__.py | 5 + paaspure/deployer/visualizer/component.py | 25 + .../deployer/visualizer/docker-compose.yml | 25 + paaspure/dummy_component/__init__.py | 6 + paaspure/dummy_component/component.py | 20 + paaspure/dummy_component/requirements.py | 11 + paaspure/generate/README.md | 48 ++ paaspure/generate/__init__.py | 5 + paaspure/generate/argparser.py | 64 ++ 
paaspure/generate/generator.py | 87 +++ .../templates/component/component.template | 0 paaspure/generate/templates/module/README-md | 16 + .../generate/templates/module/__init__-py | 6 + .../generate/templates/module/argparser-py | 31 + paaspure/generate/templates/module/module-py | 22 + paaspure/infra/README.md | 28 + paaspure/infra/__init__.py | 5 + paaspure/infra/argparser.py | 39 + paaspure/infra/hybrid_aws/00-vars.tf | 7 + paaspure/infra/hybrid_aws/01-aws-infra.tf | 110 +++ paaspure/infra/hybrid_aws/02-create-inv.tf | 20 + paaspure/infra/hybrid_aws/Dockerfile.ansible | 11 + .../infra/hybrid_aws/Dockerfile.terraform | 7 + paaspure/infra/hybrid_aws/README.md | 37 + paaspure/infra/hybrid_aws/__init__.py | 6 + paaspure/infra/hybrid_aws/component.py | 115 +++ paaspure/infra/hybrid_aws/requirements.py | 14 + paaspure/infra/hybrid_aws/swarm-join.yml | 31 + paaspure/infra/hybrid_aws/swarm-leave.yml | 6 + paaspure/infra/module.py | 19 + paaspure/infra/terraform_aws/Dockerfile | 7 + paaspure/infra/terraform_aws/README.md | 36 + paaspure/infra/terraform_aws/__init__.py | 6 + paaspure/infra/terraform_aws/component.py | 55 ++ paaspure/infra/terraform_aws/main.tf | 18 + paaspure/infra/terraform_aws/requirements.py | 14 + .../infra/terraform_aws/test_terraform_aws.py | 63 ++ paaspure/infra/terraform_azure/Docker.tmpl | 722 ++++++++++++++++++ paaspure/infra/terraform_azure/Dockerfile | 7 + paaspure/infra/terraform_azure/README.md | 76 ++ paaspure/infra/terraform_azure/__init__.py | 6 + paaspure/infra/terraform_azure/component.py | 65 ++ paaspure/infra/terraform_azure/main.tf | 22 + .../infra/terraform_azure/requirements.py | 14 + .../terraform_azure/test_terraform_azure.py | 69 ++ paaspure/infra/test_infra.py | 0 paaspure/log_management/README.md | 23 + paaspure/log_management/__init__.py | 5 + paaspure/log_management/argparser.py | 39 + paaspure/log_management/elk_stack/README.md | 15 + paaspure/log_management/elk_stack/__init__.py | 5 + 
.../log_management/elk_stack/component.py | 28 + .../elk_stack/docker-compose.yml | 79 ++ paaspure/log_management/module.py | 19 + paaspure/monitoring/README.md | 23 + paaspure/monitoring/__init__.py | 5 + paaspure/monitoring/argparser.py | 39 + paaspure/monitoring/module.py | 19 + paaspure/monitoring/prom_stack/README.md | 29 + paaspure/monitoring/prom_stack/__init__.py | 5 + paaspure/monitoring/prom_stack/component.py | 28 + .../prom_stack/config/prometheus.yml | 39 + .../monitoring/prom_stack/docker-compose.yml | 86 +++ paaspure/network/README.md | 23 + paaspure/network/__init__.py | 5 + paaspure/network/argparser.py | 39 + paaspure/network/module.py | 21 + paaspure/network/traefik/README.md | 16 + paaspure/network/traefik/__init__.py | 5 + paaspure/network/traefik/component.py | 28 + paaspure/network/traefik/config/traefik | 15 + paaspure/network/traefik/docker-compose.yml | 15 + paaspure/orchestrator/README.md | 23 + paaspure/orchestrator/__init__.py | 5 + paaspure/orchestrator/argparser.py | 45 ++ paaspure/orchestrator/module.py | 33 + paaspure/orchestrator/swarm_aws/README.md | 21 + paaspure/orchestrator/swarm_aws/__init__.py | 6 + paaspure/orchestrator/swarm_aws/component.py | 78 ++ .../orchestrator/swarm_aws/requirements.py | 19 + paaspure/orchestrator/swarm_azure/README.md | 18 + paaspure/orchestrator/swarm_azure/__init__.py | 6 + .../orchestrator/swarm_azure/component.py | 68 ++ .../orchestrator/swarm_azure/requirements.py | 23 + paaspure/pull/README.md | 42 + paaspure/pull/__init__.py | 5 + paaspure/pull/argparser.py | 90 +++ paaspure/pull/puller.py | 141 ++++ paaspure/pull/utils.py | 34 + paaspure/settings.py | 15 + paaspure/utils/__init__.py | 26 + paaspure/utils/docker.py | 109 +++ paaspure/utils/files.py | 36 + paaspure/utils/general.py | 26 + paaspure/utils/hub.py | 83 ++ paaspure/utils/package.py | 60 ++ paaspure/vm_builder/README.md | 31 + paaspure/vm_builder/__init__.py | 5 + paaspure/vm_builder/argparser.py | 39 + paaspure/vm_builder/module.py 
| 17 + paaspure/vm_builder/packer_aws/Dockerfile | 7 + paaspure/vm_builder/packer_aws/README.md | 31 + paaspure/vm_builder/packer_aws/__init__.py | 6 + paaspure/vm_builder/packer_aws/component.py | 127 +++ .../packer_files/docker_ubuntu.json | 40 + .../packer_files/golden_ubuntu.json | 31 + .../packer_aws/packer_files/variables.json | 4 + .../vm_builder/packer_aws/requirements.py | 15 + .../vm_builder/packer_aws/test_packer_aws.py | 107 +++ paaspure/vm_builder/test_vm_builder.py | 42 + requirements.txt | 27 + sample_configs/aws.yml | 54 ++ sample_configs/azure.yml | 55 ++ sample_configs/hybrid.yml | 49 ++ setup.cfg | 8 + setup.py | 41 + tests/argparser/__init__.py | 1 + tests/argparser/test_paaspure_parser.py | 31 + tests/generate/__init__.py | 1 + tests/generate/test_paaspure_generate.py | 121 +++ tests/pull/__init__.py | 1 + tests/pull/test_paaspure_pull.py | 110 +++ tests/utils/__init__.py | 1 + tests/utils/test_files.py | 38 + tests/utils/test_general.py | 16 + tests/utils/test_package.py | 48 ++ 162 files changed, 5742 insertions(+) create mode 100644 .dockerignore create mode 100644 .gitignore create mode 100644 Dockerfile create mode 100644 Dockerfile.binary create mode 100644 README.md create mode 100644 paaspure/__init__.py create mode 100644 paaspure/__main__.py create mode 100644 paaspure/abstract/__init__.py create mode 100644 paaspure/abstract/argparser.py create mode 100644 paaspure/abstract/component.py create mode 100644 paaspure/abstract/module.py create mode 100644 paaspure/argparser/README.md create mode 100644 paaspure/argparser/__init__.py create mode 100644 paaspure/argparser/parser.py create mode 100644 paaspure/deployer/README.md create mode 100644 paaspure/deployer/__init__.py create mode 100644 paaspure/deployer/argparser.py create mode 100644 paaspure/deployer/gitlab-runner/.gitignore create mode 100644 paaspure/deployer/gitlab-runner/Dockerfile create mode 100644 paaspure/deployer/gitlab-runner/README.md create mode 100644 
paaspure/deployer/gitlab-runner/config/config.toml.template create mode 100644 paaspure/deployer/gitlab-runner/docker-compose.yml create mode 100644 paaspure/deployer/jenkins/README.md create mode 100644 paaspure/deployer/jenkins/__init__.py create mode 100644 paaspure/deployer/jenkins/component.py create mode 100644 paaspure/deployer/jenkins/docker-compose.yml create mode 100644 paaspure/deployer/module.py create mode 100644 paaspure/deployer/portainer/README.md create mode 100644 paaspure/deployer/portainer/__init__.py create mode 100644 paaspure/deployer/portainer/component.py create mode 100644 paaspure/deployer/portainer/docker-compose.yml create mode 100644 paaspure/deployer/registry/README.md create mode 100644 paaspure/deployer/registry/__init__.py create mode 100644 paaspure/deployer/registry/component.py create mode 100644 paaspure/deployer/registry/docker-compose.yml create mode 100644 paaspure/deployer/visualizer/README.md create mode 100644 paaspure/deployer/visualizer/__init__.py create mode 100644 paaspure/deployer/visualizer/component.py create mode 100644 paaspure/deployer/visualizer/docker-compose.yml create mode 100644 paaspure/dummy_component/__init__.py create mode 100644 paaspure/dummy_component/component.py create mode 100644 paaspure/dummy_component/requirements.py create mode 100644 paaspure/generate/README.md create mode 100644 paaspure/generate/__init__.py create mode 100644 paaspure/generate/argparser.py create mode 100644 paaspure/generate/generator.py create mode 100644 paaspure/generate/templates/component/component.template create mode 100644 paaspure/generate/templates/module/README-md create mode 100644 paaspure/generate/templates/module/__init__-py create mode 100644 paaspure/generate/templates/module/argparser-py create mode 100644 paaspure/generate/templates/module/module-py create mode 100644 paaspure/infra/README.md create mode 100644 paaspure/infra/__init__.py create mode 100644 paaspure/infra/argparser.py create mode 100644 
paaspure/infra/hybrid_aws/00-vars.tf create mode 100644 paaspure/infra/hybrid_aws/01-aws-infra.tf create mode 100644 paaspure/infra/hybrid_aws/02-create-inv.tf create mode 100644 paaspure/infra/hybrid_aws/Dockerfile.ansible create mode 100644 paaspure/infra/hybrid_aws/Dockerfile.terraform create mode 100644 paaspure/infra/hybrid_aws/README.md create mode 100644 paaspure/infra/hybrid_aws/__init__.py create mode 100644 paaspure/infra/hybrid_aws/component.py create mode 100644 paaspure/infra/hybrid_aws/requirements.py create mode 100644 paaspure/infra/hybrid_aws/swarm-join.yml create mode 100644 paaspure/infra/hybrid_aws/swarm-leave.yml create mode 100644 paaspure/infra/module.py create mode 100644 paaspure/infra/terraform_aws/Dockerfile create mode 100644 paaspure/infra/terraform_aws/README.md create mode 100644 paaspure/infra/terraform_aws/__init__.py create mode 100644 paaspure/infra/terraform_aws/component.py create mode 100644 paaspure/infra/terraform_aws/main.tf create mode 100644 paaspure/infra/terraform_aws/requirements.py create mode 100644 paaspure/infra/terraform_aws/test_terraform_aws.py create mode 100644 paaspure/infra/terraform_azure/Docker.tmpl create mode 100644 paaspure/infra/terraform_azure/Dockerfile create mode 100644 paaspure/infra/terraform_azure/README.md create mode 100644 paaspure/infra/terraform_azure/__init__.py create mode 100644 paaspure/infra/terraform_azure/component.py create mode 100644 paaspure/infra/terraform_azure/main.tf create mode 100644 paaspure/infra/terraform_azure/requirements.py create mode 100644 paaspure/infra/terraform_azure/test_terraform_azure.py create mode 100644 paaspure/infra/test_infra.py create mode 100644 paaspure/log_management/README.md create mode 100644 paaspure/log_management/__init__.py create mode 100644 paaspure/log_management/argparser.py create mode 100644 paaspure/log_management/elk_stack/README.md create mode 100644 paaspure/log_management/elk_stack/__init__.py create mode 100644 
paaspure/log_management/elk_stack/component.py create mode 100644 paaspure/log_management/elk_stack/docker-compose.yml create mode 100644 paaspure/log_management/module.py create mode 100644 paaspure/monitoring/README.md create mode 100644 paaspure/monitoring/__init__.py create mode 100644 paaspure/monitoring/argparser.py create mode 100644 paaspure/monitoring/module.py create mode 100644 paaspure/monitoring/prom_stack/README.md create mode 100644 paaspure/monitoring/prom_stack/__init__.py create mode 100644 paaspure/monitoring/prom_stack/component.py create mode 100644 paaspure/monitoring/prom_stack/config/prometheus.yml create mode 100644 paaspure/monitoring/prom_stack/docker-compose.yml create mode 100644 paaspure/network/README.md create mode 100644 paaspure/network/__init__.py create mode 100644 paaspure/network/argparser.py create mode 100644 paaspure/network/module.py create mode 100644 paaspure/network/traefik/README.md create mode 100644 paaspure/network/traefik/__init__.py create mode 100644 paaspure/network/traefik/component.py create mode 100644 paaspure/network/traefik/config/traefik create mode 100644 paaspure/network/traefik/docker-compose.yml create mode 100644 paaspure/orchestrator/README.md create mode 100644 paaspure/orchestrator/__init__.py create mode 100644 paaspure/orchestrator/argparser.py create mode 100644 paaspure/orchestrator/module.py create mode 100644 paaspure/orchestrator/swarm_aws/README.md create mode 100644 paaspure/orchestrator/swarm_aws/__init__.py create mode 100644 paaspure/orchestrator/swarm_aws/component.py create mode 100644 paaspure/orchestrator/swarm_aws/requirements.py create mode 100644 paaspure/orchestrator/swarm_azure/README.md create mode 100644 paaspure/orchestrator/swarm_azure/__init__.py create mode 100644 paaspure/orchestrator/swarm_azure/component.py create mode 100644 paaspure/orchestrator/swarm_azure/requirements.py create mode 100644 paaspure/pull/README.md create mode 100644 paaspure/pull/__init__.py create 
mode 100644 paaspure/pull/argparser.py create mode 100644 paaspure/pull/puller.py create mode 100644 paaspure/pull/utils.py create mode 100644 paaspure/settings.py create mode 100644 paaspure/utils/__init__.py create mode 100644 paaspure/utils/docker.py create mode 100644 paaspure/utils/files.py create mode 100644 paaspure/utils/general.py create mode 100644 paaspure/utils/hub.py create mode 100644 paaspure/utils/package.py create mode 100644 paaspure/vm_builder/README.md create mode 100644 paaspure/vm_builder/__init__.py create mode 100644 paaspure/vm_builder/argparser.py create mode 100644 paaspure/vm_builder/module.py create mode 100644 paaspure/vm_builder/packer_aws/Dockerfile create mode 100644 paaspure/vm_builder/packer_aws/README.md create mode 100644 paaspure/vm_builder/packer_aws/__init__.py create mode 100644 paaspure/vm_builder/packer_aws/component.py create mode 100644 paaspure/vm_builder/packer_aws/packer_files/docker_ubuntu.json create mode 100644 paaspure/vm_builder/packer_aws/packer_files/golden_ubuntu.json create mode 100644 paaspure/vm_builder/packer_aws/packer_files/variables.json create mode 100644 paaspure/vm_builder/packer_aws/requirements.py create mode 100644 paaspure/vm_builder/packer_aws/test_packer_aws.py create mode 100644 paaspure/vm_builder/test_vm_builder.py create mode 100644 requirements.txt create mode 100644 sample_configs/aws.yml create mode 100644 sample_configs/azure.yml create mode 100644 sample_configs/hybrid.yml create mode 100644 setup.cfg create mode 100644 setup.py create mode 100644 tests/argparser/__init__.py create mode 100644 tests/argparser/test_paaspure_parser.py create mode 100644 tests/generate/__init__.py create mode 100644 tests/generate/test_paaspure_generate.py create mode 100644 tests/pull/__init__.py create mode 100644 tests/pull/test_paaspure_pull.py create mode 100644 tests/utils/__init__.py create mode 100644 tests/utils/test_files.py create mode 100644 tests/utils/test_general.py create mode 100644 
tests/utils/test_package.py diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..3849f37 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,21 @@ +# Build files and dirs +build/ +*.pyc +__pycache__ +*.egg-info +dist + +# Docker related files +Dockerfile* + +# Testing +htmlcov +.pytest_cache +.coverage + +# Other +.git +.gitignore +.gitlab-ci.yml +gitlab-runner/ +docs/ diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..9a0384b --- /dev/null +++ b/.gitignore @@ -0,0 +1,19 @@ +# Build files and dirs +build/ +__pycache__ +*.pyc +*.egg-info +dist + +# Testing +htmlcov +.pytest_cache +.coverage + +# Other +.DS_Store + +# Files with confidential information +pure*.yml +*.pem +*.pub diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..f338311 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,20 @@ +FROM python:3-alpine + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +# Update and patch system +RUN apk update \ + && apk add \ + git \ + docker + +# TODO: change this to /path once puller is implemented module/component code +# is moved to their own repos +ENV PYTHONPATH /app:/app/paaspure + +WORKDIR /app + +COPY requirements.txt . +RUN pip install -r requirements.txt + +COPY . . diff --git a/Dockerfile.binary b/Dockerfile.binary new file mode 100644 index 0000000..2e9c051 --- /dev/null +++ b/Dockerfile.binary @@ -0,0 +1,20 @@ +FROM python:3-alpine + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +# Update and patch system +RUN apk update \ + && apk add \ + git \ + docker + +ENV PYTHONPATH /app + +WORKDIR /app + +COPY requirements.txt . +RUN pip install -r requirements.txt + +COPY . . + +RUN python setup.py install diff --git a/README.md b/README.md new file mode 100644 index 0000000..5e11ec3 --- /dev/null +++ b/README.md @@ -0,0 +1,40 @@ +# PaaSPure CLI + +> This is was created as part of my computing masters practicum. 
The PaaSPure CLI was intended to be used for building/deploying PaaS platforms based on user supplied configs. The cli tool will still work without the HUB as users may still pull directly from a git repo. + + +#### Build + +```bash +docker build -t paaspure . +``` + +#### Run + +```bash +docker run -it --rm \ + -v "$(pwd)":/app \ + -v /var/run/docker.sock:/var/run/docker.sock \ +paaspure sh +``` + +This will mount the current folder as a volume mapped to /app inside the container. So that any changes done on your machine will also be reflected inside the container. + +## CI/CD + +#### Lint using Flake8 +Flak8 is a linting tool, for ensuring compliance with pep8, pyflakes and circular complexity. + +```bash +docker run --rm paaspure flake8 +``` + +#### Unit test using PyTest +```bash +docker run --rm paaspure pytest +``` + +## TODO +* Move pure objects to their own repos. +* Clean up code. +* Remove credentials from config and read from creds file. diff --git a/paaspure/__init__.py b/paaspure/__init__.py new file mode 100644 index 0000000..5f1d2cc --- /dev/null +++ b/paaspure/__init__.py @@ -0,0 +1,3 @@ +# -*- coding: utf-8 -*- + +__version__ = "0.0.1" diff --git a/paaspure/__main__.py b/paaspure/__main__.py new file mode 100644 index 0000000..007b93b --- /dev/null +++ b/paaspure/__main__.py @@ -0,0 +1,88 @@ +# -*- coding: utf-8 -*- + +import sys +import importlib + +from paaspure import settings +from paaspure.argparser import paaSPureParser +from paaspure.generate import PaaSPureGenerator +from paaspure.pull import PaaSPurePuller +from paaspure.utils import read_yaml_file + + +def main(args=None): + # TODO: This is a bit hacky, fix initial parser. 
+ config_file = 'pure.yml' + if '-f' in sys.argv: + config_file = sys.argv[sys.argv.index('-f') + 1] + elif '--file' in sys.argv: + config_file = sys.argv[sys.argv.index('--file') + 1] + + if '-v' in sys.argv or '--verbose' in sys.argv: + settings.DEBUG = '-v' in sys.argv + + config = read_yaml_file( + 'config', + config_file + ) + + if 'hub' in config: + settings.HUB = config['hub'] + + args = __extend_paaspure_parser(config) + __run_command(config, args) + + +def __extend_paaspure_parser(config): + PaaSPurePuller() + PaaSPureGenerator() + + if 'modules' in config and config['modules'] is not None: + for module in config['modules']: + try: + importlib.import_module(module) + except ModuleNotFoundError as err: + args = paaSPureParser.parser.parse_args(sys.argv[1:]) + + if args.command != 'generate' and args.command != 'pull': + print(err) + print('To import an existing module run:') + print(f'\tpaaspure pull module {module}') + print(f'\tpaaspure pull all') + print('To create a new module run:') + print(f'\tpaaspure generate module {module}') + sys.exit(1) + + return paaSPureParser.parser.parse_args(sys.argv[1:]) + + +def __run_command(config, args): + if args.command == 'generate': + PaaSPureGenerator().run(args) + elif args.command == 'pull': + PaaSPurePuller().run(args, config) + elif args.command == 'build' or args.command == 'destroy': + # NOTE: use temp command in case modules modify args. 
Fine for now but + # there must be a cleaner version (maybe suply modules with deep copy) + command = args.command + + if args.command == 'build': + modules = list(config['modules'].keys()) + else: + modules = reversed(list(config['modules'].keys())) + + for module in modules: + args.command = module + args.subcommand = command + module = importlib.import_module(args.command) + module.instance.execute(config, args) + elif args.command is not None: + module = importlib.import_module(args.command) + module.instance.execute(config, args) + else: + paaSPureParser.parser.print_help() + sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/paaspure/abstract/__init__.py b/paaspure/abstract/__init__.py new file mode 100644 index 0000000..4a72266 --- /dev/null +++ b/paaspure/abstract/__init__.py @@ -0,0 +1,7 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract.module import AbstractModule +from paaspure.abstract.component import AbstractComponent +from paaspure.abstract.argparser import AbstractParser + +__all__ = ['AbstractModule', 'AbstractComponent', 'AbstractParser'] diff --git a/paaspure/abstract/argparser.py b/paaspure/abstract/argparser.py new file mode 100644 index 0000000..580fc5f --- /dev/null +++ b/paaspure/abstract/argparser.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- + +import os +from abc import ABC, abstractmethod + + +class AbstractParser(ABC): + def __init__(self, filename=__file__): + self.name = os.path.basename(os.path.dirname(filename)), + self.name = ''.join(self.name) + super(AbstractParser, self).__init__() + + @abstractmethod + def initialize(self): + """ + This method should implement the module parser. 
+ """ diff --git a/paaspure/abstract/component.py b/paaspure/abstract/component.py new file mode 100644 index 0000000..e27c8a8 --- /dev/null +++ b/paaspure/abstract/component.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +from abc import ABC, abstractmethod + + +class AbstractComponent(ABC): + def __init__(self): + super(AbstractComponent, self).__init__() + + @abstractmethod + def build(self): + """ + This method should implement the logic for running a specific + compoenent. + """ + + @abstractmethod + def destroy(self): + """ + This method should implement the logic for tearing down the + resources created by run(). + """ diff --git a/paaspure/abstract/module.py b/paaspure/abstract/module.py new file mode 100644 index 0000000..072ffbe --- /dev/null +++ b/paaspure/abstract/module.py @@ -0,0 +1,75 @@ +# -*- coding: utf-8 -*- + +import sys +import os +import importlib +from abc import ABC, abstractmethod + + +class AbstractModule(ABC): + def __init__(self, filename=__file__): + sys.path.append(os.path.dirname(filename)) + super(AbstractModule, self).__init__() + + @abstractmethod + def execute(self): + """ + This method should implement how to run the module. Think of it + as a layer of abstraction that implements the logic needed to + setup, run and clean up after components. 
+ """ + + def general_execute(self, config, args): + if args.subcommand is None: + self.parser.print_help() + sys.exit(1) + + components = config['modules'][args.command]['components'] + for name, sub_config in components.items(): + try: + component = importlib.import_module(f'{name}') + # Use dispatch pattern to invoke method with same name + getattr(component.instance, args.subcommand)( + sub_config, + config['credentials'] + ) + except ModuleNotFoundError: + self._general_err(args, name) + + def general_deploy(self, config, args): + if args.subcommand is None: + self.parser.print_help() + sys.exit(1) + + orchestrator = importlib.import_module( + config['modules'][args.command]['orchestrator'] + ).instance + + components = config['modules'][args.command]['components'] + component_command = args.subcommand + for name, sub_config in components.items(): + try: + component = importlib.import_module(f'{name}').instance + + args.subcommand = 'client_connection' + args.command = 'orchestrator' + + with orchestrator.execute(config, args) as client: + # Use dispatch pattern to invoke method with same name + getattr(component, component_command)( + sub_config, + client + ) + except ModuleNotFoundError: + self._general_err(args, name) + + def _general_err(self, args, name): + print( + f"No component named '{name}' in module '{args.command}'" + ) + print('To import an existing component run:') + print(f'\tpaaspure pull component {args.command} {name}') + print(f'\tpaaspure pull all') + print('To create a new component run:') + print(f'\tpaaspure generate component {args.command} {name}') + sys.exit(1) diff --git a/paaspure/argparser/README.md b/paaspure/argparser/README.md new file mode 100644 index 0000000..2126203 --- /dev/null +++ b/paaspure/argparser/README.md @@ -0,0 +1,61 @@ +# PaaSPure Generic Parser + +### Generic ArgParse +By default the generic parser just parses 3 arguments: + +```bash + -f, --file Name of the cofig file (Default is 'PATH/pure.yml'). 
+ -v, --version Show version number and exit. + -h, --help Show help message and exit. +``` + +At runtime the parser is extended based on the modules defined in the cofig file. This allows the CLI tool to adapt to it's users and provide a sanitized output. + + +### Extending the default ArgParse +New modules are able to extend the parser features, simply by importing it and adding the new arguments to it. + +```python +# -*- coding: utf-8 -*- + +from paaspure.argparser import paaSPureParser + + +class NewModuleParser: + def __init__(self, module): + module.parser = paaSPureParser.extend_parser( + 'paaspure new_module COMMAND', + 'new_module', + 'New auto-generated module.' + ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Run the NewModule module.', + usage='paaspure new_module run' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) + + super(NewModuleParser, self).__init__() +``` + +Running ```paaspure new_module``` would output: + +```bash +usage: paaspure new_module COMMAND + +Options: + -h, --help show this help message and exit + +Commands: + {run} + run Run the NewModule module. 
+``` diff --git a/paaspure/argparser/__init__.py b/paaspure/argparser/__init__.py new file mode 100644 index 0000000..ce0659e --- /dev/null +++ b/paaspure/argparser/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from paaspure.argparser.parser import paaSPureParser + +__all__ = ['paaSPureParser'] diff --git a/paaspure/argparser/parser.py b/paaspure/argparser/parser.py new file mode 100644 index 0000000..a7814da --- /dev/null +++ b/paaspure/argparser/parser.py @@ -0,0 +1,71 @@ +# -*- coding: utf-8 -*- + +import argparse + +from paaspure.utils import get_version + + +class PaaSPureParser: + """Genereic parser to be extended by sub-modules.""" + def __init__(self): + self.create_parser() + super(PaaSPureParser, self).__init__() + + def create_parser(self): + self.parser = argparse.ArgumentParser( + description='PaaSPure build the Paas of the future.', + usage='paaspure command', + add_help=False + ) + + self.parser._optionals.title = 'Options' + + self.parser.add_argument('-f', '--file', default='pure.yml', type=str, + help="Name of the cofig file \ + (Default is 'PATH/pure.yml').") + + self.parser.add_argument('-h', '--help', action='help', + default=argparse.SUPPRESS, + help='Show this help message and exit.') + + self.parser.add_argument('-v', '--verbose', action='store_true', + help='Debug mode.') + + self.parser.add_argument('--version', action='version', + version=f'%(prog)s {get_version()}', + help='Show version number and exit.') + + self.parser._positionals.title = 'Management Commands' + + self.subparsers = self.parser.add_subparsers( + title='commands', + dest='command' + ) + + self.extend_parser( + f'paaspure build', + f'build', + 'Build all PureObjects.' + ) + + self.extend_parser( + f'paaspure destroy', + f'destroy', + 'Destroy all PureObjects.' 
+ ) + + def extend_parser(self, usage, command, help_msg): + new_parser = self.subparsers.add_parser( + command, + help=help_msg, + usage=usage + ) + + new_parser._optionals.title = 'Options' + new_parser._positionals.title = 'Commands' + new_parser.set_defaults(new_parser=True) + + return new_parser + + +paaSPureParser = PaaSPureParser() diff --git a/paaspure/deployer/README.md b/paaspure/deployer/README.md new file mode 100644 index 0000000..292adb7 --- /dev/null +++ b/paaspure/deployer/README.md @@ -0,0 +1,23 @@ +# PaaSPure Deployer + +A general module for deploying stuff. + +### Usage + +```bash +usage: paaspure deployer + +Options: + -h, --help show this help message and exit +``` + +### Sample PureFile + +```yaml +deployer: + orchestrator: $ORCHESTRATOR_NAME + components: + $COMPONENT_NAME: + $COMPONENT_ARG1: $VAL1 + ... +``` diff --git a/paaspure/deployer/__init__.py b/paaspure/deployer/__init__.py new file mode 100644 index 0000000..26b7809 --- /dev/null +++ b/paaspure/deployer/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .module import instance + +__all__ = ['instance'] diff --git a/paaspure/deployer/argparser.py b/paaspure/deployer/argparser.py new file mode 100644 index 0000000..8cdf720 --- /dev/null +++ b/paaspure/deployer/argparser.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class ServiceDeployerParser(AbstractParser): + """New auto-generated modulo argparse template.""" + def __init__(self, module): + super(ServiceDeployerParser, self).__init__(__file__) + self.initialize(module) + + def initialize(self, module): + module.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} COMMAND', + f'{self.name}', + 'Deploy general purpose applications.' 
+ ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Deploy resources.', + usage=f'paaspure {self.name} run' + ) + + module.run_parser = sub_parsers.add_parser( + 'destroy', + help='Destroy resources.', + usage=f'paaspure {self.name} destroy' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) diff --git a/paaspure/deployer/gitlab-runner/.gitignore b/paaspure/deployer/gitlab-runner/.gitignore new file mode 100644 index 0000000..b165f42 --- /dev/null +++ b/paaspure/deployer/gitlab-runner/.gitignore @@ -0,0 +1,2 @@ +# Config files with confidential data +config.toml diff --git a/paaspure/deployer/gitlab-runner/Dockerfile b/paaspure/deployer/gitlab-runner/Dockerfile new file mode 100644 index 0000000..2241953 --- /dev/null +++ b/paaspure/deployer/gitlab-runner/Dockerfile @@ -0,0 +1,14 @@ +FROM gitlab/gitlab-runner:latest + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +# Install and setup docker. +RUN curl -sSL https://get.docker.com/ | sh + +# Setup docker-compose. +ENV COMPOSE_URL https://github.com/docker/compose/releases/download/1.20.1/docker-compose-Linux-x86_64 +RUN curl -L $COMPOSE_URL -o /usr/local/bin/docker-compose \ + && chmod +x /usr/local/bin/docker-compose + +ENTRYPOINT ["gitlab-runner"] +CMD ["run", "--working-directory=/home/gitlab-runner"] diff --git a/paaspure/deployer/gitlab-runner/README.md b/paaspure/deployer/gitlab-runner/README.md new file mode 100644 index 0000000..2d89514 --- /dev/null +++ b/paaspure/deployer/gitlab-runner/README.md @@ -0,0 +1,38 @@ +# GitLab Runner + +### Instructions: + + * For now you have to register a runner manually. Might automate this in the future. 
+ * How to register a runner: https://docs.gitlab.com/runner/register/ + * You should end up with a config.toml file similar to config/config.toml.template: + + +```text +concurrent = 1 +check_interval = 0 + +[[runners]] + name = "PaaSPure Runner" + url = "INSERT URL HERE" + token = "INSERT YOUR TOKEN HERE" + executor = "shell" + [runners.cache] + +``` + + * Put config.toml inside the config folder. + + * Build agent + +```bash +docker-compose build +``` + + * Start agent + +```bash +docker-compose up -d +``` + +### TODO +* Add component.py and a script for initiating the runner. diff --git a/paaspure/deployer/gitlab-runner/config/config.toml.template b/paaspure/deployer/gitlab-runner/config/config.toml.template new file mode 100644 index 0000000..52df8af --- /dev/null +++ b/paaspure/deployer/gitlab-runner/config/config.toml.template @@ -0,0 +1,9 @@ +concurrent = 1 +check_interval = 0 + +[[runners]] + name = "PaaSPure Runner" + url = "INSERT URL HERE" + token = "INSERT YOUR TOKEN HERE" + executor = "shell" + [runners.cache] diff --git a/paaspure/deployer/gitlab-runner/docker-compose.yml b/paaspure/deployer/gitlab-runner/docker-compose.yml new file mode 100644 index 0000000..c382794 --- /dev/null +++ b/paaspure/deployer/gitlab-runner/docker-compose.yml @@ -0,0 +1,8 @@ +version: '3' + +services: + gitlab-runner: + build: . + volumes: + - ./config:/etc/gitlab-runner + - /var/run/docker.sock:/var/run/docker.sock diff --git a/paaspure/deployer/jenkins/README.md b/paaspure/deployer/jenkins/README.md new file mode 100644 index 0000000..d9d6333 --- /dev/null +++ b/paaspure/deployer/jenkins/README.md @@ -0,0 +1,15 @@ +# Jenkins +CI/CD server + +### Usage + +```yaml +deployer: + orchestrator: orchestrator + components: + jenkins: +``` +More coming soon... + +### Todo +1. 
Update Instructions diff --git a/paaspure/deployer/jenkins/__init__.py b/paaspure/deployer/jenkins/__init__.py new file mode 100644 index 0000000..485dd29 --- /dev/null +++ b/paaspure/deployer/jenkins/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .component import instance + +__all__ = ['instance'] diff --git a/paaspure/deployer/jenkins/component.py b/paaspure/deployer/jenkins/component.py new file mode 100644 index 0000000..48be668 --- /dev/null +++ b/paaspure/deployer/jenkins/component.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +import os + +from paaspure.abstract import AbstractComponent +from paaspure.utils import docker_stack + + +class PortusRegistry(AbstractComponent): + """Setup image registry and portus.""" + def __init__(self): + super(PortusRegistry, self).__init__() + + def build(self, config, client): + docker_stack( + command='deploy', + compose_file=os.path.dirname(__file__) + '/docker-compose.yml', + stack_name='registry' + ) + + def destroy(self, config, client): + docker_stack(command='rm', stack_name='registry') + + +instance = PortusRegistry() diff --git a/paaspure/deployer/jenkins/docker-compose.yml b/paaspure/deployer/jenkins/docker-compose.yml new file mode 100644 index 0000000..2031bf9 --- /dev/null +++ b/paaspure/deployer/jenkins/docker-compose.yml @@ -0,0 +1,26 @@ +version: '3.4' + +volumes: + jenkins_data: + +services: + jenkins: + image: csanchez/jenkins-swarm + ports: + - 8081:8080 + restart: always + volumes: + - jenkins_data:/var/jenkins_home + + worker: + image: csanchez/jenkins-swarm-slave + command: -username jenkins -password jenkins -executors 1 + environment: + - "JENKINS_PORT_8080_TCP_ADDR=jenkins" + - "JENKINS_PORT_8080_TCP_PORT=8081" + +# networks: +# traefik_default: +# external: true +# registry_default: +# external: true diff --git a/paaspure/deployer/module.py b/paaspure/deployer/module.py new file mode 100644 index 0000000..181cf17 --- /dev/null +++ b/paaspure/deployer/module.py @@ -0,0 +1,19 @@ +# -*- 
coding: utf-8 -*- + +from paaspure.abstract import AbstractModule +from .argparser import ServiceDeployerParser + + +class ServiceDeployer(AbstractModule): + """ + Abstraction module for deploying general components. + """ + def __init__(self): + ServiceDeployerParser(self) + super(ServiceDeployer, self).__init__(__file__) + + def execute(self, config, args): + super(ServiceDeployer, self).general_deploy(config, args) + + +instance = ServiceDeployer() diff --git a/paaspure/deployer/portainer/README.md b/paaspure/deployer/portainer/README.md new file mode 100644 index 0000000..97f8b7d --- /dev/null +++ b/paaspure/deployer/portainer/README.md @@ -0,0 +1,15 @@ +# Portainer +Swarm Mode UI + +### Usage + +```yaml +deployer: + orchestrator: orchestrator + components: + portainer: +``` +More coming soon... + +### Todo +1. Update Instructions diff --git a/paaspure/deployer/portainer/__init__.py b/paaspure/deployer/portainer/__init__.py new file mode 100644 index 0000000..485dd29 --- /dev/null +++ b/paaspure/deployer/portainer/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .component import instance + +__all__ = ['instance'] diff --git a/paaspure/deployer/portainer/component.py b/paaspure/deployer/portainer/component.py new file mode 100644 index 0000000..3d3abcc --- /dev/null +++ b/paaspure/deployer/portainer/component.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +import os + +from paaspure.abstract import AbstractComponent +from paaspure.utils import docker_stack + + +class Portainer(AbstractComponent): + """A Swarm UI.""" + def __init__(self): + super(Portainer, self).__init__() + + def build(self, config, client): + docker_stack( + command='deploy', + compose_file=os.path.dirname(__file__) + '/docker-compose.yml', + stack_name='portainer' + ) + + def destroy(self, config, client): + docker_stack( + command='rm', + stack_name='portainer' + ) + + +instance = Portainer() diff --git a/paaspure/deployer/portainer/docker-compose.yml 
b/paaspure/deployer/portainer/docker-compose.yml new file mode 100644 index 0000000..997a51a --- /dev/null +++ b/paaspure/deployer/portainer/docker-compose.yml @@ -0,0 +1,41 @@ +version: '3.4' + +services: + agent: + image: portainer/agent + environment: + AGENT_CLUSTER_ADDR: tasks.agent + volumes: + - /var/run/docker.sock:/var/run/docker.sock + networks: + - agent_network + deploy: + mode: global + + portainer: + image: portainer/portainer + command: -H tcp://tasks.agent:9001 --tlsskipverify + volumes: + - portainer_data:/data + networks: + - agent_network + - traefik_default + deploy: + mode: replicated + replicas: 1 + placement: + constraints: [node.role == manager] + labels: + - traefik.backend=portainer + - traefik.port=9000 + - traefik.frontend.rule=Host:portainer.demo + - traefik.docker.network=traefik_default + +networks: + traefik_default: + external: true + agent_network: + driver: overlay + +volumes: + portainer_data: diff --git a/paaspure/deployer/registry/README.md b/paaspure/deployer/registry/README.md new file mode 100644 index 0000000..dd996b9 --- /dev/null +++ b/paaspure/deployer/registry/README.md @@ -0,0 +1,33 @@ +# Registry +Image repository and visualizer + +### Usage + +```yaml +deployer: + orchestrator: orchestrator + components: + registry: +``` + +add registry.demo as an insecure registry + +More coming soon... + + +### Generate certs + docker run --rm \ + -v "$(pwd)/certs":/certs \ + -e SSL_IP=172.17.8.101 \ + -e SSL_DNS=registry.demo \ + paulczar/omgwtfssl + +docker run --rm \ + -v "$(pwd)/certs":/certs \ + -e SSL_SUBJECT=registry.demo \ + -e SSL_DNS=registry.demo \ + paulczar/omgwtfssl + +### Todo +1. Fix portus or find another solution. +2. 
Update Instructions diff --git a/paaspure/deployer/registry/__init__.py b/paaspure/deployer/registry/__init__.py new file mode 100644 index 0000000..485dd29 --- /dev/null +++ b/paaspure/deployer/registry/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .component import instance + +__all__ = ['instance'] diff --git a/paaspure/deployer/registry/component.py b/paaspure/deployer/registry/component.py new file mode 100644 index 0000000..48be668 --- /dev/null +++ b/paaspure/deployer/registry/component.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +import os + +from paaspure.abstract import AbstractComponent +from paaspure.utils import docker_stack + + +class PortusRegistry(AbstractComponent): + """Setup image registry and portus.""" + def __init__(self): + super(PortusRegistry, self).__init__() + + def build(self, config, client): + docker_stack( + command='deploy', + compose_file=os.path.dirname(__file__) + '/docker-compose.yml', + stack_name='registry' + ) + + def destroy(self, config, client): + docker_stack(command='rm', stack_name='registry') + + +instance = PortusRegistry() diff --git a/paaspure/deployer/registry/docker-compose.yml b/paaspure/deployer/registry/docker-compose.yml new file mode 100644 index 0000000..7b93707 --- /dev/null +++ b/paaspure/deployer/registry/docker-compose.yml @@ -0,0 +1,103 @@ +version: '3.4' + +# volumes: +# mariadb: + +services: + registry: + image: registry:2.6 + networks: + - traefik_default + - default +# secrets: +# - cert.pem +# environment: +# REGISTRY_AUTH_TOKEN_REALM: http://portus.demo/v2/token +# REGISTRY_AUTH_TOKEN_SERVICE: registry.demo +# REGISTRY_AUTH_TOKEN_ISSUER: portus.demo +# REGISTRY_AUTH_TOKEN_ROOTCERTBUNDLE: /run/secrets/cert.pem +# REGISTRY_NOTIFICATIONS_ENDPOINTS: > +# - name: portus +# url: http://portus.demo/v2/webhooks/events +# timeout: 500ms +# threshold: 5 +# backoff: 1s + deploy: + replicas: 1 + labels: + - traefik.backend=registry + - traefik.port=5000 + - 
traefik.frontend.rule=Host:registry.demo + - traefik.docker.network=traefik_default +# +# mariadb: +# image: mariadb:10.3 +# networks: +# - default +# ports: +# - '3306' +# volumes: +# - mariadb:/var/lib/mysql +# environment: +# MYSQL_ROOT_PASSWORD: dbSecretPass +# +# portus: +# image: opensuse/portus:head +# depends_on: +# - mariadb +# networks: +# - traefik_default +# - default +# secrets: +# - key.pem +# environment: +# PORTUS_MACHINE_FQDN_VALUE: portus.demo +# PORTUS_LOG_LEVEL: debug +# RAILS_SERVE_STATIC_FILES: 'true' +# PORTUS_DB_HOST: mariadb +# PORTUS_DB_USERNAME: root +# PORTUS_DB_PASSWORD: dbSecretPass +# PORTUS_DB_DATABASE: portus +# PORTUS_DELETE_ENABLED: 'true' +# PORTUS_CHECK_SSL_USAGE_ENABLED: 'false' +# PORTUS_SECRET_KEY_BASE: b494a25faa8d22e430e843e220e424e10ac84d2ce0e64231f5b636d21251eb6d267adb042ad5884cbff0f3891bcf911bdf8abb3ce719849ccda9a4889249e5c2 +# PORTUS_KEY_PATH: /run/secrets/key.pem +# PORTUS_PASSWORD: portusSecretPass +# deploy: +# replicas: 1 +# labels: +# - traefik.backend=portus +# - traefik.port=3000 +# - traefik.frontend.rule=Host:portus.demo +# - traefik.docker.network=traefik_default +# +# portus-background: +# image: opensuse/portus:head +# depends_on: +# - portus +# - mariadb +# networks: +# - default +# secrets: +# - key.pem +# environment: +# PORTUS_MACHINE_FQDN_VALUE: portus.demo +# PORTUS_DB_HOST: mariadb +# PORTUS_DB_USERNAME: root +# PORTUS_DB_PASSWORD: dbSecretPass +# PORTUS_DB_DATABASE: portus +# PORTUS_DELETE_ENABLED: 'true' +# PORTUS_KEY_PATH: /run/secrets/key.pem +# PORTUS_SECRET_KEY_BASE: b494a25faa8d22e430e843e220e424e10ac84d2ce0e64231f5b636d21251eb6d267adb042ad5884cbff0f3891bcf911bdf8abb3ce719849ccda9a4889249e5c2 +# PORTUS_PASSWORD: portusSecretPass +# PORTUS_BACKGROUND: 'true' +# +# secrets: +# cert.pem: +# file: ./certs/cert.pem +# key.pem: +# file: ./certs/key.pem + +networks: + traefik_default: + external: true diff --git a/paaspure/deployer/visualizer/README.md b/paaspure/deployer/visualizer/README.md new file 
mode 100644 index 0000000..c460f13 --- /dev/null +++ b/paaspure/deployer/visualizer/README.md @@ -0,0 +1,15 @@ +# Visualizer +Simple cluster UI + +### Usage + +```yaml +deployer: + orchestrator: orchestrator + components: + visualizer: +``` +More coming soon... + +### Todo +1. Update Instructions diff --git a/paaspure/deployer/visualizer/__init__.py b/paaspure/deployer/visualizer/__init__.py new file mode 100644 index 0000000..485dd29 --- /dev/null +++ b/paaspure/deployer/visualizer/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .component import instance + +__all__ = ['instance'] diff --git a/paaspure/deployer/visualizer/component.py b/paaspure/deployer/visualizer/component.py new file mode 100644 index 0000000..257ff3a --- /dev/null +++ b/paaspure/deployer/visualizer/component.py @@ -0,0 +1,25 @@ +# -*- coding: utf-8 -*- + +import os + +from paaspure.abstract import AbstractComponent +from paaspure.utils import docker_stack + + +class SwarmVisualizer(AbstractComponent): + """Deploy the Swarm service visualizer.""" + def __init__(self): + super(SwarmVisualizer, self).__init__() + + def build(self, config, client): + docker_stack( + command='deploy', + compose_file=os.path.dirname(__file__) + '/docker-compose.yml', + stack_name='visualizer' + ) + + def destroy(self, config, client): + docker_stack(command='rm', stack_name='visualizer') + + +instance = SwarmVisualizer() diff --git a/paaspure/deployer/visualizer/docker-compose.yml b/paaspure/deployer/visualizer/docker-compose.yml new file mode 100644 index 0000000..10065a2 --- /dev/null +++ b/paaspure/deployer/visualizer/docker-compose.yml @@ -0,0 +1,25 @@ +version: '3.4' + +services: + visualizer: + image: dockersamples/visualizer + ports: + - 8080/tcp + volumes: + - /var/run/docker.sock:/var/run/docker.sock + networks: + - traefik_default + deploy: + restart_policy: + condition: on-failure + placement: + constraints: [node.role == manager] + labels: + - traefik.backend=visualizer + - traefik.port=8080 
+ - traefik.frontend.rule=Host:visualizer.demo + - traefik.docker.network=traefik_default + +networks: + traefik_default: + external: true diff --git a/paaspure/dummy_component/__init__.py b/paaspure/dummy_component/__init__.py new file mode 100644 index 0000000..bf6ce18 --- /dev/null +++ b/paaspure/dummy_component/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from paaspure.dummy_component.requirements import install +from paaspure.dummy_component.component import instance + +__all__ = ['install', 'instance'] diff --git a/paaspure/dummy_component/component.py b/paaspure/dummy_component/component.py new file mode 100644 index 0000000..09816a6 --- /dev/null +++ b/paaspure/dummy_component/component.py @@ -0,0 +1,20 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractComponent + + +class DummyComponent(AbstractComponent): + """Dummy Component""" + def __init__(self): + super(DummyComponent, self).__init__() + + def build(self, config, credentials): + print('Dummy run.') + return True + + def destroy(self, config, credentials): + print('Dummy destroy.') + return True + + +instance = DummyComponent() diff --git a/paaspure/dummy_component/requirements.py b/paaspure/dummy_component/requirements.py new file mode 100644 index 0000000..bad7cc9 --- /dev/null +++ b/paaspure/dummy_component/requirements.py @@ -0,0 +1,11 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils import pip_install + + +def install(): + print('Dummy install') + pip_install(packages=[], component='DummyComponent') + + +install() diff --git a/paaspure/generate/README.md b/paaspure/generate/README.md new file mode 100644 index 0000000..ae26b05 --- /dev/null +++ b/paaspure/generate/README.md @@ -0,0 +1,48 @@ +# PaaSPure Generator + +The generator is built into paaspure and allows users to quickly generate scaffolding code and tests. 
+ +### Usage + +```bash +usage: paaspure generate TEMPLATE + +Options: + -h, --help show this help message and exit + +Templates: + {module,component} + module Generate template for a new module. + component Generate template for a new component. +``` + + +### Module +Modules provide a layer of abstraction for running all the different components and represent things such as logging, CI or monitoring. + + +```bash +usage: paaspure generate module NAME + +Arguments: + NAME The name of the new module. + +Options: + -h, --help show this help message and exit +``` + + +### Component +A module may have many components, components are usually orchestrator specific composed of things such as Dockerfiles, stackfiles and component specific tests. + + +```bash +usage: paaspure generate component PARENT_MODULE NAME + +Arguments: + PARENT_MODULE The parent module name. + NAME The name of the new component. + +Options: + -h, --help show this help message and exit +``` diff --git a/paaspure/generate/__init__.py b/paaspure/generate/__init__.py new file mode 100644 index 0000000..368d030 --- /dev/null +++ b/paaspure/generate/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from paaspure.generate.generator import PaaSPureGenerator + +__all__ = ['PaaSPureGenerator'] diff --git a/paaspure/generate/argparser.py b/paaspure/generate/argparser.py new file mode 100644 index 0000000..c89dcef --- /dev/null +++ b/paaspure/generate/argparser.py @@ -0,0 +1,64 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class GeneratorParser(AbstractParser): + """Template generator, for adding code scaffolding.""" + def __init__(self, generator): + super(GeneratorParser, self).__init__(__file__) + self.initialize(generator) + + def initialize(self, generator): + generator.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} TEMPLATE', + f'{self.name}', + 'Generate templates.' 
+ ) + + sub_parsers = generator.parser.add_subparsers( + title='Templates', + dest='template' + ) + + generator.module_parser = sub_parsers.add_parser( + 'module', + help='Generate template for a new module.', + usage=f'paaspure {self.name} module NAME' + ) + + generator.module_parser.add_argument( + 'NAME', + nargs='?', + type=str, + help='The name of the new module.' + ) + + generator.module_parser._optionals.title = 'Options' + generator.module_parser._positionals.title = 'Arguments' + generator.module_parser.set_defaults(module_parser=True) + + generator.component_parser = sub_parsers.add_parser( + 'component', + help='Generate template for a new component.', + usage=f'paaspure {self.name} component PARENT_MODULE NAME' + ) + + generator.component_parser.add_argument( + 'PARENT_MODULE', + nargs='?', + type=str, + help='The name of the parent module.' + ) + + generator.component_parser.add_argument( + 'NAME', + nargs='?', + type=str, + help='The name of the new component.' + ) + + generator.component_parser._optionals.title = 'Options' + generator.component_parser._positionals.title = 'Arguments' + generator.component_parser.set_defaults(new_parser=True) diff --git a/paaspure/generate/generator.py b/paaspure/generate/generator.py new file mode 100644 index 0000000..6738d8a --- /dev/null +++ b/paaspure/generate/generator.py @@ -0,0 +1,87 @@ +# -*- coding: utf-8 -*- + +import os +import sys + +from paaspure.utils import validate_name +from paaspure.generate.argparser import GeneratorParser +from jinja2 import Environment, FileSystemLoader + + +class PaaSPureGenerator: + """Template generator, for adding code scaffolding.""" + def __init__(self): + GeneratorParser(self) + super(PaaSPureGenerator, self).__init__() + + def run(self, args): + if args.template is None or not hasattr(self, args.template): + self.parser.print_help() + sys.exit(1) + + # Use dispatch pattern to invoke method with same name + getattr(self, args.template)(args) + + def module(self, args): + 
if args.NAME is None: + self.module_parser.print_help() + sys.exit(1) + + name = args.NAME.lower() + validate_name(args.template, name) + capitalized_name = self.__capitalize_name(name) + + template_path = os.path.join( + os.path.dirname(__file__), + 'templates', + 'module' + ) + + target_path = os.path.join(os.getcwd(), name) + + self.__generate_target_files( + template_path, + target_path, + name, + capitalized_name + ) + + def component(self, args): + if args.PARENT_MODULE is None or args.NAME is None: + self.component_parser.print_help() + sys.exit(1) + + # generate new component source files + + def __capitalize_name(self, name): + split_name = name.split('_') + + capitalized_name = '' + + for token in split_name: + capitalized_name += token.capitalize() + + return capitalized_name + + def __generate_target_files(self, template_path, target_path, name, + capitalized_name): + try: + os.mkdir(target_path) + except FileExistsError: + print(f'Module {name}, already exists.') + sys.exit(1) + + # Create the jinja2 environment. + j2_env = Environment( + loader=FileSystemLoader(template_path), + trim_blocks=True + ) + + for template in j2_env.list_templates(): + j2_env.get_template(template).stream( + module=name, + module_capitalized=capitalized_name, + new_line='' + ).dump( + os.path.join(target_path, '.'.join(template.split('-'))) + ) diff --git a/paaspure/generate/templates/component/component.template b/paaspure/generate/templates/component/component.template new file mode 100644 index 0000000..e69de29 diff --git a/paaspure/generate/templates/module/README-md b/paaspure/generate/templates/module/README-md new file mode 100644 index 0000000..968eb05 --- /dev/null +++ b/paaspure/generate/templates/module/README-md @@ -0,0 +1,16 @@ +# {{ module_capitalized }} + +New auto generated module. 
+ +### Usage + +```bash +usage: paaspure {{ module }} COMMAND + +Options: + -h, --help show this help message and exit + +Commands: + {run} + run Run the {{ module_capitalized }} module. +``` diff --git a/paaspure/generate/templates/module/__init__-py b/paaspure/generate/templates/module/__init__-py new file mode 100644 index 0000000..4c1bff7 --- /dev/null +++ b/paaspure/generate/templates/module/__init__-py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from {{ module }}.module import instance + +__all__ = ['instance'] +{{ new_line }} diff --git a/paaspure/generate/templates/module/argparser-py b/paaspure/generate/templates/module/argparser-py new file mode 100644 index 0000000..15355a2 --- /dev/null +++ b/paaspure/generate/templates/module/argparser-py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +from paaspure.argparser import paaSPureParser + + +class {{ module_capitalized }}Parser: + """New auto-generated modulo argparse template.""" + def __init__(self, module): + module.parser = paaSPureParser.extend_parser( + 'paaspure {{ module }} COMMAND', + '{{ module }}', + 'New auto-generated module.' 
+ ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Run the {{ module_capitalized }} module.', + usage='paaspure {{ module }} run' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) + + super({{ module_capitalized }}Parser, self).__init__() +{{ new_line }} diff --git a/paaspure/generate/templates/module/module-py b/paaspure/generate/templates/module/module-py new file mode 100644 index 0000000..698c29c --- /dev/null +++ b/paaspure/generate/templates/module/module-py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +import sys + +from {{ module }}.argparser import {{ module_capitalized }}Parser + + +class {{ module_capitalized }}: + """New auto-generated modulo template.""" + def __init__(self): + {{ module_capitalized }}Parser(self) + super({{ module_capitalized }}, self).__init__() + + def execute(self, args): + super({{ module_capitalized }}, self).general_execute(args) + + def build(self, args): + print('Successfully ran the {{ module }} module.') + + +instance = {{ module_capitalized }}() +{{ new_line }} diff --git a/paaspure/infra/README.md b/paaspure/infra/README.md new file mode 100644 index 0000000..9fdc60e --- /dev/null +++ b/paaspure/infra/README.md @@ -0,0 +1,28 @@ +# PaaSPure Infra Builder + +Abstraction module for components used to build the infrastructure. +Run cloud VMs and setup cluster. + +### Usage + +```bash +usage: paaspure vm_builder COMMAND + +Options: + -h, --help show this help message and exit + +Commands: + {run,destroy} + run Run the VmBuilder module. + destroy Destroy VmBuilder resources. +``` + +### Sample PureFile + +```yaml +infra: + components: + $COMPONENT_NAME: + $COMPONENT_ARG1: $VAL1 + ... 
+``` diff --git a/paaspure/infra/__init__.py b/paaspure/infra/__init__.py new file mode 100644 index 0000000..26b7809 --- /dev/null +++ b/paaspure/infra/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .module import instance + +__all__ = ['instance'] diff --git a/paaspure/infra/argparser.py b/paaspure/infra/argparser.py new file mode 100644 index 0000000..3ce9c28 --- /dev/null +++ b/paaspure/infra/argparser.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class InfraBuilderParser(AbstractParser): + """New auto-generated modulo argparse template.""" + def __init__(self, module): + super(InfraBuilderParser, self).__init__(__file__) + self.initialize(module) + + def initialize(self, module): + module.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} COMMAND', + f'{self.name}', + 'Setup cloud infrastructure.' + ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Run the InfraBuilder module.', + usage=f'paaspure {self.name} run' + ) + + module.destroy_parser = sub_parsers.add_parser( + 'destroy', + help='Destroy InfraBuilder resources.', + usage=f'paaspure {self.name} destroy' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) diff --git a/paaspure/infra/hybrid_aws/00-vars.tf b/paaspure/infra/hybrid_aws/00-vars.tf new file mode 100644 index 0000000..55841a0 --- /dev/null +++ b/paaspure/infra/hybrid_aws/00-vars.tf @@ -0,0 +1,7 @@ +variable "aws_region" {} +variable "ssh_user" {} +variable "aws_key_name" {} +# variable "manager_instance_type" {} +# variable "manager_count" {} +variable "worker_instance_type" {} +variable "worker_count" {} diff --git a/paaspure/infra/hybrid_aws/01-aws-infra.tf b/paaspure/infra/hybrid_aws/01-aws-infra.tf new file 
mode 100644 index 0000000..2e81a40 --- /dev/null +++ b/paaspure/infra/hybrid_aws/01-aws-infra.tf @@ -0,0 +1,110 @@ +## Amazon Infrastructure +provider "aws" { + region = "${var.aws_region}" +} + +## Create swarm security group +resource "aws_security_group" "swarm_sg" { + name = "swarm_sg" + description = "Allow all inbound traffic necessary for Swarm" + + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 2377 + to_port = 2377 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 7946 + to_port = 7946 + protocol = "tcp" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 7946 + to_port = 7946 + protocol = "udp" + cidr_blocks = ["0.0.0.0/0"] + } + + ingress { + from_port = 4789 + to_port = 4789 + protocol = "udp" + cidr_blocks = ["0.0.0.0/0"] + } + + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + + cidr_blocks = [ + "0.0.0.0/0", + ] + } + + tags { + Name = "swarm_sg" + } +} + +# ## Find latest AMI +# data "aws_ami" "ubuntu" { +# most_recent = true +# +# filter { +# name = "tag-value" +# values = ["docker_host_ubuntu_16_04"] +# } +# +# owners = ["self"] +# } +data "aws_ami" "moby" { + most_recent = true + + filter { + name = "name" + values = ["Moby Linux 18.06.0-ce-aws1 stable"] + } + + owners = ["041673875206"] +} + +# TODO: Open Azure ports to allow managers +# ## Create Swarm Managers +# resource "aws_instance" "aws-swarm-managers" { +# depends_on = ["aws_security_group.swarm_sg"] +# ami = "${data.aws_ami.moby.id}" +# instance_type = "${var.manager_instance_type}" +# vpc_security_group_ids = ["${aws_security_group.swarm_sg.id}"] +# key_name = "${var.aws_key_name}" +# count = "${var.manager_count}" +# +# tags { +# Name = "swarm-manager-${count.index}" +# } +# } + +## Create AWS Swarm Workers +resource "aws_instance" "aws-swarm-workers" { + depends_on = ["aws_security_group.swarm_sg"] + ami = "${data.aws_ami.moby.id}" + instance_type = 
"${var.worker_instance_type}" + vpc_security_group_ids = ["${aws_security_group.swarm_sg.id}"] + key_name = "${var.aws_key_name}" + count = "${var.worker_count}" + + tags { + Name = "swarm-worker-${count.index}" + } +} diff --git a/paaspure/infra/hybrid_aws/02-create-inv.tf b/paaspure/infra/hybrid_aws/02-create-inv.tf new file mode 100644 index 0000000..9c6f695 --- /dev/null +++ b/paaspure/infra/hybrid_aws/02-create-inv.tf @@ -0,0 +1,20 @@ +resource "null_resource" "ansible-provision" { + # depends_on = ["aws_instance.aws-swarm-managers", "aws_instance.aws-swarm-workers"] + depends_on = ["aws_instance.aws-swarm-workers"] + + # provisioner "local-exec" { + # command = "echo \"[swarm-managers]\" >> swarm-inventory" + # } + # + # provisioner "local-exec" { + # command = "echo \"${join("\n",formatlist("%s ansible_user=%s ansible_ssh_private_key_file=%s.pem", aws_instance.aws-swarm-managers.*.public_ip, var.ssh_user, var.aws_key_name))}\" >> swarm-inventory" + # } + + provisioner "local-exec" { + command = "echo \"[swarm-workers]\" >> swarm-inventory" + } + + provisioner "local-exec" { + command = "echo \"${join("\n",formatlist("%s ansible_user=%s ansible_ssh_private_key_file=%s.pem", aws_instance.aws-swarm-workers.*.public_ip, var.ssh_user, var.aws_key_name))}\" >> swarm-inventory" + } +} diff --git a/paaspure/infra/hybrid_aws/Dockerfile.ansible b/paaspure/infra/hybrid_aws/Dockerfile.ansible new file mode 100644 index 0000000..f962340 --- /dev/null +++ b/paaspure/infra/hybrid_aws/Dockerfile.ansible @@ -0,0 +1,11 @@ +FROM mullnerz/ansible-playbook + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +WORKDIR /ansible/playbooks +COPY . . 
+ +ENV ANSIBLE_HOST_KEY_CHECKING False + +ENTRYPOINT ["ansible-playbook"] +CMD ["--version"] diff --git a/paaspure/infra/hybrid_aws/Dockerfile.terraform b/paaspure/infra/hybrid_aws/Dockerfile.terraform new file mode 100644 index 0000000..cf4ef5b --- /dev/null +++ b/paaspure/infra/hybrid_aws/Dockerfile.terraform @@ -0,0 +1,7 @@ +FROM hashicorp/terraform:0.11.7 + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +WORKDIR /data +COPY . . +RUN terraform init diff --git a/paaspure/infra/hybrid_aws/README.md b/paaspure/infra/hybrid_aws/README.md new file mode 100644 index 0000000..08e856a --- /dev/null +++ b/paaspure/infra/hybrid_aws/README.md @@ -0,0 +1,37 @@ +# PaaSPure Extend Swarm + +PaaSPure component for extending existing swarm. Provisioning AWS resources using terraform and configuration using Ansible. + +## Networking +Open protocols and ports between the hosts +The following ports must be available. On some systems, these ports are open by default. + +#### Required +TCP port 2377 for cluster management communications +TCP and UDP port 7946 for communication among nodes +UDP port 4789 for overlay network traffic + +#### Optional +TCP port 22 for ssh + +## Usage + +```yaml +hybrid_aws: + aws_region: "eu-west-1" + aws_key_name: "paaspure" + ssh_user: "docker" + manager_instance_type: "t2.micro" + manager_count: 1 + worker_instance_type: "t2.micro" + worker_count: 1 + orchestrator_params: + name: 'orchestrator' + component: 'swarm_azure' + resource_group_name: paaspureswarm + swarmName: "dockerswarm" +``` + + +Validate template by adding '--syntax-check' to the execute command +E.g ['--syntax-check', '-i', 'swarm-inventory', 'swarm-leave.yml'] diff --git a/paaspure/infra/hybrid_aws/__init__.py b/paaspure/infra/hybrid_aws/__init__.py new file mode 100644 index 0000000..462e426 --- /dev/null +++ b/paaspure/infra/hybrid_aws/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from .requirements import install +from .component import instance + +__all__ = 
['install', 'instance'] diff --git a/paaspure/infra/hybrid_aws/component.py b/paaspure/infra/hybrid_aws/component.py new file mode 100644 index 0000000..546e605 --- /dev/null +++ b/paaspure/infra/hybrid_aws/component.py @@ -0,0 +1,115 @@ +# -*- coding: utf-8 -*- + +import os +import docker +import json +import shutil +import importlib + +from paaspure.abstract import AbstractComponent +from paaspure.utils import build_image, copy_from_container + + +class HybridAWS(AbstractComponent): + """Component for extending existing swarm. Using AWS resources.""" + def __init__(self): + super(HybridAWS, self).__init__() + + def build(self, config, credentials): + var_file = os.path.dirname(__file__) + "/terraform.tfvars" + with open(var_file, 'w+') as f: + f.write(json.dumps(config, indent=4)) + + # inventory_file = os.path.dirname(__file__) + "/swarm-inventory" + # ssh_key = f'ansible_ssh_private_key_file={config["aws_key_name"]}.pem' + # ssh_user = f'ansible_user={config["ssh_user"]}' + # + # orchestrator = self.__get_orchestrator_instance( + # config['orchestrator_params']['name'], + # config['orchestrator_params']['component'] + # ) + # + # host, ssh_port = orchestrator.build( + # config['orchestrator_params'], + # credentials + # ) + # + # with open(inventory_file, 'w+') as f: + # f.write('[swarm-master]\n') + # f.write(f'{host} ansible_port={ssh_port} {ssh_user} {ssh_key}\n') + # + # shutil.copy2(credentials['private_key'], os.path.dirname(__file__)) + # + self.__terraform_execute(credentials, ['apply', '-auto-approve']) + self.__ansible_execute( + credentials, + ['-i', 'swarm-inventory', 'swarm-join.yml'] + ) + + def destroy(self, config, credentials): + # TODO: Should destroy also remove resource files? + self.__ansible_execute( + credentials, + ['-i', 'swarm-inventory', 'swarm-leave.yml'] + ) + self.__terraform_execute(credentials, ['destroy', '-force']) + + def __get_orchestrator_instance(self, name, component): + return importlib.import_module( + name + '.' 
+ component + ).instance + + def __terraform_execute(self, credentials, command=['plan']): + build_image( + image_tag='paaspure_hybrid_terraform', + path=os.path.dirname(__file__), + dockerfile='Dockerfile.terraform' + ) + + client = docker.from_env() + + container = client.containers.run( + 'paaspure_hybrid_terraform', + environment=[ + 'AWS_ACCESS_KEY_ID=' + credentials['aws_access_key'], + 'AWS_SECRET_ACCESS_KEY=' + credentials['aws_secret_key'] + ], + command=command, + detach=True + ) + + for log in container.logs(stream=True): + print(log.decode(), end='') + + copy_from_container( + container=container, + src_path='/data/.', + dest_path=os.path.dirname(__file__) + ) + + def __ansible_execute(self, credentials, command=['--version']): + build_image( + image_tag='paaspure_hybrid_ansible', + path=os.path.dirname(__file__), + dockerfile='Dockerfile.ansible' + ) + + client = docker.from_env() + + container = client.containers.run( + 'paaspure_hybrid_ansible', + command=command, + detach=True + ) + + for log in container.logs(stream=True): + print(log.decode(), end='') + + copy_from_container( + container=container, + src_path='/ansible/playbooks/.', + dest_path=os.path.dirname(__file__) + ) + + +instance = HybridAWS() diff --git a/paaspure/infra/hybrid_aws/requirements.py b/paaspure/infra/hybrid_aws/requirements.py new file mode 100644 index 0000000..93deeba --- /dev/null +++ b/paaspure/infra/hybrid_aws/requirements.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils import pip_install + +pip_packages = [ + 'docker' +] + + +def install(): + pip_install(packages=pip_packages, component='TerraformAWS') + + +install() diff --git a/paaspure/infra/hybrid_aws/swarm-join.yml b/paaspure/infra/hybrid_aws/swarm-join.yml new file mode 100644 index 0000000..7e89ffd --- /dev/null +++ b/paaspure/infra/hybrid_aws/swarm-join.yml @@ -0,0 +1,31 @@ +# NOTE: sudo apk add sudo if required to workaround issue https://github.com/docker/for-azure/issues/10 +# Remove it 
once the proper fix is implemented +- name: Install Ansible Prereqs + hosts: swarm-master:swarm-workers + # hosts: swarm-master:swarm-managers:swarm-nodes + gather_facts: no + tasks: + - raw: "sudo apk update && sudo apk add sudo && sudo apk add python py-pip" + +- name: Fetch tokens from Swarm Master + hosts: swarm-master + gather_facts: yes + tasks: + - command: "docker swarm join-token -q manager" + register: swarm_manager_token + - set_fact: swarmtokenmanager="{{swarm_manager_token.stdout}}" + - command: "docker swarm join-token -q worker" + register: swarm_worker_token + - set_fact: swarmtokenworker="{{swarm_worker_token.stdout}}" + +# - name: Join Swarm Manager +# hosts: swarm-managers +# gather_facts: yes +# tasks: +# - command: "docker swarm join --advertise-addr {{inventory_hostname}} --token {{hostvars[groups['swarm-master'][0]].swarmtokenmanager}} {{hostvars[groups['swarm-master'][0]].inventory_hostname}}:2377" + +- name: Join Swarm Worker + hosts: swarm-workers + gather_facts: yes + tasks: + - command: "docker swarm join --advertise-addr {{inventory_hostname}} --token {{hostvars[groups['swarm-master'][0]].swarmtokenworker}} {{hostvars[groups['swarm-master'][0]].inventory_hostname}}:50101" diff --git a/paaspure/infra/hybrid_aws/swarm-leave.yml b/paaspure/infra/hybrid_aws/swarm-leave.yml new file mode 100644 index 0000000..c2fb336 --- /dev/null +++ b/paaspure/infra/hybrid_aws/swarm-leave.yml @@ -0,0 +1,6 @@ +- name: Leave Swarm + # hosts: swarm-managers:swarm-workers + hosts: swarm-workers + gather_facts: yes + tasks: + - command: "docker swarm leave --force" diff --git a/paaspure/infra/module.py b/paaspure/infra/module.py new file mode 100644 index 0000000..3483613 --- /dev/null +++ b/paaspure/infra/module.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractModule +from .argparser import InfraBuilderParser + + +class InfraBuilder(AbstractModule): + """ + Abstraction module for components used to run the cloud
infrastructure. + """ + def __init__(self): + InfraBuilderParser(self) + super(InfraBuilder, self).__init__(__file__) + + def execute(self, config, args): + super(InfraBuilder, self).general_execute(config, args) + + +instance = InfraBuilder() diff --git a/paaspure/infra/terraform_aws/Dockerfile b/paaspure/infra/terraform_aws/Dockerfile new file mode 100644 index 0000000..cf4ef5b --- /dev/null +++ b/paaspure/infra/terraform_aws/Dockerfile @@ -0,0 +1,7 @@ +FROM hashicorp/terraform:0.11.7 + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +WORKDIR /data +COPY . . +RUN terraform init diff --git a/paaspure/infra/terraform_aws/README.md b/paaspure/infra/terraform_aws/README.md new file mode 100644 index 0000000..69e8b21 --- /dev/null +++ b/paaspure/infra/terraform_aws/README.md @@ -0,0 +1,36 @@ +# PaaSPure Terraform AWS + +PaaSPure component for provisioning AWS resources using terraform. + +### Usage +Tested with the infra module: https://github.com/iorubs/paaspure_infra.git + +```bash +Usage: + run Provisioning resources. + destroy Destroy resources. 
+``` + + +# Sample pure.yml + +```yaml +version: 1 + +credentials: + aws_access_key: ACCESS_KEY + aws_secret_key: SECRET_KEY + +modules: + infra: + components: + docker_for_aws: + stack_name: "PaasPureDocker" + region: "eu-west-1" + parameters: + KeyName: "paaspure" + ManagerSize: 1 + ManagerInstanceType: "t2.micro" + ClusterSize: 1 + InstanceType: "t2.micro" +``` diff --git a/paaspure/infra/terraform_aws/__init__.py b/paaspure/infra/terraform_aws/__init__.py new file mode 100644 index 0000000..462e426 --- /dev/null +++ b/paaspure/infra/terraform_aws/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from .requirements import install +from .component import instance + +__all__ = ['install', 'instance'] diff --git a/paaspure/infra/terraform_aws/component.py b/paaspure/infra/terraform_aws/component.py new file mode 100644 index 0000000..77dcb16 --- /dev/null +++ b/paaspure/infra/terraform_aws/component.py @@ -0,0 +1,55 @@ +# -*- coding: utf-8 -*- + +import os +import docker +import json + +from paaspure.abstract import AbstractComponent +from paaspure.utils import build_image, copy_from_container + + +class TerraformAWS(AbstractComponent): + """Component for provisioning AWS resources.""" + def __init__(self): + super(TerraformAWS, self).__init__() + + def build(self, config, credentials): + var_file = os.path.dirname(__file__) + "/terraform.tfvars" + with open(var_file, 'w+') as f: + f.write(json.dumps(config, indent=4)) + + self.__execute_command(credentials, ['apply', '-auto-approve']) + + def destroy(self, config, credentials): + # TODO: Should destroy also remove resource files? 
+ self.__execute_command(credentials, ['destroy', '-force']) + + def __execute_command(self, credentials, command=['plan']): + build_image( + image_tag='paaspure_terraform', + path=os.path.dirname(__file__) + ) + + client = docker.from_env() + + container = client.containers.run( + 'paaspure_terraform', + environment=[ + 'AWS_ACCESS_KEY_ID=' + credentials['aws_access_key'], + 'AWS_SECRET_ACCESS_KEY=' + credentials['aws_secret_key'] + ], + command=command, + detach=True + ) + + for log in container.logs(stream=True): + print(log.decode(), end='') + + copy_from_container( + container=container, + src_path='/data/.', + dest_path=os.path.dirname(__file__) + ) + + +instance = TerraformAWS() diff --git a/paaspure/infra/terraform_aws/main.tf b/paaspure/infra/terraform_aws/main.tf new file mode 100644 index 0000000..d99a742 --- /dev/null +++ b/paaspure/infra/terraform_aws/main.tf @@ -0,0 +1,18 @@ +variable "stack_name" {} +variable "region" {} +variable "parameters" { type = "map" } + +## Infrastructure +provider "aws" { + region = "${var.region}" +} + +resource "aws_cloudformation_stack" "paaspure_infra" { + name = "${var.stack_name}" + + parameters = "${var.parameters}" + + capabilities = ["CAPABILITY_IAM"] + + template_url = "https://editions-us-east-1.s3.amazonaws.com/aws/stable/Docker.tmpl" +} diff --git a/paaspure/infra/terraform_aws/requirements.py b/paaspure/infra/terraform_aws/requirements.py new file mode 100644 index 0000000..93deeba --- /dev/null +++ b/paaspure/infra/terraform_aws/requirements.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils import pip_install + +pip_packages = [ + 'docker' +] + + +def install(): + pip_install(packages=pip_packages, component='TerraformAWS') + + +install() diff --git a/paaspure/infra/terraform_aws/test_terraform_aws.py b/paaspure/infra/terraform_aws/test_terraform_aws.py new file mode 100644 index 0000000..7428094 --- /dev/null +++ b/paaspure/infra/terraform_aws/test_terraform_aws.py @@ -0,0 +1,63 @@ +# -*- 
coding: utf-8 -*- + +import os +import pytest +import docker + +from paaspure.utils import MockContainerRun +from .component import instance + + +@pytest.fixture(scope="function") +def tfvars(): + """ Cleanup work.""" + vars = {"stack_name": "PaasPureDocker", "region": "eu-west-1", + "parameters": {"KeyName": "paaspure", "ManagerSize": 1}} + + tfvars_path = os.path.join( + os.path.dirname(__file__), + 'terraform.tfvars' + ) + + tfvars = {'vars': vars, 'path': tfvars_path} + yield tfvars + os.remove(tfvars['path']) + + +class TestTerraformAWS: + def test_mock_run(self, capsys, monkeypatch, tfvars): + assert not os.path.exists(tfvars['path']) + + mock_container = MockContainerRun( + log_output=['Some Output'], + status_code=1 + ) + + monkeypatch.setattr(docker, 'from_env', mock_container) + + instance.build( + tfvars['vars'], + {'aws_access_key': '', 'aws_secret_key': ''} + ) + + assert os.path.exists(tfvars['path']) + out, _ = capsys.readouterr() + assert 'Some Output' in out + assert 'Container execution failed' in out + + def test_mock_destroy(self, capsys, monkeypatch): + # assert os.path.exists(tfvars['path']) + + mock_container = MockContainerRun( + log_output=[], + status_code=1 + ) + + monkeypatch.setattr(docker, 'from_env', mock_container) + + instance.destroy({}, {'aws_access_key': '', 'aws_secret_key': ''}) + + # TODO: Should check for removed files + # assert not os.path.exists(tfvars['path']) + out, _ = capsys.readouterr() + assert 'Container execution failed' in out diff --git a/paaspure/infra/terraform_azure/Docker.tmpl b/paaspure/infra/terraform_azure/Docker.tmpl new file mode 100644 index 0000000..7cf6379 --- /dev/null +++ b/paaspure/infra/terraform_azure/Docker.tmpl @@ -0,0 +1,722 @@ +{ + "$schema": "https://schema.management.azure.com/schemas/2015-01-01/deploymentTemplate.json#", + "contentVersion": "1.0.0.0", + "outputs": { + "AppURL": { + "type": "String", + "value": "[concat(reference(resourceId('Microsoft.Network/publicIPAddresses', 
variables('lbPublicIPAddressName'))).dnsSettings.fqdn)]" + }, + "DefaultDNSTarget": { + "type": "string", + "value": "[reference(resourceId('Microsoft.Network/publicIPAddresses', variables('lbPublicIPAddressName'))).ipAddress]" + }, + "SSH Targets": { + "type": "string", + "value": "[concat('https://', variables('portalFQDN'), '/#resource/subscriptions/', subscription().subscriptionId, '/resourceGroups/', resourceGroup().name, '/providers/Microsoft.Network/loadBalancers/', variables('lbSSHName'), '/inboundNatRules')]" + } + }, + "parameters": { + "adServicePrincipalAppID": { + "metadata": { + "description": "AD ServicePrincipal App ID" + }, + "type": "string" + }, + "adServicePrincipalAppSecret": { + "metadata": { + "description": "AD ServicePrincipal App Secret" + }, + "type": "securestring" + }, + "enableExtLogs": { + "allowedValues": [ + "yes", + "no" + ], + "defaultValue": "yes", + "metadata": { + "description": "Stores container logs in storage container on azure" + }, + "type": "string" + }, + "enableSystemPrune": { + "allowedValues": [ + "yes", + "no" + ], + "defaultValue": "no", + "metadata": { + "description": "Cleans up unused images, containers, networks and volumes" + }, + "type": "string" + }, + "linuxSSHPublicKey": { + "metadata": { + "description": "The SSH public key used to authenticate with the created swarm. 
Usually available in $HOME/.ssh/id_rsa.pub file" + }, + "type": "string" + }, + "linuxWorkerCount": { + "allowedValues": [ + "0", + "1", + "2", + "3", + "4", + "5", + "6", + "7", + "8", + "9", + "10", + "11", + "12", + "13", + "14", + "15" + ], + "defaultValue": "1", + "metadata": { + "description": "Number of Linux worker VMs" + }, + "type": "string" + }, + "linuxWorkerVMSize": { + "allowedValues": [ + "Standard_A0", + "Standard_A1", + "Standard_A2", + "Standard_A3", + "Standard_A4", + "Standard_A5", + "Standard_A6", + "Standard_A7", + "Standard_A8", + "Standard_A9", + "Standard_A10", + "Standard_A11", + "Standard_B1s", + "Standard_D1", + "Standard_D2", + "Standard_D3", + "Standard_D4", + "Standard_D11", + "Standard_D12", + "Standard_D13", + "Standard_D14", + "Standard_D1_v2", + "Standard_D2_v2", + "Standard_D3_v2", + "Standard_D4_v2", + "Standard_D5_v2", + "Standard_D11_v2", + "Standard_D12_v2", + "Standard_D13_v2", + "Standard_D14_v2", + "Standard_D15_v2", + "Standard_G1", + "Standard_G2", + "Standard_G3", + "Standard_G4", + "Standard_G5", + "Standard_DS1", + "Standard_DS2", + "Standard_DS3", + "Standard_DS4", + "Standard_DS11", + "Standard_DS12", + "Standard_DS13", + "Standard_DS14", + "Standard_DS1_v2", + "Standard_DS2_v2", + "Standard_DS3_v2", + "Standard_DS4_v2", + "Standard_DS5_v2", + "Standard_DS11_v2", + "Standard_DS12_v2", + "Standard_DS13_v2", + "Standard_DS14_v2", + "Standard_DS15_v2", + "Standard_D2s_v3", + "Standard_D4s_v3", + "Standard_D8s_v3", + "Standard_D16s_v3", + "Standard_D32s_v3", + "Standard_D64s_v3", + "Standard_L4s", + "Standard_L8s", + "Standard_L16s", + "Standard_L32s", + "Standard_GS1", + "Standard_GS2", + "Standard_GS3", + "Standard_GS4", + "Standard_GS5" + ], + "defaultValue": "Standard_D2_v2", + "metadata": { + "description": "The size of the Linux worker VMs" + }, + "type": "string" + }, + "managerCount": { + "allowedValues": [ + "1", + "3", + "5" + ], + "defaultValue": "1", + "type": "string" + }, + "managerVMSize": { + 
"allowedValues": [ + "Standard_A0", + "Standard_A1", + "Standard_A2", + "Standard_A3", + "Standard_A4", + "Standard_A5", + "Standard_A6", + "Standard_A7", + "Standard_A8", + "Standard_A9", + "Standard_A10", + "Standard_A11", + "Standard_B1s", + "Standard_D1", + "Standard_D2", + "Standard_D3", + "Standard_D4", + "Standard_D11", + "Standard_D12", + "Standard_D13", + "Standard_D14", + "Standard_D1_v2", + "Standard_D2_v2", + "Standard_D3_v2", + "Standard_D4_v2", + "Standard_D5_v2", + "Standard_D11_v2", + "Standard_D12_v2", + "Standard_D13_v2", + "Standard_D14_v2", + "Standard_D15_v2", + "Standard_G1", + "Standard_G2", + "Standard_G3", + "Standard_G4", + "Standard_G5", + "Standard_DS1", + "Standard_DS2", + "Standard_DS3", + "Standard_DS4", + "Standard_DS11", + "Standard_DS12", + "Standard_DS13", + "Standard_DS14", + "Standard_DS1_v2", + "Standard_DS2_v2", + "Standard_DS3_v2", + "Standard_DS4_v2", + "Standard_DS5_v2", + "Standard_DS11_v2", + "Standard_DS12_v2", + "Standard_DS13_v2", + "Standard_DS14_v2", + "Standard_DS15_v2", + "Standard_D2s_v3", + "Standard_D4s_v3", + "Standard_D8s_v3", + "Standard_D16s_v3", + "Standard_D32s_v3", + "Standard_D64s_v3", + "Standard_L4s", + "Standard_L8s", + "Standard_L16s", + "Standard_L32s", + "Standard_GS1", + "Standard_GS2", + "Standard_GS3", + "Standard_GS4", + "Standard_GS5" + ], + "defaultValue": "Standard_D2_v2", + "metadata": { + "description": "The size of the Swarm manager VMs" + }, + "type": "string" + }, + "swarmName": { + "defaultValue": "dockerswarm", + "metadata": { + "description": "Define how the swarm resources should be named." 
+ }, + "type": "string" + } + }, + "resources": [ + { + "apiVersion": "[variables('storageApiVersion')]", + "copy": { + "count": "[length(variables('uniqueStringArray'))]", + "name": "storageLoop" + }, + "kind": "Storage", + "location": "[resourceGroup().location]", + "name": "[concat(variables('uniqueStringArray')[copyIndex()], variables('storageAccountSuffix'))]", + "sku": { + "name": "Standard_LRS" + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Storage/storageAccounts" + }, + { + "apiVersion": "[variables('storageApiVersion')]", + "kind": "Storage", + "location": "[resourceGroup().location]", + "name": "[variables('swarmLogsStorageAccount')]", + "sku": { + "name": "Standard_LRS" + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Storage/storageAccounts" + }, + { + "apiVersion": "[variables('apiVersion')]", + "location": "[resourceGroup().location]", + "name": "[variables('virtualNetworkName')]", + "properties": { + "addressSpace": { + "addressPrefixes": [ + "[variables('subnetPrefix')]", + "[variables('managerAddressPrefix')]" + ] + }, + "subnets": [ + { + "name": "[variables('subnetName')]", + "properties": { + "addressPrefix": "[variables('subnetPrefix')]" + } + } + ] + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Network/virtualNetworks" + }, + { + "apiVersion": "[variables('vmssApiVersion')]", + "dependsOn": [ + "[variables('vnetID')]", + "[variables('lbSSHID')]", + "storageLoop" + ], + "location": "[resourceGroup().location]", + "name": "[variables('managerVMSSName')]", + "plan": { + "name": "[variables('linuxImageSku')]", + "product": "[variables('linuxImageOffer')]", + "publisher": "[variables('linuxImagePublisher')]" + }, + "properties": { + "overprovision": false, + "upgradePolicy": 
{ + "mode": "Manual" + }, + "virtualMachineProfile": { + "diagnosticsProfile": { + "bootDiagnostics": { + "enabled": true, + "storageUri": "[reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[0], variables('storageAccountSuffix'))).primaryEndpoints.blob]" + } + }, + "networkProfile": { + "networkInterfaceConfigurations": [ + { + "name": "managerNodeNic", + "properties": { + "ipConfigurations": [ + { + "name": "mgripconfig", + "privateIPAllocationMethod": "dynamic", + "properties": { + "loadBalancerBackendAddressPools": [ + { + "id": "[variables('lbSSHBackendAddressPoolID')]" + } + ], + "loadBalancerInboundNatPools": [ + { + "id": "[variables('lbSSHNATPoolID')]" + }, + { + "id": "[variables('lbSwarmNATPoolID')]" + } + ], + "subnet": { + "id": "[variables('subnetRef')]" + } + } + } + ], + "primary": true + } + } + ] + }, + "osProfile": { + "adminUsername": "[variables('adminUsername')]", + "computerNamePrefix": "[variables('managerVMNamePrefix')]", + "customData": "[base64(concat('#!/bin/bash', '\n', 'export ROLE=\"MANAGER\"', '\n', 'export LB_IP=\"', reference(resourceId('Microsoft.Network/publicIPAddresses', variables('lbPublicIPAddressName'))).ipAddress, '\"', '\n', variables('customData')))]", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "keyData": "[variables('sshRSAPublicKey')]", + "path": "[variables('sshKeyPath')]" + } + ] + } + } + }, + "storageProfile": { + "imageReference": "[variables('linuxImageReference')]", + "osDisk": { + "caching": "ReadWrite", + "createOption": "FromImage", + "name": "[concat(variables('managerVMNamePrefix'), 'vmssosdisk')]", + "vhdContainers": [ + "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[0], variables('storageAccountSuffix'))).primaryEndpoints.blob, variables('vhdContainerName'))]" + ] + } + } + } + }, + "sku": { + "capacity": "[variables('managerCount')]", + "name": 
"[variables('managerVMSize')]", + "tier": "Standard" + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Compute/virtualMachineScaleSets" + }, + { + "apiVersion": "[variables('vmssApiVersion')]", + "dependsOn": [ + "[variables('vnetID')]", + "[variables('lbID')]", + "storageLoop" + ], + "location": "[resourceGroup().location]", + "name": "[variables('linuxWorkerVMSSName')]", + "plan": { + "name": "[variables('linuxImageSku')]", + "product": "[variables('linuxImageOffer')]", + "publisher": "[variables('linuxImagePublisher')]" + }, + "properties": { + "overprovision": false, + "upgradePolicy": { + "mode": "Manual" + }, + "virtualMachineProfile": { + "diagnosticsProfile": { + "bootDiagnostics": { + "enabled": true, + "storageUri": "[reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[0], variables('storageAccountSuffix'))).primaryEndpoints.blob]" + } + }, + "networkProfile": { + "networkInterfaceConfigurations": [ + { + "name": "workerNodeNic", + "properties": { + "ipConfigurations": [ + { + "name": "nicipconfig", + "privateIPAllocationMethod": "dynamic", + "properties": { + "loadBalancerBackendAddressPools": [ + { + "id": "[ variables('lbBackendAddressPoolID')]" + } + ], + "subnet": { + "id": "[variables('subnetRef')]" + } + } + } + ], + "primary": true + } + } + ] + }, + "osProfile": { + "adminUsername": "[variables('adminUsername')]", + "computerNamePrefix": "[variables('linuxWorkerVMNamePrefix')]", + "customData": "[base64(concat('#!/bin/bash', '\n', 'export ROLE=\"WORKER\"', '\n', 'export LB_IP=\"', reference(resourceId('Microsoft.Network/publicIPAddresses', variables('lbPublicIPAddressName'))).ipAddress, '\"', '\n', variables('customData')))]", + "linuxConfiguration": { + "disablePasswordAuthentication": true, + "ssh": { + "publicKeys": [ + { + "keyData": "[variables('sshRSAPublicKey')]", + "path": "[variables('sshKeyPath')]" + } + ] + } + } 
+ }, + "storageProfile": { + "imageReference": "[variables('linuxImageReference')]", + "osDisk": { + "caching": "ReadWrite", + "createOption": "FromImage", + "name": "[concat(variables('linuxWorkerVMNamePrefix'), 'vmssosdisk', 0)]", + "vhdContainers": [ + "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[0], variables('storageAccountSuffix'))).primaryEndpoints.blob, variables('vhdContainerName'))]", + "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[1], variables('storageAccountSuffix'))).primaryEndpoints.blob, variables('vhdContainerName'))]", + "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[2], variables('storageAccountSuffix'))).primaryEndpoints.blob, variables('vhdContainerName'))]", + "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[3], variables('storageAccountSuffix'))).primaryEndpoints.blob, variables('vhdContainerName'))]", + "[concat(reference(concat('Microsoft.Storage/storageAccounts/', variables('uniqueStringArray')[4], variables('storageAccountSuffix'))).primaryEndpoints.blob, variables('vhdContainerName'))]" + ] + } + } + } + }, + "sku": { + "capacity": "[variables('linuxWorkerCount')]", + "name": "[variables('linuxWorkerVMSize')]", + "tier": "Standard" + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Compute/virtualMachineScaleSets" + }, + { + "apiVersion": "[variables('apiVersion')]", + "location": "[resourceGroup().location]", + "name": "[variables('lbPublicIPAddressName')]", + "properties": { + "dnsSettings": { + "domainNameLabel": "[variables('lbPublicIpDnsName')]" + }, + "publicIPAllocationMethod": "Static" + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Network/publicIPAddresses" + 
}, + { + "apiVersion": "[variables('apiVersion')]", + "dependsOn": [ + "[concat('Microsoft.Network/publicIPAddresses/', variables('lbPublicIPAddressName'))]" + ], + "location": "[resourceGroup().location]", + "name": "[variables('lbName')]", + "properties": { + "backendAddressPools": [ + { + "name": "default" + } + ], + "frontendIPConfigurations": [ + { + "name": "default", + "properties": { + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('lbPublicIPAddressName'))]" + } + } + } + ], + "probes": [ + { + "name": "default", + "properties": { + "intervalInSeconds": 10, + "numberOfProbes": 2, + "port": 44554, + "protocol": "Tcp" + } + } + ] + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Network/loadBalancers" + }, + { + "apiVersion": "[variables('apiVersion')]", + "location": "[resourceGroup().location]", + "name": "[variables('lbSSHPublicIPAddressName')]", + "properties": { + "publicIPAllocationMethod": "Static" + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Network/publicIPAddresses" + }, + { + "apiVersion": "[variables('apiVersion')]", + "dependsOn": [ + "[concat('Microsoft.Network/publicIPAddresses/', variables('lbSSHPublicIPAddressName'))]" + ], + "location": "[resourceGroup().location]", + "name": "[variables('lbSSHName')]", + "properties": { + "backendAddressPools": [ + { + "name": "default" + } + ], + "frontendIPConfigurations": [ + { + "name": "default", + "properties": { + "publicIPAddress": { + "id": "[resourceId('Microsoft.Network/publicIPAddresses', variables('lbSSHPublicIPAddressName'))]" + } + } + } + ], + "inboundNatPools": [ + { + "name": "default", + "properties": { + "backendPort": 22, + "frontendIPConfiguration": { + "id": "[variables('lbSSHFrontEndIPConfigID')]" + }, + "frontendPortRangeEnd": 
"[variables('natSSHEndPort')]", + "frontendPortRangeStart": "[variables('natSSHStartPort')]", + "protocol": "tcp" + } + }, + { + "name": "swarmcontrol", + "properties": { + "backendPort": 2377, + "frontendIPConfiguration": { + "id": "[variables('lbSSHFrontEndIPConfigID')]" + }, + "frontendPortRangeEnd": "50201", + "frontendPortRangeStart": "50101", + "protocol": "tcp" + } + } + ], + "probes": [ + { + "name": "default", + "properties": { + "intervalInSeconds": 10, + "numberOfProbes": 2, + "port": 22, + "protocol": "Tcp" + } + } + ] + }, + "tags": { + "channel": "[variables('channel')]", + "provider": "[toUpper(variables('DockerProviderTag'))]" + }, + "type": "Microsoft.Network/loadBalancers" + } + ], + "variables": { + "Description": "Docker for Azure 18.03.0-ce-azure1", + "DockerProviderTag": "8CF0E79C-DF97-4992-9B59-602DB544D354", + "accountID": "[subscription().subscriptionId]", + "adServicePrincipalTenantID": "[subscription().tenantId]", + "adminUsername": "docker", + "apiVersion": "2016-03-30", + "azureEnvironment": "AzureCloud", + "basePrefix": "[parameters('swarmName')]", + "channel": "stable", + "customData": "[concat('export ACCOUNT_ID=\"', variables('accountID'), '\"', '\n', 'export SUB_ID=\"', variables('accountID'), '\"', '\n', 'export GROUP_NAME=\"', variables('groupName'), '\"', '\n', 'export LB_NAME=\"', variables('lbName'), '\"', '\n', 'export APP_ID=\"', parameters('adServicePrincipalAppID'), '\"', '\n', 'export APP_SECRET=\"', parameters('adServicePrincipalAppSecret'), '\"', '\n', 'export TENANT_ID=\"', variables('adServicePrincipalTenantID'), '\"', '\n', 'export SWARM_INFO_STORAGE_ACCOUNT=\"', variables('swarmInfoStorageAccount'), '\"', '\n', 'export SWARM_LOGS_STORAGE_ACCOUNT=\"', variables('swarmLogsStorageAccount'), '\"', '\n', 'export PRIVATE_IP=$(ifconfig eth0 | grep \"inet addr:\" | cut -d: -f2 | cut -d\" \" -f1)\n', 'export AZURE_HOSTNAME=$(hostname)\n', '\n', 'docker run --label com.docker.editions.system --log-driver=json-file 
--restart=no -it -e LB_NAME -e SUB_ID -e ROLE -e TENANT_ID -e APP_ID -e APP_SECRET -e ACCOUNT_ID -e GROUP_NAME -e PRIVATE_IP -e DOCKER_FOR_IAAS_VERSION -e SWARM_INFO_STORAGE_ACCOUNT -e SWARM_LOGS_STORAGE_ACCOUNT -e AZURE_HOSTNAME -v /var/run/docker.sock:/var/run/docker.sock -v /usr/bin/docker:/usr/bin/docker -v /var/lib/docker:/var/lib/docker -v /var/log:/var/log docker4x/init-azure:\"$DOCKER_FOR_IAAS_VERSION\"\n')]", + "docker": "18.03.0-ce", + "editionAddOn": "base", + "extlbname": "[concat(variables('lbPublicIpDnsName'), '.', resourceGroup().location, '.', 'cloudapp.azure.com')]", + "groupName": "[resourceGroup().name]", + "lbBackendAddressPoolID": "[concat(variables('lbID'),'/backendAddressPools/default')]", + "lbID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbName'))]", + "lbName": "externalLoadBalancer", + "lbPublicIPAddressName": "[concat(variables('basePrefix'), '-', variables('lbName'), '-public-ip')]", + "lbPublicIpDnsName": "[concat('applb-', uniqueString(resourceGroup().id))]", + "lbSSHBackendAddressPoolID": "[concat(variables('lbSSHID'),'/backendAddressPools/default')]", + "lbSSHFrontEndIPConfigID": "[concat(variables('lbSSHID'),'/frontendIPConfigurations/default')]", + "lbSSHID": "[resourceId('Microsoft.Network/loadBalancers',variables('lbSSHName'))]", + "lbSSHNATPoolID": "[concat(variables('lbSSHID'),'/inboundNatPools/default')]", + "lbSwarmNATPoolID": "[concat(variables('lbSSHID'),'/inboundNatPools/swarmcontrol')]", + "lbSSHName": "externalSSHLoadBalancer", + "lbSSHPublicIPAddressName": "[concat(variables('basePrefix'), '-', variables('lbSSHName'), '-public-ip')]", + "linuxImageOffer": "docker-ce", + "linuxImagePublisher": "docker", + "linuxImageReference": { + "offer": "[variables('linuxImageOffer')]", + "publisher": "[variables('linuxImagePublisher')]", + "sku": "[variables('linuxImageSku')]", + "version": "[variables('linuxImageVersion')]" + }, + "linuxImageSku": "docker-ce", + "linuxImageVersion": "1.0.18", + "linuxWorkerCount": 
"[parameters('linuxWorkerCount')]", + "linuxWorkerVMNamePrefix": "swarm-worker", + "linuxWorkerVMSSName": "swarm-worker-vmss", + "linuxWorkerVMSize": "[parameters('linuxWorkerVMSize')]", + "managerAddressPrefix": "172.16.0.0/24", + "managerCount": "[parameters('managerCount')]", + "managerVMNamePrefix": "swarm-manager", + "managerVMSSName": "swarm-manager-vmss", + "managerVMSize": "[parameters('managerVMSize')]", + "natSSHEndPort": 50100, + "natSSHStartPort": 50000, + "portalFQDN": "portal.azure.com", + "scriptsUrl": "https://download.docker.com/azure/stable/18.03.0-ce/", + "scriptsVersion": "18.03.0-ce-azure1", + "sshKeyPath": "[concat('/home/', variables('adminUsername'), '/.ssh/authorized_keys')]", + "sshRSAPublicKey": "[parameters('linuxSSHPublicKey')]", + "storageAccountSuffix": "docker", + "storageApiVersion": "2016-12-01", + "subnetName": "[concat(variables('basePrefix'), '-subnet')]", + "subnetPrefix": "10.0.0.0/8", + "subnetRef": "[concat(variables('vnetID'),'/subnets/', variables('subnetName'))]", + "swarmInfoStorageAccount": "[concat(variables('uniqueStringArray')[0], variables('storageAccountSuffix'))]", + "swarmLogsStorageAccount": "[concat(uniqueString(concat(resourceGroup().id, variables('storageAccountSuffix'))), 'logs')]", + "uniqueStringArray": [ + "[concat(uniqueString(concat(resourceGroup().id, variables('storageAccountSuffix'), '0')))]", + "[concat(uniqueString(concat(resourceGroup().id, variables('storageAccountSuffix'), '1')))]", + "[concat(uniqueString(concat(resourceGroup().id, variables('storageAccountSuffix'), '2')))]", + "[concat(uniqueString(concat(resourceGroup().id, variables('storageAccountSuffix'), '3')))]", + "[concat(uniqueString(concat(resourceGroup().id, variables('storageAccountSuffix'), '4')))]" + ], + "vhdContainerName": "dockervhd", + "virtualNetworkName": "[concat(variables('basePrefix'), '-vnet')]", + "vmssApiVersion": "2017-03-30", + "vnetID": "[resourceId('Microsoft.Network/virtualNetworks', 
variables('virtualNetworkName'))]" + } +} diff --git a/paaspure/infra/terraform_azure/Dockerfile b/paaspure/infra/terraform_azure/Dockerfile new file mode 100644 index 0000000..cf4ef5b --- /dev/null +++ b/paaspure/infra/terraform_azure/Dockerfile @@ -0,0 +1,7 @@ +FROM hashicorp/terraform:0.11.7 + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +WORKDIR /data +COPY . . +RUN terraform init diff --git a/paaspure/infra/terraform_azure/README.md b/paaspure/infra/terraform_azure/README.md new file mode 100644 index 0000000..a390791 --- /dev/null +++ b/paaspure/infra/terraform_azure/README.md @@ -0,0 +1,76 @@ +# PaaSPure Packer Azure + +PaaSPure component for building and provisioning Azure cloud images using packer. + +# Sample pure.yml + +```yaml +version: 1 + +credentials: + private_key: + azure_client_id: + azure_client_secret: + azure_tenant_id: + subscription_id: + +modules: + infra: + components: + docker_for_azure: + stack_name: "PaasPureDocker" + resource_group_name: paaspureswarm + resource_group_location: "North Europe" + parameters: + enableExtLogs: "no" + linuxSSHPublicKey: /app/paaspure/paaspure.pub + managerCount: 1 + managerVMSize: "Standard_D1_v2" + linuxWorkerCount: 1 + linuxWorkerVMSize: "Standard_D1_v2" + swarmName: "dockerswarm" +``` + +# Setup permissions (Warning this must be done manually in advance of the first setup.) + +## New App Registration + +This is required for running tasks programmatically using Terraform. It will generate the required azure_client_id, azure_client_secret and azure_tenant_id. + +1. Run ```docker run -it --rm docker4x/create-sp-azure ${SP-NAME}``` +2. Where SP-NAME equals the desired app name. E.g paaspure +3. Login to shown URL with the given authentication code. +4. Go back to terminal and select subscription. +5. 
Wait a couple minutes and you should see the following: + +```bash +Your access credentials ================================================== +AD ServicePrincipal App ID: azure_client_id +AD ServicePrincipal App Secret: azure_client_secret +AD ServicePrincipal Tenant ID: azure_tenant_id +``` + +## Image Permissions (DOCKER FOR AZURE SPECIFIC: You must accept the cloud images) + +Without this you will see: "Marketplace purchase eligibility check returned errors." + +1. Run ```docker run -it --rm microsoft/azure-cli``` +2. Run ```az login``` +3. Login to shown URL with the given authentication code. +4. Go back to terminal. +5. Run ```az vm image accept-terms --urn docker:docker-ce:docker-ce:1.0.18``` + +Optional: You can list the available linuxImageVersions + +1. Run ```az vm image list --all --publisher docker --offer docker-ce --sku docker-ce``` +2. Update linuxImageVersion in Docker.tmpl to the desired version. + +### Changes made to default Docker for azure tmpl + +#### Required +Changed parameters to allow strings (seems like terraform passes Ints to azure templates as Strings) + +#### Optional +Added Standard_B1s to available instance types to make use of free subscription resources. + +Lines: [360 to 362], [617 to 628] and [674], were changed to open extra ports for hybrid cloud demo.
(In the feature this should be done in a separate component for security reasons) diff --git a/paaspure/infra/terraform_azure/__init__.py b/paaspure/infra/terraform_azure/__init__.py new file mode 100644 index 0000000..462e426 --- /dev/null +++ b/paaspure/infra/terraform_azure/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from .requirements import install +from .component import instance + +__all__ = ['install', 'instance'] diff --git a/paaspure/infra/terraform_azure/component.py b/paaspure/infra/terraform_azure/component.py new file mode 100644 index 0000000..5bdb054 --- /dev/null +++ b/paaspure/infra/terraform_azure/component.py @@ -0,0 +1,65 @@ +# -*- coding: utf-8 -*- + +import os +import docker +import json + +from paaspure.abstract import AbstractComponent +from paaspure.utils import build_image, copy_from_container + + +class TerraformAzure(AbstractComponent): + """Component for provisioning Azure resources.""" + def __init__(self): + super(TerraformAzure, self).__init__() + + def build(self, config, credentials): + with open(config['parameters']['linuxSSHPublicKey'], 'r') as f: + config['parameters']['linuxSSHPublicKey'] = f.read() + + config['parameters']['adServicePrincipalAppID'] = \ + credentials['azure_client_id'] + config['parameters']['adServicePrincipalAppSecret'] = \ + credentials['azure_client_secret'] + + var_file = os.path.dirname(__file__) + "/terraform.tfvars" + with open(var_file, 'w+') as f: + f.write(json.dumps(config, indent=4)) + + self.__execute_command(credentials, ['apply', '-auto-approve']) + + def destroy(self, config, credentials): + self.__execute_command(credentials, ['destroy', '-force']) + + def __execute_command(self, credentials, command=['plan']): + build_image( + image_tag='paaspure_terraform', + path=os.path.dirname(__file__) + ) + + client = docker.from_env() + + container = client.containers.run( + 'paaspure_terraform', + environment=[ + 'ARM_CLIENT_ID=' + credentials['azure_client_id'], + 'ARM_CLIENT_SECRET=' + 
credentials['azure_client_secret'], + 'ARM_TENANT_ID=' + credentials['azure_tenant_id'], + 'ARM_SUBSCRIPTION_ID=' + credentials['subscription_id'] + + ], + command=command, + detach=True + ) + + for log in container.logs(stream=True): + print(log.decode(), end='') + + copy_from_container( + container=container, + src_path='/data/.', + dest_path=os.path.dirname(__file__) + ) + + +instance = TerraformAzure() diff --git a/paaspure/infra/terraform_azure/main.tf b/paaspure/infra/terraform_azure/main.tf new file mode 100644 index 0000000..edabfc4 --- /dev/null +++ b/paaspure/infra/terraform_azure/main.tf @@ -0,0 +1,22 @@ +variable "stack_name" {} +variable "resource_group_name" {} +variable "resource_group_location" {} +variable "parameters" { type = "map" } + +## Infrastructure +resource "azurerm_resource_group" "paaspure_infra" { + name = "${var.resource_group_name}" + location = "${var.resource_group_location}" +} + +resource "azurerm_template_deployment" "paaspure_infra" { + name = "${var.stack_name}" + + resource_group_name = "${azurerm_resource_group.paaspure_infra.name}" + + template_body = "${file("./Docker.tmpl")}" + + parameters = "${var.parameters}" + + deployment_mode = "Incremental" +} diff --git a/paaspure/infra/terraform_azure/requirements.py b/paaspure/infra/terraform_azure/requirements.py new file mode 100644 index 0000000..89d3d3c --- /dev/null +++ b/paaspure/infra/terraform_azure/requirements.py @@ -0,0 +1,14 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils import pip_install + +pip_packages = [ + 'docker' +] + + +def install(): + pip_install(packages=pip_packages, component='DockerForAzure') + + +install() diff --git a/paaspure/infra/terraform_azure/test_terraform_azure.py b/paaspure/infra/terraform_azure/test_terraform_azure.py new file mode 100644 index 0000000..adb6685 --- /dev/null +++ b/paaspure/infra/terraform_azure/test_terraform_azure.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +import os +import pytest +import docker + +from 
paaspure.utils import MockContainerRun +from .component import instance + + +@pytest.fixture(scope="function") +def tfvars(): + """ Cleanup work.""" + vars = {'parameters': {'linuxSSHPublicKey': __file__}} + + tfvars_path = os.path.join( + os.path.dirname(__file__), + 'terraform.tfvars' + ) + + tfvars = {'vars': vars, 'path': tfvars_path} + yield tfvars + os.remove(tfvars['path']) + + +class TestTerraformAzure: + def test_mock_run(self, capsys, monkeypatch, tfvars): + assert not os.path.exists(tfvars['path']) + + mock_container = MockContainerRun( + log_output=['Some Output'], + status_code=1 + ) + + monkeypatch.setattr(docker, 'from_env', mock_container) + + instance.build( + tfvars['vars'], + {'azure_client_id': '', 'azure_client_secret': '', + 'azure_tenant_id': '', 'subscription_id': ''} + ) + + assert os.path.exists(tfvars['path']) + out, _ = capsys.readouterr() + assert 'Some Output' in out + assert 'Container execution failed' in out + + def test_mock_destroy(self, capsys, monkeypatch): + # assert os.path.exists(tfvars['path']) + + mock_container = MockContainerRun( + log_output=[], + status_code=1 + ) + + monkeypatch.setattr(docker, 'from_env', mock_container) + + instance.destroy({}, {'azure_client_id': '', 'azure_client_secret': '', + 'azure_tenant_id': '', 'subscription_id': ''}) + + # TODO: Should check for removed files + # assert not os.path.exists(tfvars['path']) + out, _ = capsys.readouterr() + assert 'Container execution failed' in out + + @pytest.mark.skip(reason='Still not sure what tfvars must look like.') + def test_validate_tfvars(self): + # TODO: Should pass in some dummy cred and ensure tfvars is valid. 
+ pass diff --git a/paaspure/infra/test_infra.py b/paaspure/infra/test_infra.py new file mode 100644 index 0000000..e69de29 diff --git a/paaspure/log_management/README.md b/paaspure/log_management/README.md new file mode 100644 index 0000000..7144d75 --- /dev/null +++ b/paaspure/log_management/README.md @@ -0,0 +1,23 @@ +# PaaSPure Logging + +Abstraction module for components used to setup a logging solution. + +### Usage + +```bash +usage: paaspure logging + +Options: + -h, --help show this help message and exit +``` + +### Sample PureFile + +```yaml +log_management: + orchestrator: $ORCHESTRATOR_NAME + components: + $COMPONENT_NAME: + $COMPONENT_ARG1: $VAL1 + ... +``` diff --git a/paaspure/log_management/__init__.py b/paaspure/log_management/__init__.py new file mode 100644 index 0000000..26b7809 --- /dev/null +++ b/paaspure/log_management/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .module import instance + +__all__ = ['instance'] diff --git a/paaspure/log_management/argparser.py b/paaspure/log_management/argparser.py new file mode 100644 index 0000000..4ff25d6 --- /dev/null +++ b/paaspure/log_management/argparser.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class LogManagementParser(AbstractParser): + """New auto-generated modulo argparse template.""" + def __init__(self, module): + super(LogManagementParser, self).__init__(__file__) + self.initialize(module) + + def initialize(self, module): + module.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} COMMAND', + f'{self.name}', + 'Manage log management solution.' 
+ ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Deploy Log Management solution.', + usage=f'paaspure {self.name} run' + ) + + module.run_parser = sub_parsers.add_parser( + 'destroy', + help='Destroy LogManagement resources.', + usage=f'paaspure {self.name} destroy' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) diff --git a/paaspure/log_management/elk_stack/README.md b/paaspure/log_management/elk_stack/README.md new file mode 100644 index 0000000..369d719 --- /dev/null +++ b/paaspure/log_management/elk_stack/README.md @@ -0,0 +1,15 @@ +# ELK Stack +Log management solution. + +### Usage + +```yaml +log_management: + orchestrator: orchestrator + components: + elk_stack: +``` +More coming soon... + +### Todo +1. Update Instructions diff --git a/paaspure/log_management/elk_stack/__init__.py b/paaspure/log_management/elk_stack/__init__.py new file mode 100644 index 0000000..485dd29 --- /dev/null +++ b/paaspure/log_management/elk_stack/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .component import instance + +__all__ = ['instance'] diff --git a/paaspure/log_management/elk_stack/component.py b/paaspure/log_management/elk_stack/component.py new file mode 100644 index 0000000..74476a7 --- /dev/null +++ b/paaspure/log_management/elk_stack/component.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +import os + +from paaspure.abstract import AbstractComponent +from paaspure.utils import docker_stack + + +class TraefikReverseProxy(AbstractComponent): + """Deploy the areverse proxy traefik.""" + def __init__(self): + super(TraefikReverseProxy, self).__init__() + + def build(self, config, client): + docker_stack( + command='deploy', + compose_file=os.path.dirname(__file__) + '/docker-compose.yml', + stack_name='logging' + ) + + def destroy(self, 
config, client): + docker_stack( + command='rm', + stack_name='logging' + ) + + +instance = TraefikReverseProxy() diff --git a/paaspure/log_management/elk_stack/docker-compose.yml b/paaspure/log_management/elk_stack/docker-compose.yml new file mode 100644 index 0000000..d7a0c4a --- /dev/null +++ b/paaspure/log_management/elk_stack/docker-compose.yml @@ -0,0 +1,79 @@ +version: '3.4' + +volumes: + es_data: + +services: + logspout: + image: gliderlabs/logspout + command: 'syslog+tcp://logstash:5000' + environment: + - LOGSPOUT=ignore + networks: + - default + volumes: + - /var/run/docker.sock:/var/run/docker.sock + deploy: + mode: global + restart_policy: + condition: on-failure + + logstash: + image: devopz/logstash-json-traefik:6.2.3 + environment: + - LOGSPOUT=ignore + - DROP_NON_JSON=false + - STDOUT=true + ports: + - 5000 + networks: + - default + deploy: + mode: replicated + replicas: 1 + + kibana: + image: docker.elastic.co/kibana/kibana:6.2.3 + networks: + - traefik_default + - default + environment: + - LOGSPOUT=ignore + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: on-failure + labels: + - traefik.backend=logging + - traefik.port=5601 + - traefik.frontend.rule=Host:logging.demo + - traefik.docker.network=traefik_default + + elasticsearch: + image: docker.elastic.co/elasticsearch/elasticsearch:6.2.3 + networks: + - default + ports: + - 9200 + volumes: + - es_data:/usr/share/elasticsearch/data + environment: + - http.host=0.0.0.0 + - transport.host=localhost + - network.host=0.0.0.0 + - xpack.security.enabled=false + - "ES_JAVA_OPTS=-Xms512m -Xmx512m" + - LOGSPOUT=ignore + deploy: + resources: + limits: + memory: 2g + mode: replicated + replicas: 1 + +networks: + traefik_default: + external: true + default: + external: false diff --git a/paaspure/log_management/module.py b/paaspure/log_management/module.py new file mode 100644 index 0000000..0d723bf --- /dev/null +++ b/paaspure/log_management/module.py @@ -0,0 +1,19 @@ +# -*- coding: 
utf-8 -*- + +from paaspure.abstract import AbstractModule +from .argparser import LogManagementParser + + +class LogManagement(AbstractModule): + """ + Abstraction module for deploying general components. + """ + def __init__(self): + LogManagementParser(self) + super(LogManagement, self).__init__(__file__) + + def execute(self, config, args): + super(LogManagement, self).general_deploy(config, args) + + +instance = LogManagement() diff --git a/paaspure/monitoring/README.md b/paaspure/monitoring/README.md new file mode 100644 index 0000000..09e1571 --- /dev/null +++ b/paaspure/monitoring/README.md @@ -0,0 +1,23 @@ +# PaaSPure Monitoring + +Abstraction module for components used to setup a monitoring solution. + +### Usage + +```bash +usage: paaspure monitoring + +Options: + -h, --help show this help message and exit +``` + +### Sample PureFile + +```yaml +monitoring: + orchestrator: $ORCHESTRATOR_NAME + components: + $COMPONENT_NAME: + $COMPONENT_ARG1: $VAL1 + ... +``` diff --git a/paaspure/monitoring/__init__.py b/paaspure/monitoring/__init__.py new file mode 100644 index 0000000..26b7809 --- /dev/null +++ b/paaspure/monitoring/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .module import instance + +__all__ = ['instance'] diff --git a/paaspure/monitoring/argparser.py b/paaspure/monitoring/argparser.py new file mode 100644 index 0000000..7409fc9 --- /dev/null +++ b/paaspure/monitoring/argparser.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class MonitoringParser(AbstractParser): + """New auto-generated modulo argparse template.""" + def __init__(self, module): + super(MonitoringParser, self).__init__(__file__) + self.initialize(module) + + def initialize(self, module): + module.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} COMMAND', + f'{self.name}', + 'Manage monitoring solution.' 
+ ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Deploy Monitoring solution.', + usage=f'paaspure {self.name} run' + ) + + module.run_parser = sub_parsers.add_parser( + 'destroy', + help='Destroy resources.', + usage=f'paaspure {self.name} destroy' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) diff --git a/paaspure/monitoring/module.py b/paaspure/monitoring/module.py new file mode 100644 index 0000000..971605e --- /dev/null +++ b/paaspure/monitoring/module.py @@ -0,0 +1,19 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractModule +from .argparser import MonitoringParser + + +class Monitoring(AbstractModule): + """ + Abstraction module for Monitoring service and infrastructure. + """ + def __init__(self): + MonitoringParser(self) + super(Monitoring, self).__init__(__file__) + + def execute(self, config, args): + super(Monitoring, self).general_deploy(config, args) + + +instance = Monitoring() diff --git a/paaspure/monitoring/prom_stack/README.md b/paaspure/monitoring/prom_stack/README.md new file mode 100644 index 0000000..8836ae8 --- /dev/null +++ b/paaspure/monitoring/prom_stack/README.md @@ -0,0 +1,29 @@ +# Prom Stack +Monitoring stack with Prometheus and Grafana + +### Usage + +```yaml +monitoring: + orchestrator: orchestrator + components: + prom_stack: +``` +More coming soon... + +### Grafana usage +Default username admin +Default password admin + +1. Login and create a new Prometheus data point. +2. Import dashboards using their IDs + +### Useful dashboards +1442 - node exporter (host stats) +2603 - cadvisor (docker stats) +2240 - traefik reverse proxy stats +3662 - prometheus scraping stats + +### Todo +1. Update Instructions +2. 
Add AlertManager diff --git a/paaspure/monitoring/prom_stack/__init__.py b/paaspure/monitoring/prom_stack/__init__.py new file mode 100644 index 0000000..485dd29 --- /dev/null +++ b/paaspure/monitoring/prom_stack/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .component import instance + +__all__ = ['instance'] diff --git a/paaspure/monitoring/prom_stack/component.py b/paaspure/monitoring/prom_stack/component.py new file mode 100644 index 0000000..7a3e511 --- /dev/null +++ b/paaspure/monitoring/prom_stack/component.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +import os + +from paaspure.abstract import AbstractComponent +from paaspure.utils import docker_stack + + +class TraefikReverseProxy(AbstractComponent): + """Deploy the areverse proxy traefik.""" + def __init__(self): + super(TraefikReverseProxy, self).__init__() + + def build(self, config, client): + docker_stack( + command='deploy', + compose_file=os.path.dirname(__file__) + '/docker-compose.yml', + stack_name='monitoring' + ) + + def destroy(self, config, client): + docker_stack( + command='rm', + stack_name='monitoring' + ) + + +instance = TraefikReverseProxy() diff --git a/paaspure/monitoring/prom_stack/config/prometheus.yml b/paaspure/monitoring/prom_stack/config/prometheus.yml new file mode 100644 index 0000000..253468f --- /dev/null +++ b/paaspure/monitoring/prom_stack/config/prometheus.yml @@ -0,0 +1,39 @@ +global: + scrape_interval: 15s # Set the scrape interval to every 15 seconds. Default is every 1 minute. + evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute. + # scrape_timeout is set to the global default (10s). + +# Alertmanager configuration +alerting: + alertmanagers: + - static_configs: + # - targets: ['alertmanager:9093'] + +# Load rules once and periodically evaluate them according to the global 'evaluation_interval'. 
+rule_files: + # - "first_rules.yml" + # - "second_rules.yml" + +# A scrape configuration containing exactly one endpoint to scrape: +scrape_configs: + # Prometheus itself. + - job_name: 'prometheus' + # metrics_path defaults to '/metrics' + # scheme defaults to 'http'. + static_configs: + - targets: ['localhost:9090'] + + - job_name: 'node-exporter' + scrape_interval: 15s + static_configs: + - targets: ['exporter:9100'] + + - job_name: 'cadvisor' + scrape_interval: 15s + static_configs: + - targets: ['cadvisor:8080'] + + - job_name: 'traefik' + scrape_interval: 15s + static_configs: + - targets: ['traefik:8080'] diff --git a/paaspure/monitoring/prom_stack/docker-compose.yml b/paaspure/monitoring/prom_stack/docker-compose.yml new file mode 100644 index 0000000..690399c --- /dev/null +++ b/paaspure/monitoring/prom_stack/docker-compose.yml @@ -0,0 +1,86 @@ +version: '3.4' + +volumes: + prometheus: + grafana: + +services: + exporter: + image: prom/node-exporter:v0.15.2 + ports: + - '9100' + volumes: + - /:/rootfs:ro + - /sys:/host/sys:ro + - /proc:/host/proc:ro + command: --collector.filesystem.ignored-mount-points "^/rootfs/(var/lib/docker/)|(run/docker/netns/)|(sys/kernel/debug/).*" + deploy: + mode: global + restart_policy: + condition: on-failure + + cadvisor: + image: google/cadvisor + volumes: + - /:/rootfs:ro + - /var/run:/var/run:rw + - /sys:/sys:ro + - /var/lib/docker/:/var/lib/docker:ro + ports: + - '8080' + deploy: + mode: global + restart_policy: + condition: on-failure + + prometheus: + image: prom/prometheus:v2.3.2 + depends_on: + - exporter + ports: + - '9090' + networks: + - default + - traefik_default + configs: + - source: prometheus.yml + target: /etc/prometheus/prometheus.yml + volumes: + - prometheus:/prometheus:rw + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: on-failure + labels: + - traefik.port=9090 + + grafana: + image: grafana/grafana:5.2.2 + depends_on: + - prometheus + ports: + - '3000' + networks: + - default + 
- traefik_default + volumes: + - grafana:/var/lib/grafana:rw + deploy: + mode: replicated + replicas: 1 + restart_policy: + condition: on-failure + labels: + - traefik.backend=monitoring + - traefik.port=3000 + - traefik.frontend.rule=Host:monitoring.demo + - traefik.docker.network=traefik_default + +configs: + prometheus.yml: + file: ./config/prometheus.yml + +networks: + traefik_default: + external: true diff --git a/paaspure/network/README.md b/paaspure/network/README.md new file mode 100644 index 0000000..86c97d2 --- /dev/null +++ b/paaspure/network/README.md @@ -0,0 +1,23 @@ +# PaaSPure Network + +Abstraction module for setting up network components. Things like reverse proxy and ssl certs + +### Usage + +```bash +usage: paaspure network + +Options: + -h, --help show this help message and exit +``` + +### Sample PureFile + +```yaml +network: + orchestrator: $ORCHESTRATOR_NAME + components: + $COMPONENT_NAME: + $COMPONENT_ARG1: $VAL1 + ... +``` diff --git a/paaspure/network/__init__.py b/paaspure/network/__init__.py new file mode 100644 index 0000000..26b7809 --- /dev/null +++ b/paaspure/network/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .module import instance + +__all__ = ['instance'] diff --git a/paaspure/network/argparser.py b/paaspure/network/argparser.py new file mode 100644 index 0000000..5e896e0 --- /dev/null +++ b/paaspure/network/argparser.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class NetworkParser(AbstractParser): + """New auto-generated modulo argparse template.""" + def __init__(self, module): + super(NetworkParser, self).__init__(__file__) + self.initialize(module) + + def initialize(self, module): + module.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} COMMAND', + f'{self.name}', + 'Manage Network resources.' 
+ ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Create Network resources.', + usage=f'paaspure {self.name} run' + ) + + module.run_parser = sub_parsers.add_parser( + 'destroy', + help='Destroy Network resources.', + usage=f'paaspure {self.name} destroy' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) diff --git a/paaspure/network/module.py b/paaspure/network/module.py new file mode 100644 index 0000000..e57f56b --- /dev/null +++ b/paaspure/network/module.py @@ -0,0 +1,21 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractModule +from .argparser import NetworkParser + + +class Network(AbstractModule): + """ + Abstraction module for setting up network resources. + E.g reverge proxy, ssl certs, etc. + """ + def __init__(self): + NetworkParser(self) + super(Network, self).__init__(__file__) + + def execute(self, config, args): + # TODO: Add ssl certs as secret. Use a component + super(Network, self).general_deploy(config, args) + + +instance = Network() diff --git a/paaspure/network/traefik/README.md b/paaspure/network/traefik/README.md new file mode 100644 index 0000000..355348c --- /dev/null +++ b/paaspure/network/traefik/README.md @@ -0,0 +1,16 @@ +# Traefik Layer 7 proxy + +Can be used for things such as reverse proxying and ssl termination + +### Usage + +```yaml +network: + orchestrator: orchestrator + components: + traefik: +``` +More coming soon... 
+ +### Todo +Add SSL capability diff --git a/paaspure/network/traefik/__init__.py b/paaspure/network/traefik/__init__.py new file mode 100644 index 0000000..485dd29 --- /dev/null +++ b/paaspure/network/traefik/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .component import instance + +__all__ = ['instance'] diff --git a/paaspure/network/traefik/component.py b/paaspure/network/traefik/component.py new file mode 100644 index 0000000..874eded --- /dev/null +++ b/paaspure/network/traefik/component.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- + +import os + +from paaspure.abstract import AbstractComponent +from paaspure.utils import docker_stack + + +class TraefikReverseProxy(AbstractComponent): + """Deploy the reverse proxy traefik.""" + def __init__(self): + super(TraefikReverseProxy, self).__init__() + + def build(self, config, client): + docker_stack( + command='deploy', + compose_file=os.path.dirname(__file__) + '/docker-compose.yml', + stack_name='traefik' + ) + + def destroy(self, config, client): + docker_stack( + command='rm', + stack_name='traefik' + ) + + +instance = TraefikReverseProxy() diff --git a/paaspure/network/traefik/config/traefik b/paaspure/network/traefik/config/traefik new file mode 100644 index 0000000..2e1e5c4 --- /dev/null +++ b/paaspure/network/traefik/config/traefik @@ -0,0 +1,15 @@ +defaultEntryPoints = ["http"] + +logLevel = "INFO" + +[web] +address = ":8080" + [web.auth.basic] + users = ["admin:$apr1$orCSZ1J8$Epn.IPP15UHTXOp8WHhCG0"] # change this with 'htpasswd -nb admin your_secure_password' + +# Metrics definition +[metrics] + # To enable Traefik to export internal metrics to Prometheus + [metrics.prometheus] + entryPoint = "traefik" + buckets = [0.1,0.3,1.2,5.0] diff --git a/paaspure/network/traefik/docker-compose.yml b/paaspure/network/traefik/docker-compose.yml new file mode 100644 index 0000000..5b3ad43 --- /dev/null +++ b/paaspure/network/traefik/docker-compose.yml @@ -0,0 +1,15 @@ +version: '3.4' + +services: + 
traefik: + image: traefik:1.7 + command: --api --docker --metrics.prometheus --metrics.prometheus.entryPoint="traefik" --docker.swarmmode --docker.watch --docker.domain=demo -l DEBUG + ports: + - "80:80" + - "8080:8080" + volumes: + - /var/run/docker.sock:/var/run/docker.sock + deploy: + mode: global + placement: + constraints: [node.role == manager] diff --git a/paaspure/orchestrator/README.md b/paaspure/orchestrator/README.md new file mode 100644 index 0000000..0cf0fac --- /dev/null +++ b/paaspure/orchestrator/README.md @@ -0,0 +1,23 @@ +# PaaSPure Infra Builder + +Abstraction module for components used to build the infrastructure. +Run cloud VMs and setup cluster. + +### Usage + +```bash +usage: paaspure orchestrator + +Options: + -h, --help show this help message and exit +``` + +### Sample PureFile + +```yaml +orchestrator: + components: + $COMPONENT_NAME: + $COMPONENT_ARG1: $VAL1 + ... +``` diff --git a/paaspure/orchestrator/__init__.py b/paaspure/orchestrator/__init__.py new file mode 100644 index 0000000..26b7809 --- /dev/null +++ b/paaspure/orchestrator/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .module import instance + +__all__ = ['instance'] diff --git a/paaspure/orchestrator/argparser.py b/paaspure/orchestrator/argparser.py new file mode 100644 index 0000000..407285a --- /dev/null +++ b/paaspure/orchestrator/argparser.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class InfraBuilderParser(AbstractParser): + """New auto-generated modulo argparse template.""" + def __init__(self, module): + super(InfraBuilderParser, self).__init__(__file__) + self.initialize(module) + + def initialize(self, module): + module.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} COMMAND', + f'{self.name}', + 'Connect to orchestrator.' 
+ ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Run the VmBuilder module.', + usage=f'paaspure {self.name} run' + ) + + module.run_parser = sub_parsers.add_parser( + 'destroy', + help='Destroy VmBuilder resources.', + usage=f'paaspure {self.name} destroy' + ) + + module.run_parser = sub_parsers.add_parser( + 'client_connection', + help='Return docker client configured using ssh tunnel.', + usage=f'paaspure {self.name} client_connection' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) diff --git a/paaspure/orchestrator/module.py b/paaspure/orchestrator/module.py new file mode 100644 index 0000000..3440d9f --- /dev/null +++ b/paaspure/orchestrator/module.py @@ -0,0 +1,33 @@ +# -*- coding: utf-8 -*- + +import sys +import importlib + +from paaspure.abstract import AbstractModule +from .argparser import InfraBuilderParser + + +class OrchestratorConnect(AbstractModule): + """ + Abstraction module for connection to orchestrators. 
+ """ + def __init__(self): + InfraBuilderParser(self) + super(OrchestratorConnect, self).__init__(__file__) + + def execute(self, config, args): + if args.subcommand is None: + self.parser.print_help() + sys.exit(1) + + components = config['modules'][args.command]['components'] + for name, sub_config in components.items(): + component = importlib.import_module(f'{name}') + # Use dispatch pattern to invoke method with same name + return getattr(component.instance, args.subcommand)( + sub_config, + config['credentials'] + ) + + +instance = OrchestratorConnect() diff --git a/paaspure/orchestrator/swarm_aws/README.md b/paaspure/orchestrator/swarm_aws/README.md new file mode 100644 index 0000000..a780b21 --- /dev/null +++ b/paaspure/orchestrator/swarm_aws/README.md @@ -0,0 +1,21 @@ +# AWS Swarm +Component for connecting to Swarm cluster running on AWS. + +### Usage + +```yaml +orchestrator: + components: + swarm_aws: + user: docker + bind_port: 2374 + region: eu-west-1 + tags: + key: swarm-node-type + manager_value: manager + worker_value: worker +``` +More coming soon... + +### Todo +1. 
Update Instructions diff --git a/paaspure/orchestrator/swarm_aws/__init__.py b/paaspure/orchestrator/swarm_aws/__init__.py new file mode 100644 index 0000000..462e426 --- /dev/null +++ b/paaspure/orchestrator/swarm_aws/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from .requirements import install +from .component import instance + +__all__ = ['install', 'instance'] diff --git a/paaspure/orchestrator/swarm_aws/component.py b/paaspure/orchestrator/swarm_aws/component.py new file mode 100644 index 0000000..d37490c --- /dev/null +++ b/paaspure/orchestrator/swarm_aws/component.py @@ -0,0 +1,78 @@ +# -*- coding: utf-8 -*- + +import os +import boto3 +import sys + +from paaspure.abstract import AbstractComponent +from paaspure.utils import DockerClientSSHTunnel + + +class SwarmAWS(AbstractComponent): + """Find swarm manager and create client connection.""" + def __init__(self): + super(SwarmAWS, self).__init__() + + def build(self, config, credentials): + os.environ['AWS_ACCESS_KEY_ID'] = credentials['aws_access_key'] + os.environ['AWS_SECRET_ACCESS_KEY'] = credentials['aws_secret_key'] + os.environ['AWS_DEFAULT_REGION'] = config['region'] + + ec2 = boto3.resource('ec2') + + filter_managers = [{ + 'Name': f'tag:{config["tags"]["key"]}', + 'Values': [config['tags']['manager_value']] + }, { + 'Name': 'instance-state-name', + 'Values': ['running'] + }] + + filter_workers = [{ + 'Name': f'tag:{config["tags"]["key"]}', + 'Values': [config['tags']['worker_value']] + }, { + 'Name': 'instance-state-name', + 'Values': ['running'] + }] + + return { + 'managers': ec2.instances.filter(Filters=filter_managers), + 'workers': ec2.instances.filter(Filters=filter_workers), + } + + def destroy(self, config, credentials): + pass + + def client_connection(self, config, credentials): + cluster_instances = self.build(config, credentials) + first_manager = list(cluster_instances['managers'])[0] + + try: + permission = oct( + os.stat(credentials['private_key']).st_mode & 0o777 + ) + if
int(permission, 8) & 0o077:
Update Instructions diff --git a/paaspure/orchestrator/swarm_azure/__init__.py b/paaspure/orchestrator/swarm_azure/__init__.py new file mode 100644 index 0000000..462e426 --- /dev/null +++ b/paaspure/orchestrator/swarm_azure/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from .requirements import install +from .component import instance + +__all__ = ['install', 'instance'] diff --git a/paaspure/orchestrator/swarm_azure/component.py b/paaspure/orchestrator/swarm_azure/component.py new file mode 100644 index 0000000..4593360 --- /dev/null +++ b/paaspure/orchestrator/swarm_azure/component.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +import os +import sys + +from paaspure.abstract import AbstractComponent +from paaspure.utils import DockerClientSSHTunnel +from azure.common.credentials import ServicePrincipalCredentials +from azure.mgmt.network import NetworkManagementClient + + +class SwarmAzure(AbstractComponent): + """Find swarm manager and create client connection.""" + def __init__(self): + super(SwarmAzure, self).__init__() + + def build(self, config, credentials): + sp_creds = ServicePrincipalCredentials( + client_id=credentials['azure_client_id'], + secret=credentials['azure_client_secret'], + tenant=credentials['azure_tenant_id'] + ) + + network_client = NetworkManagementClient( + sp_creds, + credentials['subscription_id'] + ) + + puplic_ip = network_client.public_ip_addresses.get( + config['resource_group_name'], + config['swarmName'] + '-externalSSHLoadBalancer-public-ip' + ) + + nat_rule = network_client.inbound_nat_rules.get( + config['resource_group_name'], + 'externalSSHLoadBalancer', + 'default.0' + ) + + return puplic_ip.ip_address, nat_rule.frontend_port + + def destroy(self, config, credentials): + pass + + def client_connection(self, config, credentials): + manager_ip, manager_port = self.build(config, credentials) + + try: + permission = oct( + os.stat(credentials['private_key']).st_mode & 0o777 + ) + if int(permission[2:]) > 600: + 
raise Exception(f'Permissions {permission} for \ + {credentials["private_key"]} are too open.') + except KeyError: + print(f'No private_key credential in Purefile') + sys.exit(1) + + return DockerClientSSHTunnel( + key_path=credentials["private_key"], + manager_address=manager_ip, + manager_port=manager_port, + bind_port=config['bind_port'], + user=config['user'] + ) + + +instance = SwarmAzure() diff --git a/paaspure/orchestrator/swarm_azure/requirements.py b/paaspure/orchestrator/swarm_azure/requirements.py new file mode 100644 index 0000000..36730cb --- /dev/null +++ b/paaspure/orchestrator/swarm_azure/requirements.py @@ -0,0 +1,23 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils import pip_install, apk_install + +pip_packages = [ + 'azure', + 'keyrings.alt' +] + +apk_packages = [ + 'openssh', + 'build-base', + 'libffi-dev', + 'openssl-dev' +] + + +def install(): + apk_install(packages=apk_packages, component='SwarmAzure') + pip_install(packages=pip_packages, component='SwarmAzure') + + +install() diff --git a/paaspure/pull/README.md b/paaspure/pull/README.md new file mode 100644 index 0000000..6f41251 --- /dev/null +++ b/paaspure/pull/README.md @@ -0,0 +1,42 @@ +# PaaSPure Puller + +Module for pulling other modules and components. + +## Usage + +```bash +usage: paaspure pull TYPE + +Options: + -h, --help show this help message and exit + --git-url GIT_URL Repo url to pull from. 
+ +Commands: + {module,component} + module Pull module + component Pull component +``` + +### Pull Module +```bash +usage: paaspure pull module NAME + +Arguments: + NAME The module name + +Options: + -h, --help show this help message and exit +``` + +### Pull Component + +```bash +usage: paaspure pull component PARENT_MODULE NAME + +Arguments: + PARENT_MODULE The parent module name + NAME The component name + +Options: + -h, --help show this help message and exit +``` diff --git a/paaspure/pull/__init__.py b/paaspure/pull/__init__.py new file mode 100644 index 0000000..6bf1d42 --- /dev/null +++ b/paaspure/pull/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from paaspure.pull.puller import PaaSPurePuller + +__all__ = ['PaaSPurePuller'] diff --git a/paaspure/pull/argparser.py b/paaspure/pull/argparser.py new file mode 100644 index 0000000..7afc8f3 --- /dev/null +++ b/paaspure/pull/argparser.py @@ -0,0 +1,90 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class PullerParser(AbstractParser): + """PaaSPurePuller argparser""" + def __init__(self, puller): + super(PullerParser, self).__init__(__file__) + self.initialize(puller) + + def initialize(self, puller): + puller.parser = paaSPureParser.extend_parser( + f'paaspure {self.name} [OPTIONS] TYPE', + f'{self.name}', + 'Pull a module or component' + ) + + puller.parser.add_argument( + '--git-url', + type=str, + help='Repo url to pull from.' 
+ ) + + sub_parsers = puller.parser.add_subparsers( + title='Type', + dest='type' + ) + + puller.parsers = {} + + puller.parsers['all'] = sub_parsers.add_parser( + 'all', + help=f'Pull all objects definied in the config file.', + usage=f'paaspure {self.name} all' + ) + + puller.parsers['module'] = sub_parsers.add_parser( + 'module', + help=f'Pull module', + usage=f'paaspure {self.name} module NAME' + ) + + puller.parsers['module'].add_argument( + 'NAME', + nargs='?', + type=str, + help='The module name' + ) + + puller.parsers['module'].add_argument( + '--version', + type=str, + help='Module version' + ) + + puller.parsers['module']._optionals.title = 'Options' + puller.parsers['module']._positionals.title = 'Arguments' + puller.parsers['module'].set_defaults(module_parser=True) + + puller.parsers['component'] = sub_parsers.add_parser( + 'component', + help='Pull component', + usage=f'paaspure {self.name} component PARENT_MODULE NAME' + ) + + puller.parsers['component'].add_argument( + 'PARENT_MODULE', + nargs='?', + type=str, + help='The parent module name' + ) + + puller.parsers['component'].add_argument( + 'NAME', + nargs='?', + type=str, + help='The component name' + ) + + puller.parsers['component'].add_argument( + '--version', + type=str, + help='Module version' + ) + + puller.parsers['component']._optionals.title = 'Options' + puller.parsers['component']._positionals.title = 'Arguments' + puller.parsers['component'].set_defaults(new_parser=True) diff --git a/paaspure/pull/puller.py b/paaspure/pull/puller.py new file mode 100644 index 0000000..b727265 --- /dev/null +++ b/paaspure/pull/puller.py @@ -0,0 +1,141 @@ +# -*- coding: utf-8 -*- + +import os +import sys + +from paaspure.utils import validate_name, hub +from paaspure.pull.argparser import PullerParser +from paaspure.pull.utils import clone + + +class PaaSPurePuller: + """Module for pulling other modules and components.""" + def __init__(self): + PullerParser(self) + super(PaaSPurePuller, 
self).__init__() + + def run(self, args={}, config={}): + if args.type == 'all': + self.__all(config['modules']) + else: + if args.type is None: + self.parser.print_help() + sys.exit(1) + + if args.NAME is None: + self.parsers[args.type].print_help() + sys.exit(1) + + self.__pull_type(args) + + def __all(self, pure_obects): + for module, module_args in pure_obects.items(): + print('Pulling module', module) + repo, commit = self.__search_hub( + name=module, + type='module', + args=module_args + ) + + self.__module( + name=module, + type='module', + commit=commit, + repo_url=repo + ) + + for component, component_args in module_args['components'].items(): + print('Pulling component', component) + + repo, commit = self.__search_hub( + name=component, + type='component', + args=component_args + ) + + self.__component( + name=component, + parent_module=module, + type='component', + commit=commit, + repo_url=repo + ) + + def __search_hub(self, name=None, type=None, args=None): + if args is None: + args = {} + + if 'repo' in args: + repo = args['repo'] + if 'commit' not in args: + commit = 'master' + else: + commit = args['commit'] + else: + hub_object = hub.get_object(name, type) + repo = hub_object['gitUrl'] + if 'tag' not in args: + args['tag'] = 'latest' + + commit = hub.get_version(hub_object, str(args['tag']))['commit'] + + return repo, commit + + def __pull_type(self, args): + if args.git_url is None: + hub_object = hub.get_object(args.NAME, args.type) + args.git_url = hub_object['gitUrl'] + + # NOTE: Only get versions for hub objects + if args.version is not None: + version = hub.get_version(hub_object, args.version) + args.version = version['commit'] + + if args.type == 'module': + self.__module( + name=args.NAME, + type=args.type, + commit=args.version, + repo_url=args.git_url + ) + else: + self.__component( + name=args.NAME, + parent_module=args.PARENT_MODULE, + type=args.type, + commit=args.version, + repo_url=args.git_url + ) + + def __module(self, name=None, 
type=None, commit='Master', repo_url=None): + name = name.lower() + validate_name(type, name) + + clone( + repo_url=repo_url, + type=type, + commit=commit, + target_path=[name] + ) + + def __component(self, name=None, parent_module=None, type=None, + commit='Master', repo_url=None): + if parent_module is None: + self.parsers[type].print_help() + sys.exit(1) + + name = name.lower() + validate_name(type, name) + parent_module = parent_module.lower() + validate_name('module', parent_module) + + if not os.path.exists(parent_module): + print(f'Missing module: {parent_module}') + sys.exit(1) + + clone( + repo_url=repo_url, + type=type, + commit=commit, + target_path=[parent_module, name] + ) diff --git a/paaspure/pull/utils.py b/paaspure/pull/utils.py new file mode 100644 index 0000000..9cb3ffb --- /dev/null +++ b/paaspure/pull/utils.py @@ -0,0 +1,34 @@ +import os +import git +import shutil + +from paaspure.utils import request_input + + +def clone(repo_url=None, type=None, target_path=[], commit=None): + target = os.path.join(*target_path) + + if os.path.exists(target): + print(f'Found existing {type} {target}: ') + request_input( + question='\tWould you like to overwrite it? [Y/n] ', + reject='\tSkipping pull! ¯\_(ツ)_/¯' + ) + + shutil.rmtree(target) + + repo_name = repo_url.replace('.', '/').split('/')[-2] + git.Git().clone(repo_url) + + if commit is not None: + git.Git(repo_name).checkout(commit) + + try: + # TODO: Fix error handler + shutil.copytree(repo_name, target) + except shutil.Error as e: + print('Directory not copied. Error: %s' % e) + except OSError as e: + print('Directory not copied. 
Error: %s' % e) + + shutil.rmtree(repo_name) diff --git a/paaspure/settings.py b/paaspure/settings.py new file mode 100644 index 0000000..7a0a14e --- /dev/null +++ b/paaspure/settings.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- + +import os +import sys + + +DEBUG = False +PROJECT_ROOT = os.path.dirname(__file__) + +if 'pytest' in sys.modules: + QUIET_INSTALL = True +else: + QUIET_INSTALL = False + +HUB = 'https://paaspure.com' diff --git a/paaspure/utils/__init__.py b/paaspure/utils/__init__.py new file mode 100644 index 0000000..77323b8 --- /dev/null +++ b/paaspure/utils/__init__.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils.general import validate_name, escape_ansi, get_version +from paaspure.utils.files import read_yaml_file, write_yaml_file, read_file +from paaspure.utils.package import pip_install, request_input, apk_install +from paaspure.utils.docker import build_image, MockContainerRun, \ + copy_from_container, DockerClientSSHTunnel, docker_stack +from paaspure.utils import hub + +__all__ = [ + 'get_version', + 'read_yaml_file', + 'write_yaml_file', + 'read_file', + 'validate_name', + 'escape_ansi', + 'pip_install', + 'apk_install', + 'request_input', + 'build_image', + 'MockContainerRun', + 'copy_from_container', + 'hub', + 'DockerClientSSHTunnel', + 'docker_stack' +] diff --git a/paaspure/utils/docker.py b/paaspure/utils/docker.py new file mode 100644 index 0000000..2b13bca --- /dev/null +++ b/paaspure/utils/docker.py @@ -0,0 +1,109 @@ +# -*- coding: utf-8 -*- + +import os +import docker +import collections +import tarfile +import subprocess +import time + + +from paaspure import settings + + +def build_image(image_tag=None, path=None, dockerfile='Dockerfile'): + print(f'Building docker image: {image_tag}') + + logs = docker.APIClient().build( + path=path, + tag=image_tag, + decode=True, + dockerfile=dockerfile + ) + + for log in logs: + if settings.DEBUG: + for key, value in log.items(): + print(value, end='') + + +def 
copy_from_container(container=None, src_path=None, dest_path=None, + force=False): + + if container.wait()['StatusCode'] != 0 and not force: + print('Container execution failed, skipping copy_from_container()') + print('\tYou can force it with force=True') + else: + stream, stat = container.get_archive(src_path) + with open('extracted_resources.tar', 'wb') as outfile: + for data in stream: + outfile.write(data) + + tar = tarfile.open('extracted_resources.tar') + tar.extractall(path=dest_path) + tar.close() + + os.remove('extracted_resources.tar') + + +class MockContainerRun: + def __init__(self, log_output=[], status_code=None): + self.log_output = [log.encode() for log in log_output] + self.status_code = status_code + self.containers = self + + def __call__(self): + return self + + def run(self, image_tag, **kwargs): + MockContainerRun = collections.namedtuple( + 'MockContainerRun', + 'logs wait' + ) + return MockContainerRun(logs=self.logs, wait=self.wait) + + def logs(self, stream=None): + return self.log_output + + def wait(self): + return {'StatusCode': self.status_code} + + +class DockerClientSSHTunnel: + def __init__(self, key_path=None, manager_address=None, manager_port=22, + bind_port='2374', user=None): + self.key_path = key_path + self.manager_address = manager_address + self.manager_port = str(manager_port) + self.bind_port = str(bind_port) + self.user = user + + def __enter__(self): + self.p = subprocess.Popen( + [ + 'ssh', '-i', self.key_path, '-oStrictHostKeyChecking=no', '-p', + self.manager_port, '-NL', + f'localhost:{self.bind_port}:/var/run/docker.sock', + f'{self.user}@{self.manager_address}' + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE + ) + + # NOTE: wait for tunnel to be ready. 
+ time.sleep(1) + + return docker.DockerClient(base_url=f'localhost:{self.bind_port}') + + def __exit__(self, type, value, traceback): + self.p.terminate() + + +def docker_stack(command=None, compose_file=None, stack_name=None): + docker_command = f'docker -H localhost:2374 stack {command} ' + if command == 'deploy': + docker_command += f'-c {compose_file} ' + docker_command += stack_name + + if subprocess.check_call(docker_command, shell=True) != 0: + print('Error') diff --git a/paaspure/utils/files.py b/paaspure/utils/files.py new file mode 100644 index 0000000..7bf4de2 --- /dev/null +++ b/paaspure/utils/files.py @@ -0,0 +1,36 @@ +# -*- coding: utf-8 -*- + +import os +import yaml + + +def read_yaml_file(description='yaml', *path): + try: + with open(os.path.join(*path), 'r') as f: + return yaml.load(f) + except FileNotFoundError as err: + print(f'Missing {description} file: {os.path.join(*path)}') + raise + except yaml.scanner.ScannerError as yaml_err: + print('Invalid YAML file:') + print(yaml_err) + raise + + +def write_yaml_file(output, *path): + try: + with open(os.path.join(*path), 'w') as outfile: + yaml.dump(output, outfile, default_flow_style=False) + except Exception as err: + print(f'Could not create new YAML file: {os.path.join(*path)}') + print(err) + raise + + +def read_file(*path): + try: + with open(os.path.join(*path), 'r') as fp: + return fp.read() + except FileNotFoundError as err: + print(f'Missing file: {os.path.join(*path)}') + raise diff --git a/paaspure/utils/general.py b/paaspure/utils/general.py new file mode 100644 index 0000000..c24087a --- /dev/null +++ b/paaspure/utils/general.py @@ -0,0 +1,26 @@ +# -*- coding: utf-8 -*- + +import re +import sys +from paaspure.__init__ import __version__ + + +def get_version(): + return __version__ + + +def escape_ansi(line): + ansi_escape = re.compile(r'(\x9B|\x1B\[)[0-?]*[ -/]*[@-~]') + return ansi_escape.sub('', line) + + +def validate_name(type, name): + if re.match('^[a-z]+(_[a-z]+)*$', name) is 
None: + print(f'Invalid {type} name: {name}') + print('Valid names must:') + print('\tConsist of only letters and underscores.') + print('\tStart and finish with a letter.') + + print(f'E.g: {type}_name') + + sys.exit(1) diff --git a/paaspure/utils/hub.py b/paaspure/utils/hub.py new file mode 100644 index 0000000..2b108b2 --- /dev/null +++ b/paaspure/utils/hub.py @@ -0,0 +1,83 @@ +import sys +import requests + +from paaspure import settings + + +def __request(url=f'{settings.HUB}/api'): + try: + response = requests.get(url) + except requests.exceptions.ConnectionError: + print(f'Could not connect to hub: {settings.HUB}') + sys.exit(1) + + if response.status_code != 200: + print(f'Invalid response from hub: {settings.HUB}') + print(f'\t{response}') + sys.exit(1) + + return response.json() + + +def request_objects(): + return __request(url=f'{settings.HUB}/api/hub') + + +def request_versions(id=None): + versions = __request(url=f'{settings.HUB}/api/version/{id}') + return [{'tag': 'latest', 'date': 'now', 'commit': 'master'}] + versions + + +def get_object(name='', type='module', version='latest'): + hub_objects = request_objects() + + filtered_hub_objects = [obj for obj in hub_objects + if obj['type'] == type and name == obj['name']] + + if len(filtered_hub_objects) == 1: + return filtered_hub_objects[0] + + hub_objects = [obj for obj in hub_objects + if obj['type'] == type and name in obj['name']] + + if len(hub_objects) == 0: + print(f'Pull failed: {type} {name} does not exist.') + sys.exit(1) + if len(hub_objects) >= 1: + print(f'Could not find a name match for {type}: {name}') + print(f'Other {type}s with a similar name:') + for item in hub_objects: + print(' {:40s} {:s}'.format( + item['name'], + item['description'] + )) + sys.exit(1) + + +def get_version(hub_object, version): + versions = request_versions(hub_object['_id']) + filtered_versions = [vrs for vrs in versions if version == str(vrs['tag'])] + + if len(filtered_versions) == 1: + return 
filtered_versions[0] + + filtered_versions = [vrs for vrs in versions if version in str(vrs['tag'])] + + if len(filtered_versions) == 0: + print(f'Could not find {hub_object["name"]} version {version}.') + print('Versions available:') + __print_versions(versions) + sys.exit(1) + if len(filtered_versions) >= 1: + print(f'Could not match {hub_object["name"]} version {version}.') + print(f'Other similar tags:') + __print_versions(filtered_versions) + sys.exit(1) + + +def __print_versions(versions): + print(' {:20s} {:20s} {:s}'.format('tag', 'date', 'hash')) + for item in versions: + print(' {:20s} {:20s} {:s}'.format( + str(item['tag']), item['date'], item['commit']) + ) diff --git a/paaspure/utils/package.py b/paaspure/utils/package.py new file mode 100644 index 0000000..656b180 --- /dev/null +++ b/paaspure/utils/package.py @@ -0,0 +1,60 @@ +# -*- coding: utf-8 -*- + +import sys +import importlib +import subprocess +from paaspure import settings + + +def pip_install(packages=None, component=None): + print(f'Checking pip dependecies: {component}') + + missing = [] + for package in packages: + try: + importlib.__import__(package) + except ImportError: + missing.append(package) + + __run_install([sys.executable, '-m', 'pip', 'install'], missing) + + +def apk_install(packages=None, component=None): + print(f'Checking apk dependecies: {component}') + + installed = subprocess.check_output(['apk', 'info']).decode().split() + missing = [package for package in packages if package not in installed] + __run_install(['apk', 'add'], missing) + + +def __run_install(command, missing): + if len(missing) != 0: + print(f'Install missing dependecies:') + print(missing) + + request_input( + question='Do you want to continue? [Y/n] ', + reject='\tY U NOT let me install? 
(/ಠ,ಠ)/' + ) + + if not settings.DEBUG: + command.append('-q') + subprocess.check_call(command + missing) + + +def request_input(**kwargs): + print(kwargs['question'], end='') + + yes = {'yes', 'y', 'ye', ''} + no = {'no', 'n'} + + choice = 'yes' if settings.QUIET_INSTALL else input().lower() + + if choice in yes: + return + elif choice in no: + print(kwargs['reject']) + sys.exit(1) + else: + print("\tPlease respond with 'yes' or 'no'") + sys.exit(1) diff --git a/paaspure/vm_builder/README.md b/paaspure/vm_builder/README.md new file mode 100644 index 0000000..fae2097 --- /dev/null +++ b/paaspure/vm_builder/README.md @@ -0,0 +1,31 @@ +# PaaSPure VM Builder + +Abstraction module for components used to build cloud images. + +### Usage + +```bash +usage: paaspure vm_builder COMMAND + +Options: + -h, --help show this help message and exit + +Commands: + {run,destroy} + run Run the VmBuilder module. + destroy Destroy VmBuilder resources. +``` + +### Sample PureFile + +```yaml +vm_builder: + repo: + commit: + components: + packer_aws: + template: docker_ubuntu.json + region: eu-west-1 + var-files: + - variables.json +``` diff --git a/paaspure/vm_builder/__init__.py b/paaspure/vm_builder/__init__.py new file mode 100644 index 0000000..26b7809 --- /dev/null +++ b/paaspure/vm_builder/__init__.py @@ -0,0 +1,5 @@ +# -*- coding: utf-8 -*- + +from .module import instance + +__all__ = ['instance'] diff --git a/paaspure/vm_builder/argparser.py b/paaspure/vm_builder/argparser.py new file mode 100644 index 0000000..94db536 --- /dev/null +++ b/paaspure/vm_builder/argparser.py @@ -0,0 +1,39 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractParser +from paaspure.argparser import paaSPureParser + + +class VmBuilderParser(AbstractParser): + """New auto-generated modulo argparse template.""" + def __init__(self, module): + super(VmBuilderParser, self).__init__(__file__) + self.initialize(module) + + def initialize(self, module): + module.parser = 
paaSPureParser.extend_parser( + f'paaspure {self.name} COMMAND', + f'{self.name}', + 'Build cloud images.' + ) + + sub_parsers = module.parser.add_subparsers( + title='Commands', + dest='subcommand' + ) + + module.run_parser = sub_parsers.add_parser( + 'build', + help='Run the VmBuilder module.', + usage=f'paaspure {self.name} run' + ) + + module.run_parser = sub_parsers.add_parser( + 'destroy', + help='Destroy VmBuilder resources.', + usage=f'paaspure {self.name} destroy' + ) + + module.run_parser._optionals.title = 'Options' + module.run_parser._positionals.title = 'Commands' + module.run_parser.set_defaults(parser=True) diff --git a/paaspure/vm_builder/module.py b/paaspure/vm_builder/module.py new file mode 100644 index 0000000..e4df2e4 --- /dev/null +++ b/paaspure/vm_builder/module.py @@ -0,0 +1,17 @@ +# -*- coding: utf-8 -*- + +from paaspure.abstract import AbstractModule +from .argparser import VmBuilderParser + + +class VmBuilder(AbstractModule): + """Abstraction module for components used to build cloud images.""" + def __init__(self): + VmBuilderParser(self) + super(VmBuilder, self).__init__(__file__) + + def execute(self, config, args): + super(VmBuilder, self).general_execute(config, args) + + +instance = VmBuilder() diff --git a/paaspure/vm_builder/packer_aws/Dockerfile b/paaspure/vm_builder/packer_aws/Dockerfile new file mode 100644 index 0000000..51c62ba --- /dev/null +++ b/paaspure/vm_builder/packer_aws/Dockerfile @@ -0,0 +1,7 @@ +FROM hashicorp/packer:1.1.2 + +LABEL maintainer "ruben.vasconcelos3@mail.dcu.ie" + +WORKDIR /app + +COPY packer_files . diff --git a/paaspure/vm_builder/packer_aws/README.md b/paaspure/vm_builder/packer_aws/README.md new file mode 100644 index 0000000..6fb7b07 --- /dev/null +++ b/paaspure/vm_builder/packer_aws/README.md @@ -0,0 +1,31 @@ +# PaaSPure Packer AWS + +PaaSPure component for building and provisioning AWS cloud images using packer. 
+ +### Usage +Tested with the vm_builder module: https://github.com/iorubs/paaspure_vm_builder.git + +```bash +Usage: + run Build IMAs. + destroy Destroy IMAs and Snapshots +``` + + +# Sample pure.yml + +```yaml +version: 1 + +credentials: + aws_access_key: ACCESS_KEY + aws_secret_key: SECRET_KEY + +modules: + vm_builder: + packer_aws: + template: PACKER_TEMPLATE_FILE + region: REGION + var-files: + - PACKER_VARIABLES_FILE +``` diff --git a/paaspure/vm_builder/packer_aws/__init__.py b/paaspure/vm_builder/packer_aws/__init__.py new file mode 100644 index 0000000..462e426 --- /dev/null +++ b/paaspure/vm_builder/packer_aws/__init__.py @@ -0,0 +1,6 @@ +# -*- coding: utf-8 -*- + +from .requirements import install +from .component import instance + +__all__ = ['install', 'instance'] diff --git a/paaspure/vm_builder/packer_aws/component.py b/paaspure/vm_builder/packer_aws/component.py new file mode 100644 index 0000000..306dbaa --- /dev/null +++ b/paaspure/vm_builder/packer_aws/component.py @@ -0,0 +1,127 @@ +# -*- coding: utf-8 -*- + +import os +import docker +import boto3 + +from paaspure.abstract import AbstractComponent +from botocore.exceptions import ClientError +from paaspure.utils import read_yaml_file, write_yaml_file, escape_ansi, \ + build_image + + +class PackerAWS(AbstractComponent): + """Create AWS AMIs using Packer.""" + def __init__(self): + self.resource_file = os.path.join( + os.path.dirname(__file__), + 'resources.yml' + ) + + super(PackerAWS, self).__init__() + + def parse_id(self, line): + no_color = escape_ansi(line) + split_output = no_color.replace('\n', ' ').split() + return split_output if len(split_output) == 0 else split_output[-1] + + def build(self, config, credentials): + client = docker.from_env() + build_image( + image_tag='paaspure_packer', + path=os.path.dirname(__file__) + ) + + resources = {'images': [], 'snapshots': []} + + command = ['build'] + + for var_file in config['var-files']: + command.append(f'-var-file={var_file}') + + 
command.append(config['template']) + + container = client.containers.run( + 'paaspure_packer', + environment=[ + 'AWS_ACCESS_KEY=' + credentials['aws_access_key'], + 'AWS_SECRET_KEY=' + credentials['aws_secret_key'] + ], + command=command, + detach=True + ) + + for log in container.logs(stream=True): + print(log.decode(), end='') + + if 'AMI: ami' in log.decode() and 'Error' not in log.decode(): + resources['images'].append(self.parse_id(log.decode())) + elif 'shot: snap' in log.decode() and 'Error' not in log.decode(): + resources['snapshots'].append(self.parse_id(log.decode())) + + if container.wait()['StatusCode'] == 1: + print(container.wait()['StatusCode']) + raise Exception('Could not build image.') + else: + write_yaml_file(resources, self.resource_file) + + def destroy(self, config, credentials): + self.boto3_client = boto3.client( + 'ec2', + aws_access_key_id=credentials['aws_access_key'], + aws_secret_access_key=credentials['aws_secret_key'], + region_name=config['region'] + ) + + resources = {} + + try: + resources = read_yaml_file( + 'packer resource', + self.resource_file + ) + except Exception as err: + print('No packer resources were removed!') + return + + self.__destroy_resources( + resource_list=resources['images'], + resource_type='image' + ) + + self.__destroy_resources( + resource_list=resources['snapshots'], + resource_type='snapshot' + ) + + if len(resources['snapshots']) == 0 and len(resources['images']) == 0: + os.remove(self.resource_file) + print('All packer resources removed succefully!') + else: + print('Could not remove some resources!') + write_yaml_file(resources, self.resource_file) + + def __destroy_resources(self, resource_list=[], resource_type=None): + already_removed_codes = [ + 'InvalidAMIID.Unavailable', + 'InvalidSnapshot.NotFound' + ] + + for resource_id in resource_list: + try: + if resource_type == 'image': + self.boto3_client.deregister_image(ImageId=resource_id) + if resource_type == 'snapshot': + 
self.boto3_client.delete_snapshot(SnapshotId=resource_id) + + resource_list.remove(resource_id) + + print(f'Removed {resource_type}: {resource_id}') + except ClientError as e: + if e.response['Error']['Code'] in already_removed_codes: + resource_list.remove(resource_id) + else: + print(f'Could not remove resource: {resource_id}') + + +instance = PackerAWS() diff --git a/paaspure/vm_builder/packer_aws/packer_files/docker_ubuntu.json b/paaspure/vm_builder/packer_aws/packer_files/docker_ubuntu.json new file mode 100644 index 0000000..963ceb5 --- /dev/null +++ b/paaspure/vm_builder/packer_aws/packer_files/docker_ubuntu.json @@ -0,0 +1,40 @@ +{ + "variables": { + "instance_type": "", + "ssh_username": "", + "access_key": "{{env `AWS_ACCESS_KEY`}}", + "secret_key": "{{env `AWS_SECRET_KEY`}}" + }, + "builders": [{ + "type": "amazon-ebs", + "name": "docker-host", + "access_key": "{{user `access_key`}}", + "secret_key": "{{user `secret_key`}}", + "region": "eu-west-1", + "source_ami_filter": { + "filters": { + "virtualization-type": "hvm", + "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", + "root-device-type": "ebs" + }, + "owners": ["099720109477"], + "most_recent": true + }, + "instance_type": "{{user `instance_type`}}", + "ssh_username": "{{user `ssh_username`}}", + "ami_name": "docker-host", + "ami_description": "AMI for running a docker host.", + "tags":{ + "Name": "paaspure_docker_ubuntu_16_04" + } + }], + "provisioners": [{ + "inline": [ + "curl -fsSL get.docker.com -o get-docker.sh", + "sudo sh get-docker.sh", + "rm -rf get-docker.sh", + "sudo usermod -aG docker $USER" + ], + "type": "shell" + }] +} diff --git a/paaspure/vm_builder/packer_aws/packer_files/golden_ubuntu.json b/paaspure/vm_builder/packer_aws/packer_files/golden_ubuntu.json new file mode 100644 index 0000000..1030da4 --- /dev/null +++ b/paaspure/vm_builder/packer_aws/packer_files/golden_ubuntu.json @@ -0,0 +1,31 @@ +{ + "variables": { + "instance_type": "", + "ssh_username": "", + 
"access_key": "{{env `AWS_ACCESS_KEY`}}", + "secret_key": "{{env `AWS_SECRET_KEY`}}" + }, + "builders": [{ + "type": "amazon-ebs", + "name": "golden-ubuntu", + "access_key": "{{user `access_key`}}", + "secret_key": "{{user `secret_key`}}", + "region": "eu-west-1", + "source_ami_filter": { + "filters": { + "virtualization-type": "hvm", + "name": "ubuntu/images/*ubuntu-xenial-16.04-amd64-server-*", + "root-device-type": "ebs" + }, + "owners": ["099720109477"], + "most_recent": true + }, + "instance_type": "{{user `instance_type`}}", + "ssh_username": "{{user `ssh_username`}}", + "ami_name": "golden-host", + "ami_description": "Fresh Ubuntu installation.", + "tags":{ + "Name": "paaspure_ubuntu_16_04" + } + }] +} diff --git a/paaspure/vm_builder/packer_aws/packer_files/variables.json b/paaspure/vm_builder/packer_aws/packer_files/variables.json new file mode 100644 index 0000000..f1ccc52 --- /dev/null +++ b/paaspure/vm_builder/packer_aws/packer_files/variables.json @@ -0,0 +1,4 @@ +{ + "instance_type": "t2.micro", + "ssh_username": "ubuntu" +} diff --git a/paaspure/vm_builder/packer_aws/requirements.py b/paaspure/vm_builder/packer_aws/requirements.py new file mode 100644 index 0000000..1103704 --- /dev/null +++ b/paaspure/vm_builder/packer_aws/requirements.py @@ -0,0 +1,15 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils import pip_install + +pip_packages = [ + 'docker', + 'boto3' +] + + +def install(): + pip_install(packages=pip_packages, component='PackerAWS') + + +install() diff --git a/paaspure/vm_builder/packer_aws/test_packer_aws.py b/paaspure/vm_builder/packer_aws/test_packer_aws.py new file mode 100644 index 0000000..bed2353 --- /dev/null +++ b/paaspure/vm_builder/packer_aws/test_packer_aws.py @@ -0,0 +1,107 @@ +# -*- coding: utf-8 -*- + +import os +import pytest +import docker + +# TODO: Remove mock from requirements.txt +# from mock import MagicMock +from paaspure.utils import build_image, write_yaml_file, MockContainerRun +from .component import instance + 
+ +@pytest.fixture(scope="function") +def resource_file(): + """ New module name and cleanup work.""" + resources = os.path.join( + os.path.dirname(__file__), + 'resources.yml' + ) + yield resources + os.remove(resources) + + +class TestPackerAWS: + resource_file = os.path.join( + os.path.dirname(__file__), + 'resources.yml' + ) + + def test_validate_packer_template(self): + client = docker.from_env() + + build_image( + image_tag='paaspure_packer_test', + path=os.path.dirname(__file__) + ) + assert client.images.get('paaspure_packer_test') is not None + + container = client.containers.run( + 'paaspure_packer_test', + command=[ + 'validate', + '-var-file=variables.json', + 'docker_ubuntu.json' + ], + detach=True + ) + + output = [] + + for log in container.logs(stdout=True, stderr=True, stream=True): + output.append(log.decode()) + + assert 'Template validated successfully.' in ''.join(output) + + client.images.remove('paaspure_packer_test') + + with pytest.raises(docker.errors.ImageNotFound): + client.images.get('paaspure_packer_test') + + def test_invalid_inputs(self, resource_file): + assert not os.path.exists(resource_file) + + with pytest.raises(Exception): + instance.build( + {'template': '', 'region': '', 'var-files': ['']}, + {'aws_access_key': '', 'aws_secret_key': ''} + ) + + # No file to read from + instance.destroy( + {'template': '', 'region': 'eu-west-1', 'var-files': ['']}, + {'aws_access_key': '', 'aws_secret_key': ''} + ) + + # Try to remove non-existent resources + dummy_resources = { + 'images': ['ami-a046fake'], + 'snapshots': ['snap-a046fake'] + } + + write_yaml_file(dummy_resources, resource_file) + + instance.destroy( + {'template': '', 'region': 'eu-west-1', 'var-files': ['']}, + {'aws_access_key': '', 'aws_secret_key': ''} + ) + + def test_mock_run(self, monkeypatch, resource_file): + assert not os.path.exists(resource_file) + + mock_container = MockContainerRun( + log_output=[ + 'AMI: ami-a046ebce', + 'Snapshot: snap-a046eaae' + ], + 
status_code=0 + ) + + monkeypatch.setattr(docker, 'from_env', mock_container) + + instance.build( + {'template': '', 'region': '', 'var-files': ['']}, + {'aws_access_key': '', 'aws_secret_key': ''} + ) + + assert os.path.exists(resource_file) diff --git a/paaspure/vm_builder/test_vm_builder.py b/paaspure/vm_builder/test_vm_builder.py new file mode 100644 index 0000000..f79afe5 --- /dev/null +++ b/paaspure/vm_builder/test_vm_builder.py @@ -0,0 +1,42 @@ +# -*- coding: utf-8 -*- + +import pytest +from paaspure.argparser import paaSPureParser +from .module import instance + + +class TestVmBuilder: + def test_vm_builder_execution(self, capsys): + # NOTE: Need a better way to do this. + module_name = __name__.split('.')[-2] + + config = { + 'version': 1, + 'credentials': {}, + 'modules': { + module_name: { + 'components': { + 'paaspure.dummy_component': {} + } + } + } + } + + args = paaSPureParser.parser.parse_args([ + module_name + ]) + with pytest.raises(SystemExit): + instance.execute(config, args) + + out, _ = capsys.readouterr() + assert f'paaspure {module_name} COMMAND' in out + + args.subcommand = 'build' + instance.execute(config, args) + out, _ = capsys.readouterr() + assert 'Dummy run.' in out + + args.subcommand = 'destroy' + instance.execute(config, args) + out, _ = capsys.readouterr() + assert 'Dummy destroy.' 
in out diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..9cd2009 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,27 @@ +attrs==17.4.0 +certifi==2018.1.18 +chardet==3.0.4 +coverage==4.5.1 +docker==3.1.1 +docker-pycreds==0.2.2 +flake8==3.5.0 +gitdb2==2.0.3 +GitPython==2.1.8 +idna==2.6 +Jinja2==2.10 +MarkupSafe==1.0 +mccabe==0.6.1 +mock==2.0.0 +pbr==3.1.1 +pluggy==0.6.0 +py==1.5.2 +pycodestyle==2.3.1 +pyflakes==1.6.0 +pytest==3.4.0 +pytest-cov==2.5.1 +PyYAML==3.12 +requests==2.18.4 +six==1.11.0 +smmap2==2.0.3 +urllib3==1.22 +websocket-client==0.47.0 diff --git a/sample_configs/aws.yml b/sample_configs/aws.yml new file mode 100644 index 0000000..979a5eb --- /dev/null +++ b/sample_configs/aws.yml @@ -0,0 +1,54 @@ +version: 1 + +hub: 'http://10.216.18.67:8080' + +credentials: + private_key: /path/to/keyname.pem + aws_access_key: secret + aws_secret_key: qsecret + +modules: + infra: + components: + terraform_aws: + stack_name: "PaasPureDocker" + region: "eu-west-1" + parameters: + KeyName: "keyname" + ManagerSize: 1 + ManagerInstanceType: "t2.micro" + ClusterSize: 2 + InstanceType: "t2.micro" + EnableCloudWatchLogs: "no" + + orchestrator: + components: + swarm_aws: + user: docker + bind_port: 2374 + region: eu-west-1 + tags: + key: swarm-node-type + manager_value: manager + worker_value: worker + + network: + orchestrator: orchestrator + components: + traefik: + + log_management: + orchestrator: orchestrator + components: + elk_stack: + + monitoring: + orchestrator: orchestrator + components: + prom_stack: + + deployer: + orchestrator: orchestrator + components: + portainer: + registry: diff --git a/sample_configs/azure.yml b/sample_configs/azure.yml new file mode 100644 index 0000000..a884afe --- /dev/null +++ b/sample_configs/azure.yml @@ -0,0 +1,55 @@ +version: 1 + +hub: 'http://10.216.18.67:8080' + +credentials: + private_key: /path/to/keyname.pem + azure_client_id: secret + azure_client_secret: secret + azure_tenant_id: secret + 
subscription_id: secret + +modules: + infra: + components: + terraform_azure: + stack_name: "PaasPureDocker" + resource_group_name: paaspureswarm + resource_group_location: "North Europe" + parameters: + enableExtLogs: "no" + linuxSSHPublicKey: /path/to/keyname.pub + managerCount: 1 + managerVMSize: "Standard_D1_v2" + linuxWorkerCount: 1 + linuxWorkerVMSize: "Standard_D1_v2" + swarmName: "dockerswarm" + + orchestrator: + components: + swarm_azure: + resource_group_name: paaspureswarm + swarmName: "dockerswarm" + user: docker + bind_port: 2374 + + network: + orchestrator: orchestrator + components: + traefik: + + log_management: + orchestrator: orchestrator + components: + elk_stack: + + monitoring: + orchestrator: orchestrator + components: + prom_stack: + + deployer: + orchestrator: orchestrator + components: + portainer: + registry: diff --git a/sample_configs/hybrid.yml b/sample_configs/hybrid.yml new file mode 100644 index 0000000..9606163 --- /dev/null +++ b/sample_configs/hybrid.yml @@ -0,0 +1,49 @@ +version: 1 + +hub: 'http://10.216.18.67:8080' + +credentials: + private_key: /path/to/keyname.pem + azure_client_id: secret + azure_client_secret: secret + azure_tenant_id: secret + subscription_id: secret + aws_access_key: secret + aws_secret_key: secret + +modules: + infra: + components: + terraform_azure: + stack_name: "PaasPureDocker" + resource_group_name: paaspureswarm + resource_group_location: "North Europe" + parameters: + enableExtLogs: "no" + linuxSSHPublicKey: /path/to/keyname.pub + managerCount: 1 + managerVMSize: "Standard_D1_v2" + linuxWorkerCount: 1 + linuxWorkerVMSize: "Standard_D1_v2" + swarmName: "dockerswarm" + hybrid_aws: + aws_region: "eu-west-1" + aws_key_name: "keyname" + ssh_user: "docker" + worker_instance_type: "t2.micro" + worker_count: 3 + orchestrator_params: + name: 'orchestrator' + component: 'swarm_azure' + resource_group_name: paaspureswarm + swarmName: "dockerswarm" + + orchestrator: + components: + swarm_azure: + 
resource_group_name: paaspureswarm + swarmName: "dockerswarm" + user: docker + bind_port: 2374 + + ... Other Pure objects ... diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000..6bf32c7 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,8 @@ +[bdist_wheel] +universal = 1 + +# [flake8] +# max-line-length = 120 + +# [metadata] +# license_file = LICENSE.txt diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..a7809a9 --- /dev/null +++ b/setup.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- + +import os + +from setuptools import setup, find_packages +from paaspure.utils import get_version, read_file + +setup( + name='paaspure', + version=get_version(), + description='A tool for building the PaaS of the future.', + long_description=read_file( + os.path.abspath(os.path.dirname(__file__)), + 'README.md' + ), + classifiers=[ + 'Development Status :: 1 - Planning', + 'Intended Audience :: DevOps & Platform', + 'License :: Unkown for now', + 'Topic :: System :: Installation/Setup', + 'Programming Language :: Python :: 3.6' + ], + keywords='paas caas', + author='Ruben Vasconcelos', + author_email='ruben.vasconcelos3@mail.dcu.ie', + url='None yet', + license='MIT hopefully', + packages=find_packages( + exclude=['docs', 'tests*'] + ), + entry_points={ + 'console_scripts': [ + 'paaspure = paaspure.__main__:main' + ], + }, + zip_safe=False, + python_requires='>=3.0', + extras_require={ + 'testing': ['pytest'], + }, +) diff --git a/tests/argparser/__init__.py b/tests/argparser/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/tests/argparser/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/tests/argparser/test_paaspure_parser.py b/tests/argparser/test_paaspure_parser.py new file mode 100644 index 0000000..4de1787 --- /dev/null +++ b/tests/argparser/test_paaspure_parser.py @@ -0,0 +1,31 @@ +# -*- coding: utf-8 -*- + +import argparse +import pytest +from paaspure.argparser import paaSPureParser + + +def 
test_generic_parser(capsys): + assert isinstance(paaSPureParser.parser, argparse.ArgumentParser) + + args = paaSPureParser.parser.parse_args([]) + assert args.file == 'pure.yml' + + args = paaSPureParser.parser.parse_args(['--file', 'test']) + assert args.file == 'test' + + with pytest.raises(SystemExit): + paaSPureParser.parser.parse_args(['-h']) + + out, _ = capsys.readouterr() + assert 'PaaSPure build the Paas of the future.' in out + + +def test_can_extend_parser(): + paaSPureParser.parser.add_argument('--unit_test', type=str) + + args = paaSPureParser.parser.parse_args([ + '--unit_test', 'test' + ]) + + assert args.unit_test == 'test' diff --git a/tests/generate/__init__.py b/tests/generate/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/tests/generate/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/tests/generate/test_paaspure_generate.py b/tests/generate/test_paaspure_generate.py new file mode 100644 index 0000000..c865443 --- /dev/null +++ b/tests/generate/test_paaspure_generate.py @@ -0,0 +1,121 @@ +# -*- coding: utf-8 -*- + +import os +import shutil +import pytest + +from paaspure import settings +from paaspure.argparser import paaSPureParser +from paaspure.generate import PaaSPureGenerator + + +invalid_names = [ + '1', + '_test', + 'test_', + 'test_1', + 'as@#asd', + 'test1test' +] + + +@pytest.fixture(scope="module") +def generator_setup(): + """ Setup state specific to the execution of the module.""" + PaaSPureGenerator() + + +@pytest.fixture(scope="function") +def module_name(): + """ New module name and cleanup work.""" + new_module = 'test_module' + yield new_module + shutil.rmtree(new_module, ignore_errors=True) + + +@pytest.mark.usefixtures("generator_setup") +class TestPaaSPureGenerator: + def test_extended_generic_parser(self, capsys): + with pytest.raises(SystemExit): + PaaSPureGenerator().run( + paaSPureParser.parser.parse_args(['generate']) + ) + + with pytest.raises(SystemExit): + 
PaaSPureGenerator().run( + paaSPureParser.parser.parse_args(['generate', 'module']) + ) + + with pytest.raises(SystemExit): + PaaSPureGenerator().run( + paaSPureParser.parser.parse_args(['generate', 'component']) + ) + + out, _ = capsys.readouterr() + assert 'usage: paaspure generate TEMPLATE' in out + assert 'paaspure generate module NAME' in out + assert 'paaspure generate component PARENT_MODULE NAME' in out + + def test_generate_module(self, capsys, module_name): + args = paaSPureParser.parser.parse_args([ + 'generate', 'module', module_name + ]) + + assert args.command == 'generate' + assert args.template == 'module' + assert args.NAME == module_name + + PaaSPureGenerator().run(args) + + assert os.path.exists(module_name) + + # Check that all files were generated. + component_templates = os.listdir(os.path.join( + settings.PROJECT_ROOT, + 'generate', + 'templates', + 'module' + )) + + for template in component_templates: + assert os.path.exists(os.path.join( + module_name, + '.'.join(template.split('-')) + )) + + # Test: Trying to create module with existing name raises exeception. + with pytest.raises(SystemExit): + PaaSPureGenerator().run(args) + + out, _ = capsys.readouterr() + assert f'Module {module_name}, already exists.' 
in out + + def test_generate_component(self, module_name): + args = paaSPureParser.parser.parse_args([ + 'generate', 'component', module_name, module_name + ]) + + assert args.command == 'generate' + assert args.template == 'component' + assert args.PARENT_MODULE == module_name + assert args.NAME == module_name + + PaaSPureGenerator().run(args) + # TODO: Finish component implementation and asserts + + @pytest.mark.parametrize("name", invalid_names) + def test_invalid_names(self, capsys, name): + args = paaSPureParser.parser.parse_args([ + 'generate', 'module', name + ]) + + assert args.command == 'generate' + assert args.template == 'module' + assert args.NAME == name + + with pytest.raises(SystemExit): + PaaSPureGenerator().run(args) + + out, _ = capsys.readouterr() + assert 'Valid names must:' in out + assert not os.path.exists(name) diff --git a/tests/pull/__init__.py b/tests/pull/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/tests/pull/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/tests/pull/test_paaspure_pull.py b/tests/pull/test_paaspure_pull.py new file mode 100644 index 0000000..cc6f960 --- /dev/null +++ b/tests/pull/test_paaspure_pull.py @@ -0,0 +1,110 @@ +# -*- coding: utf-8 -*- + +import os +import shutil +import pytest + +from paaspure.argparser import paaSPureParser +from paaspure.pull import PaaSPurePuller + + +@pytest.fixture(scope="module") +def pull_setup(): + """ Setup state specific to the execution of the module.""" + PaaSPurePuller() + + +@pytest.fixture(scope="function") +def pulled_module(): + """ New module name and cleanup work.""" + module_url = 'git@github.com:iorubs/paaspure_vm_builder.git' + module_name = 'test_module' + yield [module_url, module_name] + shutil.rmtree(module_name, ignore_errors=True) + shutil.rmtree('tests/' + module_name + '_tests', ignore_errors=True) + + +# TODO: Update tests after the central hub is implemented. 
+@pytest.mark.usefixtures("pull_setup") +class TestPaaSPurePuller: + def test_extended_generic_parser(self, capsys): + with pytest.raises(SystemExit): + PaaSPurePuller().run( + args=paaSPureParser.parser.parse_args(['pull']), + config={} + ) + + with pytest.raises(SystemExit): + PaaSPurePuller().run( + args=paaSPureParser.parser.parse_args( + ['pull', '--git-url', 'some_url', 'module'] + ), + config={} + ) + + with pytest.raises(SystemExit): + PaaSPurePuller().run( + args=paaSPureParser.parser.parse_args( + ['pull', '--git-url', 'some_url', 'component'] + ), + config={} + ) + + out, _ = capsys.readouterr() + assert 'usage: paaspure pull [OPTIONS] TYPE' in out + assert 'paaspure pull module NAME' in out + assert 'paaspure pull component PARENT_MODULE NAME' in out + + @pytest.mark.skip(reason="Repos are currently private.") + def test_pull_module(self, capsys, pulled_module): + args = paaSPureParser.parser.parse_args([ + 'pull', '--git-url', pulled_module[0], 'module', pulled_module[1] + ]) + + assert args.command == 'pull' + assert args.type == 'module' + assert args.NAME == pulled_module[1] + + PaaSPurePuller().run(args) + + assert os.path.exists(pulled_module[1]) + assert os.path.exists('tests/' + pulled_module[1] + '_tests') + + # Test: Trying to create module with existing name raises exeception. 
+ PaaSPurePuller().run(args) + + out, _ = capsys.readouterr() + assert f'Found existing module {pulled_module[1]}:' in out + + @pytest.mark.skip(reason="Repos are currently private.") + def test_pull_component(self, capsys, pulled_module): + component_args = paaSPureParser.parser.parse_args([ + 'pull', + '--git-url', + 'git@github.com:iorubs/paaspure_packer_aws.git', + 'component', + pulled_module[1], + pulled_module[1], + ]) + + assert component_args.command == 'pull' + assert component_args.type == 'component' + assert component_args.PARENT_MODULE == pulled_module[1] + assert component_args.NAME == pulled_module[1] + + with pytest.raises(SystemExit): + PaaSPurePuller().run(component_args) + + out, _ = capsys.readouterr() + assert 'Missing module: ' in out + + module_args = paaSPureParser.parser.parse_args([ + 'pull', '--git-url', pulled_module[0], 'module', pulled_module[1] + ]) + + PaaSPurePuller().run(module_args) + PaaSPurePuller().run(component_args) + assert os.path.exists(pulled_module[1] + '/' + pulled_module[1]) + module_test_folder = pulled_module[1] + '_tests/' + component_test_folder = module_test_folder + module_test_folder + assert os.path.exists('tests/' + component_test_folder) diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 0000000..40a96af --- /dev/null +++ b/tests/utils/__init__.py @@ -0,0 +1 @@ +# -*- coding: utf-8 -*- diff --git a/tests/utils/test_files.py b/tests/utils/test_files.py new file mode 100644 index 0000000..743120c --- /dev/null +++ b/tests/utils/test_files.py @@ -0,0 +1,38 @@ +# -*- coding: utf-8 -*- + +import pytest +import yaml +import os +from paaspure.utils import read_file, read_yaml_file, write_yaml_file +import pathlib + + +def test_normal_file_utils(): + with pytest.raises(FileNotFoundError): + file_output = read_file('Missing file', 'missing') + + file_output = read_file('setup.py') + assert "name='paaspure'," in file_output + + +def test_yaml_file_utils(): + with 
pytest.raises(yaml.scanner.ScannerError): + yaml_file_output = read_yaml_file('setup.py', 'setup.py') + + with pytest.raises(FileNotFoundError): + yaml_file_output = read_yaml_file('Missing file', 'missing') + + with pytest.raises(FileNotFoundError): + write_yaml_file(['Bad path'], '/dummy loc/test.yml') + + write_yaml_file(['test'], 'test.yml') + + new_yaml_file = pathlib.Path('test.yml') + assert new_yaml_file.exists() + + yaml_file_output = read_yaml_file('test.yml', 'test.yml') + + assert ['test'] == yaml_file_output + + os.remove('test.yml') + assert not new_yaml_file.exists() diff --git a/tests/utils/test_general.py b/tests/utils/test_general.py new file mode 100644 index 0000000..c137626 --- /dev/null +++ b/tests/utils/test_general.py @@ -0,0 +1,16 @@ +# -*- coding: utf-8 -*- + +from paaspure.utils import get_version, escape_ansi +from paaspure.__init__ import __version__ + + +def test_get_version(): + assert get_version() == __version__ + + +def test_escape_ansi(): + expected = '\tTestAnsi\n' + ansi_encoded_line = '\t\u001b[0;35mTestAnsi\u001b[0m\n' + actual = escape_ansi(ansi_encoded_line) + + assert expected == actual diff --git a/tests/utils/test_package.py b/tests/utils/test_package.py new file mode 100644 index 0000000..a3cf722 --- /dev/null +++ b/tests/utils/test_package.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +import pytest +import subprocess +import sys +import importlib +from paaspure.utils import pip_install +from paaspure import settings +from io import StringIO + + +@pytest.fixture(scope="function") +def dummy_package(): + package = 'dummy-yummy' + yield [package, 'yummy'] + subprocess.check_call( + [sys.executable, '-m', 'pip', 'uninstall', '-y', package] + ) + + +def test_pip_install_quiet(dummy_package): + with pytest.raises(ImportError): + importlib.__import__(dummy_package[1]) + + pip_install(packages=[dummy_package[0]], component='Test') + + try: + importlib.__import__(dummy_package[1]) + except ImportError: + raise 
pytest.fail('Package not found.') + + +def test_pip_install(capsys, dummy_package): + settings.QUIET_INSTALL = False + + sys.stdin = StringIO('random_gib') + with pytest.raises(SystemExit): + pip_install(packages=[dummy_package[0]], component='Test') + + out, _ = capsys.readouterr() + assert 'respond with' in out + + sys.stdin = StringIO('no') + with pytest.raises(SystemExit): + pip_install(packages=[dummy_package[0]], component='Test') + + out, _ = capsys.readouterr() + assert 'let me install' in out