From 8ce10caabcd2d3f432be5b431a1624626533f3a5 Mon Sep 17 00:00:00 2001 From: Kat Morgan Date: Wed, 6 Nov 2024 23:23:31 +0000 Subject: [PATCH] re-refactoring --- .cursorignore | 33 + .cursorrules | 224 ++++++ Pulumi.yaml | 4 +- __main__.py | 132 ++++ core/__init__.py | 172 +++++ core/config.py | 437 +++++++++++ core/deployment.py | 311 ++++++++ {pulumi/core => core}/metadata.py | 310 +++++--- core/resource_helpers.py | 452 +++++++++++ core/types.py | 443 +++++++++++ core/utils.py | 370 +++++++++ ...ult_versions.json => default_versions.json | 0 docs/developer_guide/module_refactoring.md | 229 ++++++ docs/developer_guide/modules/core/README.md | 351 +++++++++ docs/infrastructure_as_code.md | 131 ++++ docs/pulumi_iac.md | 168 ++++ modules/aws/__init__.py | 91 +++ {pulumi/modules => modules}/aws/config.py | 116 ++- {pulumi/modules => modules}/aws/deploy.py | 58 +- modules/aws/eks.py | 307 ++++++++ modules/aws/exceptions.py | 21 + modules/aws/iam.py | 392 ++++++++++ modules/aws/networking.py | 554 ++++++++++++++ modules/aws/organization.py | 424 +++++++++++ modules/aws/provider.py | 352 +++++++++ modules/aws/resources.py | 438 +++++++++++ modules/aws/security.py | 497 ++++++++++++ {pulumi/modules => modules}/aws/taggable.py | 0 modules/aws/types.py | 407 ++++++++++ pulumi/mypy.ini => mypy.ini | 0 pulumi/__main__.py | 65 -- pulumi/core/config.py | 216 ------ pulumi/core/deployment.py | 293 ------- pulumi/core/resource_helpers.py | 382 ---------- pulumi/core/types.py | 65 -- pulumi/core/utils.py | 266 ------- pulumi/modules/aws/resources.py | 717 ------------------ pulumi/modules/aws/types.py | 152 ---- pulumi/providers/aws/__init__.py | 0 pulumi/providers/k8s/__init__.py | 0 pulumi/stacks/Pulumi.scip-ops-prod.yaml | 65 -- pulumi/requirements.txt => requirements.txt | 0 {pulumi/stacks => stacks}/.gitkeep | 0 staging/aux/documentation.py | 249 ++++++ .../modules/azure}/__init__.py | 0 .../aws => staging/modules/ceph}/__init__.py | 0 {pulumi => 
staging}/modules/ceph/deploy.py | 0 .../modules/cert_manager/README.md | 0 .../modules/cert_manager}/__init__.py | 0 .../modules/cert_manager/deploy.py | 0 .../modules/cert_manager/types.py | 0 .../modules/cilium}/__init__.py | 0 {pulumi => staging}/modules/cilium/deploy.py | 0 .../cluster_network_addons}/__init__.py | 0 .../modules/cluster_network_addons/deploy.py | 0 .../containerized_data_importer}/__init__.py | 0 .../containerized_data_importer/deploy.py | 0 .../containerized_data_importer/types.py | 0 .../modules/hostpath_provisioner}/__init__.py | 0 .../modules/hostpath_provisioner/deploy.py | 0 .../modules/hostpath_provisioner/types.py | 0 .../modules/kubernetes}/__init__.py | 0 .../modules/kubernetes_dashboard}/__init__.py | 0 .../modules/kubernetes_dashboard/deploy.py | 0 .../modules/kubevirt/README.md | 0 .../modules/kubevirt}/__init__.py | 0 .../modules/kubevirt/deploy.py | 0 {pulumi => staging}/modules/kubevirt/types.py | 0 .../modules/kv_manager}/__init__.py | 0 .../modules/kv_manager/deploy.py | 0 .../modules/local_path_storage}/__init__.py | 0 .../modules/local_path_storage/deploy.py | 0 .../modules/multus}/__init__.py | 0 {pulumi => staging}/modules/multus/deploy.py | 0 {pulumi => staging}/modules/multus/types.py | 0 .../modules/openunison}/__init__.py | 0 .../openunison/assets/alertmanager.png | Bin .../modules/openunison/assets/grafana.png | Bin .../modules/openunison/assets/kubevirt.png | Bin .../modules/openunison/assets/prometheus.png | Bin .../modules/openunison/deploy.py | 0 .../modules/openunison/encoded_assets.py | 0 .../modules/prometheus}/__init__.py | 0 .../modules/prometheus/deploy.py | 0 .../modules/prometheus/types.py | 0 .../modules/vm}/__init__.py | 0 {pulumi => staging}/modules/vm/talos.py | 0 {pulumi => staging}/modules/vm/ubuntu.py | 0 .../providers}/__init__.py | 0 .../vm => staging/providers/aws}/__init__.py | 0 {pulumi => staging}/providers/aws/config.py | 0 .../providers/aws/deployment.py | 0 .../providers/aws/resources.py | 
0 {pulumi => staging}/providers/aws/types.py | 0 {pulumi => staging}/providers/config.py | 0 .../providers/k8s}/__init__.py | 0 {pulumi => staging}/providers/k8s/config.py | 0 .../providers/k8s/deployment.py | 0 .../providers/k8s/resources.py | 0 {pulumi => staging}/providers/k8s/types.py | 0 100 files changed, 7537 insertions(+), 2357 deletions(-) create mode 100644 .cursorignore create mode 100644 .cursorrules create mode 100644 __main__.py create mode 100644 core/__init__.py create mode 100644 core/config.py create mode 100644 core/deployment.py rename {pulumi/core => core}/metadata.py (56%) create mode 100644 core/resource_helpers.py create mode 100644 core/types.py create mode 100644 core/utils.py rename pulumi/default_versions.json => default_versions.json (100%) create mode 100644 docs/developer_guide/module_refactoring.md create mode 100644 docs/developer_guide/modules/core/README.md create mode 100644 docs/infrastructure_as_code.md create mode 100644 docs/pulumi_iac.md create mode 100644 modules/aws/__init__.py rename {pulumi/modules => modules}/aws/config.py (63%) rename {pulumi/modules => modules}/aws/deploy.py (70%) create mode 100644 modules/aws/eks.py create mode 100644 modules/aws/exceptions.py create mode 100644 modules/aws/iam.py create mode 100644 modules/aws/networking.py create mode 100644 modules/aws/organization.py create mode 100644 modules/aws/provider.py create mode 100644 modules/aws/resources.py create mode 100644 modules/aws/security.py rename {pulumi/modules => modules}/aws/taggable.py (100%) create mode 100644 modules/aws/types.py rename pulumi/mypy.ini => mypy.ini (100%) delete mode 100644 pulumi/__main__.py delete mode 100644 pulumi/core/config.py delete mode 100644 pulumi/core/deployment.py delete mode 100644 pulumi/core/resource_helpers.py delete mode 100644 pulumi/core/types.py delete mode 100644 pulumi/core/utils.py delete mode 100644 pulumi/modules/aws/resources.py delete mode 100644 pulumi/modules/aws/types.py delete mode 
100644 pulumi/providers/aws/__init__.py delete mode 100644 pulumi/providers/k8s/__init__.py delete mode 100644 pulumi/stacks/Pulumi.scip-ops-prod.yaml rename pulumi/requirements.txt => requirements.txt (100%) rename {pulumi/stacks => stacks}/.gitkeep (100%) create mode 100644 staging/aux/documentation.py rename {pulumi/core => staging/modules/azure}/__init__.py (100%) rename {pulumi/modules/aws => staging/modules/ceph}/__init__.py (100%) rename {pulumi => staging}/modules/ceph/deploy.py (100%) rename {pulumi => staging}/modules/cert_manager/README.md (100%) rename {pulumi/modules/azure => staging/modules/cert_manager}/__init__.py (100%) rename {pulumi => staging}/modules/cert_manager/deploy.py (100%) rename {pulumi => staging}/modules/cert_manager/types.py (100%) rename {pulumi/modules/ceph => staging/modules/cilium}/__init__.py (100%) rename {pulumi => staging}/modules/cilium/deploy.py (100%) rename {pulumi/modules/cert_manager => staging/modules/cluster_network_addons}/__init__.py (100%) rename {pulumi => staging}/modules/cluster_network_addons/deploy.py (100%) rename {pulumi/modules/cilium => staging/modules/containerized_data_importer}/__init__.py (100%) rename {pulumi => staging}/modules/containerized_data_importer/deploy.py (100%) rename {pulumi => staging}/modules/containerized_data_importer/types.py (100%) rename {pulumi/modules/cluster_network_addons => staging/modules/hostpath_provisioner}/__init__.py (100%) rename {pulumi => staging}/modules/hostpath_provisioner/deploy.py (100%) rename {pulumi => staging}/modules/hostpath_provisioner/types.py (100%) rename {pulumi/modules/containerized_data_importer => staging/modules/kubernetes}/__init__.py (100%) rename {pulumi/modules/hostpath_provisioner => staging/modules/kubernetes_dashboard}/__init__.py (100%) rename {pulumi => staging}/modules/kubernetes_dashboard/deploy.py (100%) rename {pulumi => staging}/modules/kubevirt/README.md (100%) rename {pulumi/modules/kubernetes => 
staging/modules/kubevirt}/__init__.py (100%) rename {pulumi => staging}/modules/kubevirt/deploy.py (100%) rename {pulumi => staging}/modules/kubevirt/types.py (100%) rename {pulumi/modules/kubernetes_dashboard => staging/modules/kv_manager}/__init__.py (100%) rename {pulumi => staging}/modules/kv_manager/deploy.py (100%) rename {pulumi/modules/kubevirt => staging/modules/local_path_storage}/__init__.py (100%) rename {pulumi => staging}/modules/local_path_storage/deploy.py (100%) rename {pulumi/modules/kv_manager => staging/modules/multus}/__init__.py (100%) rename {pulumi => staging}/modules/multus/deploy.py (100%) rename {pulumi => staging}/modules/multus/types.py (100%) rename {pulumi/modules/local_path_storage => staging/modules/openunison}/__init__.py (100%) rename {pulumi => staging}/modules/openunison/assets/alertmanager.png (100%) rename {pulumi => staging}/modules/openunison/assets/grafana.png (100%) rename {pulumi => staging}/modules/openunison/assets/kubevirt.png (100%) rename {pulumi => staging}/modules/openunison/assets/prometheus.png (100%) rename {pulumi => staging}/modules/openunison/deploy.py (100%) rename {pulumi => staging}/modules/openunison/encoded_assets.py (100%) rename {pulumi/modules/multus => staging/modules/prometheus}/__init__.py (100%) rename {pulumi => staging}/modules/prometheus/deploy.py (100%) rename {pulumi => staging}/modules/prometheus/types.py (100%) rename {pulumi/modules/openunison => staging/modules/vm}/__init__.py (100%) rename {pulumi => staging}/modules/vm/talos.py (100%) rename {pulumi => staging}/modules/vm/ubuntu.py (100%) rename {pulumi/modules/prometheus => staging/providers}/__init__.py (100%) rename {pulumi/modules/vm => staging/providers/aws}/__init__.py (100%) rename {pulumi => staging}/providers/aws/config.py (100%) rename {pulumi => staging}/providers/aws/deployment.py (100%) rename {pulumi => staging}/providers/aws/resources.py (100%) rename {pulumi => staging}/providers/aws/types.py (100%) rename {pulumi => 
staging}/providers/config.py (100%) rename {pulumi/providers => staging/providers/k8s}/__init__.py (100%) rename {pulumi => staging}/providers/k8s/config.py (100%) rename {pulumi => staging}/providers/k8s/deployment.py (100%) rename {pulumi => staging}/providers/k8s/resources.py (100%) rename {pulumi => staging}/providers/k8s/types.py (100%) diff --git a/.cursorignore b/.cursorignore new file mode 100644 index 0000000..59be025 --- /dev/null +++ b/.cursorignore @@ -0,0 +1,33 @@ +# Add directories or file patterns to ignore during indexing (e.g. foo/ or *.csv) +.env +.envrc +.tmpenv +**/.tmpenv + +local +.kube +.talos + +*.pyc +venv/ +__pycache__/ +pulumi/.pulumi + +*.pyc +venv/ +__pycache__/ +.talos/manifest +.pulumi/plugins +.pulumi/* +Pulumi.*.yaml +.env +.devcontainer/docker +tmp.* +tmp +.ssh +tmp +smce-cli +**/config +**/credentials +**/.gitconfig +pulumi/modules/aws.bak diff --git a/.cursorrules b/.cursorrules new file mode 100644 index 0000000..7907a92 --- /dev/null +++ b/.cursorrules @@ -0,0 +1,224 @@ +// Konductor Platform Engineering .cursorrules +// Core development principles and AI assistance configuration + +// Development Philosophy +You are an expert Python/Pulumi developer specializing in Infrastructure as Code (IaC). +Focus on object-oriented design, type safety, maintainability, and compliance-ready infrastructure. +Prioritize code quality, proper module design, and best practices over feature quantity. +Adhere to the principle: "Features are nice. Quality is paramount." + +// Technical Standards +- Enforce strict type checking with Pyright in strict mode. +- Use type hints consistently throughout the codebase; avoid the use of `Any` type. +- Leverage Pydantic models and `TypedDict` for configuration validation. +- Implement comprehensive error handling with context capture. +- Follow PEP 8 and PEP 257 for code style and documentation. +- Maintain modular architecture with clear separation of concerns. 
+ +// Module Structure Requirements +Each module must have: +- `__init__.py`: Exposing the public API with `__all__`; no direct resource creation. +- `types.py`: For type definitions, configurations, and data models. +- `resources.py`: For resource management classes and logic. +- `provider.py` (if applicable): For provider-specific integrations. +- Component-specific implementation files as needed. +- `README.md`: For module-specific documentation. + +**File Responsibilities:** +- `__init__.py`: Public API and entry points; import public interfaces; define `__all__`. +- `types.py`: Type definitions, configuration classes, and data models. +- `resources.py`: Classes for managing resource creation and lifecycle. + +// Code Organization Rules +- Maintain clear separation between public API (`__init__.py`), type definitions (`types.py`), and resource management (`resources.py`). +- Use classes to encapsulate related functionality; prefer composition over inheritance. +- Implement the single responsibility principle in classes and modules. +- Avoid circular dependencies and maintain proper module dependencies. +- Organize test files in a parallel structure within a `tests/` directory. + +// Class Design Requirements +All classes must: +- Have a clear, single responsibility. +- Use dependency injection where appropriate. +- Include comprehensive docstrings using PEP 257 conventions. +- Implement proper error handling and resource cleanup. +- Use type hints for all methods and properties. +- Follow encapsulation principles; use properties for computed values. + +// Type Safety Rules +All code must include: +- Type hints for all functions, methods, and parameters. +- Return type annotations for all functions and methods. +- Use Pydantic models for configuration validation. +- Use `TypedDict` for structured dictionary types where appropriate. +- Define clear interfaces using Protocol classes when necessary. +- Enforce strict typing; no use of `Any` type. 
+ +// Documentation Requirements +Include: +- Clear module docstrings describing purpose and functionality. +- Class and method docstrings detailing behavior, parameters, and return types. +- Configuration documentation, including defaults and examples. +- Usage examples in `README.md` files. +- Breaking change notices and migration guides when applicable. +- Follow a consistent docstring style (e.g., Google or NumPy). + +// Testing Requirements +All tests must: +- Use the `pytest` framework exclusively. +- Include type annotations and follow type safety practices. +- Test configuration validation, resource creation, and error conditions. +- Maintain minimum test coverage of 80%, including branch coverage. +- Be organized in a `tests/` directory mirroring the module structure. +- Support mocking of external dependencies and resource providers. + +// Error Handling +Implement: +- Custom exception classes organized in a clear hierarchy. +- Meaningful error messages with comprehensive context. +- Proper error logging and monitoring. +- Recovery procedures where possible. +- Ensure resource cleanup on errors or exceptions. + +// Configuration Management +Use: +- Pydantic models for configuration validation and management. +- Support for environment variables and overrides. +- Configuration merging capabilities with defaults. +- Early validation of configurations during initialization. +- Secure secret management and environment-specific settings. + +// Resource Management +Ensure: +- Idempotent resource creation and updates. +- Explicit handling of resource dependencies. +- Support for resource tagging and metadata. +- Proper cleanup procedures for resources. +- Error recovery mechanisms and retries where appropriate. + +// Security and Compliance +Enforce: +- NIST controls and FISMA compliance requirements. +- Security-first infrastructure design principles. +- Comprehensive audit logging and monitoring. +- Automated compliance reporting and validation. 
+- Secure handling of secrets and sensitive data. + +// Infrastructure Patterns +Implement infrastructure with: +- Support for multi-account and multi-region strategies. +- Compliance-ready configurations out of the box. +- Automated security controls and policies. +- Comprehensive logging, monitoring, and alerting. +- Considerations for disaster recovery and business continuity. + +// Naming Conventions +Follow: +- `snake_case` for functions, methods, variables, and module names. +- `PascalCase` (CapWords) for class names. +- `UPPER_SNAKE_CASE` for constants and global variables. +- Descriptive and meaningful names that reflect purpose. +- Consistent terminology across the codebase. + +// Development Workflow +Adhere to: +- Feature branch workflow with meaningful branch names. +- Comprehensive code reviews with attention to the guidelines. +- Documentation updates alongside code changes. +- Maintaining test coverage and adding tests for new features. +- Continuous integration and compliance validation. + +// Best Practices +Maintain: +- DRY (Don't Repeat Yourself) principle. +- Single responsibility in functions and classes. +- Clear and comprehensive documentation. +- Type safety and strict typing. +- Security considerations in all code. +- Backward compatibility where possible. + +// When Generating or Modifying Code: +1. Follow class-based design patterns and encapsulate functionality. +2. Implement proper module structure with clear separation of concerns. +3. Use type hints consistently; enforce strict typing. +4. Include comprehensive unit and integration tests. +5. Document all public interfaces and important implementation details. +6. Implement robust error handling and logging. +7. Consider backward compatibility and document breaking changes. +8. Maintain resource lifecycle management and cleanup. + +// When Reviewing or Suggesting Changes: +1. Verify that module structure adheres to guidelines. +2. 
Check for proper class design and single responsibility. +3. Validate type safety and strict typing compliance. +4. Review error handling and resource cleanup procedures. +5. Assess test coverage and effectiveness. +6. Verify completeness and clarity of documentation. +7. Consider the impact of changes on backward compatibility. +8. Ensure resource management is correct and efficient. + +// Remember: +- **Quality over quantity**: focus on code excellence. +- **Security is non-negotiable**: prioritize secure coding practices. +- **Documentation is crucial**: keep it up-to-date and clear. +- **Type safety is mandatory**: enforce strict typing. +- **Tests are required**: maintain high coverage and test quality. +- **Compliance must be maintained**: adhere to standards and regulations. +- **Resource management must be clean**: ensure proper creation and cleanup. +- **Breaking changes need migration paths**: provide clear guidance. + +// Prohibited Patterns +Avoid: +- Using `from typing import Any`; strive for explicit types. +- Using `except Exception:` without specifying exceptions. +- Suppressing type checking with `# type: ignore` without justification. +- Using `# noqa` to suppress linter warnings without addressing issues. +- Global state and mutable global variables. +- Circular dependencies between modules. +- Direct resource creation in `__init__.py`; use classes and methods. + +// Required Patterns +Ensure: +- `__all__` is defined in `__init__.py` to manage the public API. +- Configurations are defined using Pydantic models or `TypedDict`. +- Resource management is encapsulated within classes. +- Explicit error handling and logging are implemented. +- Use of dependency injection for better testability and flexibility. + +// Dependency Management +Use: +- Poetry for dependency management and virtual environments. +- Version pinning for dependencies to ensure reproducibility. +- Dependency injection to manage external dependencies. 
+- Explicit declarations of required packages in `pyproject.toml`. + +// Version Control +Follow: +- Meaningful commit messages that reference issues and describe changes. +- Use of semantic versioning for releases. +- Feature branches for new features and fixes. +- Tagging versions appropriately in version control. + +// Migration and Breaking Changes +When introducing breaking changes: +- Document the changes clearly in `CHANGELOG.md`. +- Provide migration guides and steps. +- Maintain backward compatibility where possible. +- Bump version numbers appropriately following semantic versioning. + +// Compliance and Security Validation +Ensure: +- Code is reviewed for compliance adherence. +- Security controls are implemented and effective. +- Automated checks are in place for compliance validation. +- Secrets and sensitive data are handled securely. + +// AI Assistant Behavior +When using AI assistance: +- Ensure generated code complies with these guidelines. +- Review AI-suggested code for correctness and compliance. +- Do not rely solely on AI for critical code sections. +- Use AI assistance to augment, not replace, developer expertise. + +// Conclusion +By adhering to these guidelines, we ensure that our codebase remains maintainable, secure, and of high quality. These rules are designed to foster best practices and facilitate collaboration across the team. Always strive for excellence in your work and support your colleagues in doing the same. 
diff --git a/Pulumi.yaml b/Pulumi.yaml index ed4321c..c940cb4 100644 --- a/Pulumi.yaml +++ b/Pulumi.yaml @@ -1,7 +1,7 @@ name: konductor description: DevOps Platform IaC Template Repository -main: ./pulumi -stackConfigDir: ./pulumi/stacks +main: ./ +stackConfigDir: ./stacks runtime: name: python options: diff --git a/__main__.py b/__main__.py new file mode 100644 index 0000000..5e09faf --- /dev/null +++ b/__main__.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python3 +""" +Konductor Infrastructure as Code Platform + +This is the main entry point for the Konductor platform, which provides: +- Multi-cloud infrastructure deployment +- Compliance-driven resource management +- Modular infrastructure components +- GitOps-ready configuration + +Usage: + pulumi up [stack-name] + pulumi preview [stack-name] + pulumi destroy [stack-name] +""" + +import sys +from typing import Dict, Any, List +import pulumi +from pulumi import log + +from core.config import ( + get_enabled_modules, + load_default_versions, + initialize_config, + validate_module_config +) +from core.deployment import ( + initialize_pulumi, + deploy_modules, + DeploymentManager +) +from core.metadata import ( + collect_git_info, + set_global_labels, + set_global_annotations, + setup_global_metadata +) +from core.types import ( + InitializationConfig, + ComplianceConfig, + ModuleDeploymentResult +) + +def main() -> None: + """ + Main entry point for Konductor platform. 
+ + Handles: + - Platform initialization + - Module deployment + - Resource management + - Error handling + """ + try: + # Initialize Pulumi + init = initialize_pulumi() + + # Extract initialization components + config: pulumi.Config = init.config + k8s_provider = init.k8s_provider + default_versions = load_default_versions(config) + compliance_config = ComplianceConfig.merge( + config.get_object("compliance") or {} + ) + + # Initialize configuration + init_config = initialize_config({ + "config": config, + "stack_name": pulumi.get_stack(), + "project_name": pulumi.get_project(), + "default_versions": default_versions, + "k8s_provider": k8s_provider, + "compliance_config": compliance_config + }) + + # Setup global metadata + setup_global_metadata(init_config) + + # Get enabled modules + modules_to_deploy = get_enabled_modules(config) + log.info(f"Deploying modules: {', '.join(modules_to_deploy)}") + + # Create deployment manager + deployment_manager = DeploymentManager(init_config) + + # Deploy modules + results: Dict[str, ModuleDeploymentResult] = {} + for module_name in modules_to_deploy: + try: + # Validate module configuration + module_config = config.get_object(module_name) or {} + validate_module_config(module_name, module_config) + + # Deploy module + result = deployment_manager.deploy_module(module_name) + results[module_name] = result + + if not result.success: + log.error(f"Failed to deploy module {module_name}: {result.errors}") + continue + + # Export module outputs + pulumi.export(f"{module_name}_version", result.version) + pulumi.export(f"{module_name}_resources", result.resources) + pulumi.export(f"{module_name}_metadata", result.metadata) + + except Exception as e: + log.error(f"Error deploying module {module_name}: {str(e)}") + results[module_name] = ModuleDeploymentResult( + success=False, + version="", + resources=[], + errors=[str(e)] + ) + + # Export global results + pulumi.export("deployment_results", { + name: { + "success": result.success, + 
"version": result.version, + "errors": result.errors + } + for name, result in results.items() + }) + + except Exception as e: + log.error(f"Deployment failed: {str(e)}") + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/core/__init__.py b/core/__init__.py new file mode 100644 index 0000000..f3ce9bc --- /dev/null +++ b/core/__init__.py @@ -0,0 +1,172 @@ +""" +Konductor Core Module + +This module provides the core functionality for the Konductor Infrastructure as Code platform. +It handles configuration management, deployment orchestration, resource management, +and compliance controls. + +Key Components: +- Configuration Management +- Deployment Orchestration +- Resource Helpers +- Metadata Management +- Type Definitions + +Usage: + from pulumi.core import ( + initialize_pulumi, + deploy_modules, + ComplianceConfig, + InitializationConfig + ) +""" + +from typing import Dict, List, Any, Optional + +# Version information +__version__ = "0.1.0" +__author__ = "Konductor Team" + +# Type exports +from .types import ( + ComplianceConfig, + InitializationConfig, + ModuleBase, + ModuleDefaults, + ModuleDeploymentResult, + ResourceMetadata, + NamespaceConfig, + FismaConfig, + NistConfig, + ScipConfig, +) + +# Configuration management +from .config import ( + get_module_config, + load_default_versions, + export_results, + validate_module_config, + initialize_config, + merge_configurations, +) + +# Deployment management +from .deployment import ( + initialize_pulumi, + deploy_modules, + DeploymentManager, +) + +# Resource helpers +from .resource_helpers import ( + create_namespace, + create_custom_resource, + create_helm_release, + create_secret, + create_config_file, +) + +# Metadata management +from .metadata import ( + collect_git_info, + generate_git_labels, + generate_git_annotations, + generate_compliance_labels, + generate_compliance_annotations, + set_global_labels, + set_global_annotations, + get_global_labels, + get_global_annotations, +) + +# Utility 
functions +from .utils import ( + set_resource_metadata, + generate_global_transformations, + get_latest_helm_chart_version, + wait_for_crds, +) + +# Default module configuration +DEFAULT_MODULE_CONFIG: Dict[str, ModuleDefaults] = { + "aws": {"enabled": False, "version": None, "config": {}}, + "cert_manager": {"enabled": True, "version": None, "config": {}}, + "kubevirt": {"enabled": True, "version": None, "config": {}}, + "multus": {"enabled": True, "version": None, "config": {}}, + "hostpath_provisioner": {"enabled": True, "version": None, "config": {}}, + "containerized_data_importer": {"enabled": True, "version": None, "config": {}}, + "prometheus": {"enabled": True, "version": None, "config": {}} +} + +# Public API +__all__ = [ + # Types + "ComplianceConfig", + "InitializationConfig", + "ModuleBase", + "ModuleDefaults", + "ModuleDeploymentResult", + "ResourceMetadata", + "NamespaceConfig", + "FismaConfig", + "NistConfig", + "ScipConfig", + + # Configuration + "get_module_config", + "load_default_versions", + "export_results", + "validate_module_config", + "initialize_config", + "merge_configurations", + "DEFAULT_MODULE_CONFIG", + + # Deployment + "initialize_pulumi", + "deploy_modules", + "DeploymentManager", + + # Resources + "create_namespace", + "create_custom_resource", + "create_helm_release", + "create_secret", + "create_config_file", + + # Metadata + "collect_git_info", + "generate_git_labels", + "generate_git_annotations", + "generate_compliance_labels", + "generate_compliance_annotations", + "set_global_labels", + "set_global_annotations", + "get_global_labels", + "get_global_annotations", + + # Utilities + "set_resource_metadata", + "generate_global_transformations", + "get_latest_helm_chart_version", + "wait_for_crds", +] + +def get_version() -> str: + """Returns the core module version.""" + return __version__ + +def get_module_metadata() -> Dict[str, Any]: + """Returns metadata about the core module.""" + return { + "version": __version__, + 
"author": __author__, + "modules": list(DEFAULT_MODULE_CONFIG.keys()), + "features": [ + "Configuration Management", + "Deployment Orchestration", + "Resource Management", + "Compliance Controls", + "Metadata Management" + ] + } diff --git a/core/config.py b/core/config.py new file mode 100644 index 0000000..6226ac8 --- /dev/null +++ b/core/config.py @@ -0,0 +1,437 @@ +# pulumi/core/config.py + +""" +Configuration Management Module + +This module handles the retrieval and preparation of configurations for different modules +within the Pulumi IaC program. It centralizes configuration logic to promote reuse +and maintainability. + +Key Functions: +- get_module_config: Retrieves and prepares module configuration. +- load_default_versions: Loads default versions for modules. +- export_results: Exports global deployment stack metadata. + +Includes proper data type handling to ensure configurations are correctly parsed. +""" + +import json +import os +from pathlib import Path +from typing import Any, Dict, Tuple, Optional, cast, Union, Literal, List +import requests +from pydantic import ValidationError +from urllib.parse import urlparse + +import pulumi +from pulumi import log + +from .types import ( + ComplianceConfig, + ModuleDefaults, + DEFAULT_MODULE_CONFIG, + ModuleBase, + InitializationConfig, + ModuleRegistry +) + +# Configuration Constants +DEFAULT_VERSIONS_URL_TEMPLATE = ( + "https://raw.githubusercontent.com/ContainerCraft/Kargo/rerefactor/pulumi/" +) + +CACHE_DIR = Path("/tmp/konductor") +VERSION_CACHE_FILE = CACHE_DIR / "default_versions.json" + +def ensure_cache_dir() -> None: + """Ensures the cache directory exists.""" + CACHE_DIR.mkdir(parents=True, exist_ok=True) + +def coerce_to_bool(value: Any) -> bool: + """ + Coerces a value to a boolean. + + Args: + value: The value to coerce. + + Returns: + bool: The coerced boolean value. 
+ """ + if isinstance(value, bool): + return value + if isinstance(value, str): + return value.lower() == "true" + return bool(value) + +def get_module_config( + module_name: str, + config: pulumi.Config, + default_versions: Dict[str, Any], + namespace: Optional[str] = None +) -> Tuple[Dict[str, Any], bool]: + """ + Retrieves and prepares the configuration for a module. + + Args: + module_name: The name of the module to configure. + config: The Pulumi configuration object. + default_versions: A dictionary of default versions for modules. + namespace: Optional namespace for module configuration. + + Returns: + Tuple containing: + - Module's configuration dictionary + - Boolean indicating if the module is enabled + + Raises: + ValueError: If module configuration is invalid + """ + try: + # Get module defaults + module_defaults = DEFAULT_MODULE_CONFIG.get(module_name, { + "enabled": False, + "version": None, + "config": {} + }) + + # Get module configuration + module_config = config.get_object(module_name) or {} + + # Determine if module is enabled with proper type coercion + enabled = coerce_to_bool( + module_config.get("enabled", module_defaults["enabled"]) + ) + + # Handle version resolution + version = module_config.get( + "version", + default_versions.get(module_name) + ) + + if version: + module_config["version"] = version + + return module_config, enabled + + except Exception as e: + log.error(f"Error configuring module {module_name}: {str(e)}") + raise + +def validate_version_format(version: str) -> bool: + """ + Validates version string format. + + Args: + version: Version string to validate + + Returns: + bool: True if valid format + """ + try: + # Basic semver validation + parts = version.split('.') + return len(parts) >= 2 and all(part.isdigit() for part in parts) + except Exception: + return False + +def load_versions_from_file(file_path: Path) -> Dict[str, Any]: + """ + Loads version information from a JSON file. 
+ + Args: + file_path: Path to the JSON file. + + Returns: + Dict[str, Any]: Version information dictionary. + """ + try: + if file_path.exists(): + with file_path.open("r") as f: + versions = json.load(f) + # Validate version formats + for module, version in versions.items(): + if version and not validate_version_format(str(version)): + log.warn(f"Invalid version format for {module}: {version}") + log.info(f"Loaded versions from {file_path}") + return versions + except (json.JSONDecodeError, OSError) as e: + log.warn(f"Error loading versions from {file_path}: {str(e)}") + return {} + +def load_versions_from_url(url: str) -> Dict[str, Any]: + """ + Loads version information from a URL. + + Args: + url: URL to fetch versions from. + + Returns: + Dict[str, Any]: Version information dictionary. + """ + try: + response = requests.get(url, timeout=30) + response.raise_for_status() + versions = response.json() + log.info(f"Loaded versions from {url}") + return versions + except (requests.RequestException, json.JSONDecodeError) as e: + log.warn(f"Error loading versions from {url}: {str(e)}") + return {} + +def load_default_versions( + config: pulumi.Config, + force_refresh: bool = False +) -> Dict[str, Any]: + """ + Loads the default versions for modules based on configuration settings. + + This function attempts to load version information from multiple sources: + 1. User-specified source via config + 2. Stack-specific versions file + 3. Local default versions file + 4. Remote versions based on channel + + Args: + config: The Pulumi configuration object. + force_refresh: Whether to force refresh the versions cache. + + Returns: + Dict[str, Any]: Default versions for modules. + + Raises: + Exception: If versions cannot be loaded from any source. 
+ """ + ensure_cache_dir() + + if not force_refresh and VERSION_CACHE_FILE.exists(): + if versions := load_versions_from_file(VERSION_CACHE_FILE): + return versions + + stack_name = pulumi.get_stack() + default_versions_source = config.get("default_versions.source") + versions_channel = config.get("versions.channel") or "stable" + versions_stack_name = coerce_to_bool(config.get("versions.stack_name")) or False + + # Try loading from specified source + if default_versions_source: + if default_versions_source.startswith(("http://", "https://")): + versions = load_versions_from_url(default_versions_source) + else: + versions = load_versions_from_file(Path(default_versions_source)) + + if versions: + _cache_versions(versions) + return versions + raise Exception(f"Failed to load versions from {default_versions_source}") + + # Try stack-specific versions + if versions_stack_name: + stack_versions_path = Path(__file__).parent.parent / "versions" / f"{stack_name}.json" + if versions := load_versions_from_file(stack_versions_path): + _cache_versions(versions) + return versions + + # Try local default versions + default_versions_path = Path(__file__).parent.parent / "default_versions.json" + if versions := load_versions_from_file(default_versions_path): + _cache_versions(versions) + return versions + + # Try remote versions + versions_url = f"{DEFAULT_VERSIONS_URL_TEMPLATE}{versions_channel}_versions.json" + if versions := load_versions_from_url(versions_url): + _cache_versions(versions) + return versions + + raise Exception("Cannot proceed without default versions") + +def _cache_versions(versions: Dict[str, Any]) -> None: + """ + Caches version information to file. + + Args: + versions: Version information to cache. 
+ """ + try: + with VERSION_CACHE_FILE.open("w") as f: + json.dump(versions, f) + except OSError as e: + log.warn(f"Failed to cache versions: {str(e)}") + +def export_results( + versions: Dict[str, str], + configurations: Dict[str, Dict[str, Any]], + compliance: ComplianceConfig, +) -> None: + """ + Exports deployment results including versions, configurations, and compliance information. + + Args: + versions: Versions of deployed modules. + configurations: Configurations of deployed modules. + compliance: Compliance configuration. + """ + try: + # Convert compliance to dictionary if it's a Pydantic model + compliance_dict = ( + compliance.dict() + if isinstance(compliance, ComplianceConfig) + else compliance + ) + + # Export results + pulumi.export("versions", versions) + pulumi.export("configuration", configurations) + pulumi.export("compliance", compliance_dict) + + except Exception as e: + log.error(f"Failed to export results: {str(e)}") + raise + +def validate_url(url: str) -> bool: + """ + Validates URL format. + + Args: + url: URL to validate + + Returns: + bool: True if valid format + """ + try: + result = urlparse(url) + return all([result.scheme, result.netloc]) + except Exception: + return False + +def validate_module_config( + module_name: str, + config: Dict[str, Any], + module_class: Optional[type[ModuleBase]] = None +) -> None: + """ + Validates module configuration against its schema. + + Args: + module_name: Name of the module. + config: Module configuration to validate. + module_class: Optional module configuration class. + + Raises: + ValueError: If configuration is invalid. 
+ """ + try: + # Validate basic structure + if not isinstance(config, dict): + raise ValueError("Configuration must be a dictionary") + + # Check required fields + required_fields = {"enabled", "version"} + missing_fields = required_fields - set(config.keys()) + if missing_fields: + raise ValueError(f"Missing required fields: {missing_fields}") + + # Validate with module class if provided + if module_class: + module_class(**config) + + # Validate version if present + if version := config.get("version"): + if not validate_version_format(str(version)): + raise ValueError(f"Invalid version format: {version}") + + except ValidationError as e: + log.error(f"Invalid configuration for module {module_name}: {str(e)}") + raise ValueError(f"Invalid configuration for module {module_name}: {str(e)}") + except Exception as e: + log.error(f"Configuration validation failed for {module_name}: {str(e)}") + raise + +def merge_configurations( + base_config: Dict[str, Any], + override_config: Dict[str, Any] +) -> Dict[str, Any]: + """ + Merges two configurations with override taking precedence. + + Args: + base_config: Base configuration + override_config: Override configuration + + Returns: + Dict[str, Any]: Merged configuration + """ + result = base_config.copy() + for key, value in override_config.items(): + if ( + key in result and + isinstance(result[key], dict) and + isinstance(value, dict) + ): + result[key] = merge_configurations(result[key], value) + else: + result[key] = value + return result + +def initialize_config(stack_config: Dict[str, Any]) -> InitializationConfig: + """ + Initializes core configuration from stack configuration. + + Args: + stack_config: Stack configuration dictionary. + + Returns: + InitializationConfig: Initialized configuration object. + + Raises: + ValueError: If configuration is invalid. 
+ """ + try: + # Validate stack config structure + if not isinstance(stack_config, dict): + raise ValueError("Stack configuration must be a dictionary") + + # Ensure required fields + required_fields = {"project_name", "stack_name"} + missing_fields = required_fields - set(stack_config.keys()) + if missing_fields: + raise ValueError(f"Missing required stack configuration fields: {missing_fields}") + + # Initialize with validated config + config = InitializationConfig(**stack_config) + + # Validate provider configurations + if config.k8s_provider: + # Add provider-specific validation here + pass + + return config + + except ValidationError as e: + log.error(f"Invalid initialization configuration: {str(e)}") + raise ValueError(f"Invalid initialization configuration: {str(e)}") + except Exception as e: + log.error(f"Configuration initialization failed: {str(e)}") + raise + +def get_enabled_modules(config: pulumi.Config) -> List[str]: + """ + Get list of enabled modules from configuration. + + Args: + config: Pulumi configuration object + + Returns: + List[str]: List of enabled module names + """ + # Default modules - only AWS enabled by default for MVP + default_modules = ["aws"] + + # Get modules from config + modules_config = config.get_object("modules") or {} + + # Return enabled modules + return [ + module_name for module_name in default_modules + if modules_config.get(module_name, {}).get("enabled", True) + ] diff --git a/core/deployment.py b/core/deployment.py new file mode 100644 index 0000000..46d92a8 --- /dev/null +++ b/core/deployment.py @@ -0,0 +1,311 @@ +# pulumi/core/deployment.py + +""" +Deployment Management Module + +This module manages the deployment orchestration of modules, +initializes Pulumi and Kubernetes providers, and handles module deployments. 
+ +Key Functions: +- initialize_pulumi: Initializes Pulumi configuration and providers +- deploy_modules: Orchestrates module deployments +- deploy_module: Handles individual module deployment +""" + +import os +import inspect +import importlib +from typing import Dict, Any, List, Type, Callable, Optional, cast +from pydantic import ValidationError + +import pulumi +import pulumi_kubernetes as k8s +from pulumi import log +from pulumi_kubernetes import Provider + +from .config import ( + get_module_config, + load_default_versions, + initialize_config, + validate_module_config +) +from .metadata import ( + collect_git_info, + generate_git_labels, + generate_git_annotations, + set_global_labels, + set_global_annotations, + generate_compliance_labels, + generate_compliance_annotations, +) +from .utils import generate_global_transformations +from .types import ( + ComplianceConfig, + InitializationConfig, + ModuleDeploymentResult +) +from .resource_helpers import create_namespace + +class DeploymentManager: + """ + Manages module deployment and lifecycle. + + Handles initialization, deployment orchestration, and cleanup of modules. + Ensures proper resource management and error handling. + """ + + def __init__(self, init_config: InitializationConfig): + """ + Initialize deployment manager. 
+ + Args: + init_config: Initialization configuration + """ + self.init_config = init_config + self.deployed_modules: Dict[str, ModuleDeploymentResult] = {} + self._setup_global_metadata() + + def _setup_global_metadata(self) -> None: + """Sets up global metadata for all resources.""" + try: + # Generate metadata + git_info = collect_git_info() + git_labels = generate_git_labels(git_info) + git_annotations = generate_git_annotations(git_info) + + compliance_labels = generate_compliance_labels(self.init_config.compliance_config) + compliance_annotations = generate_compliance_annotations( + self.init_config.compliance_config + ) + + # Combine metadata + global_labels = {**compliance_labels, **git_labels} + global_annotations = {**compliance_annotations, **git_annotations} + + # Set global metadata + set_global_labels(global_labels) + set_global_annotations(global_annotations) + + # Register global transformations + generate_global_transformations(global_labels, global_annotations) + + except Exception as e: + log.error(f"Failed to setup global metadata: {str(e)}") + raise + + def discover_module_config(self, module_name: str) -> Type: + """ + Discovers and returns the configuration class from the module's types.py. 
+ + Args: + module_name: The name of the module + + Returns: + Type: The configuration class + + Raises: + ImportError: If module cannot be imported + ValueError: If no suitable configuration class is found + """ + try: + types_module = importlib.import_module(f"modules.{module_name}.types") + + for name, obj in inspect.getmembers(types_module): + if inspect.isclass(obj): + if hasattr(obj, "Config"): # Pydantic model + return obj + elif hasattr(obj, "__dataclass_fields__"): # Dataclass + return obj + + raise ValueError(f"No configuration class found in modules.{module_name}.types") + + except ImportError as e: + log.error(f"Failed to import module {module_name}: {str(e)}") + raise + except Exception as e: + log.error(f"Error discovering module config: {str(e)}") + raise + + def discover_deploy_function(self, module_name: str) -> Callable: + """ + Discovers and returns the deploy function from the module's deploy.py. + + Args: + module_name: The name of the module + + Returns: + Callable: The deploy function + + Raises: + ImportError: If module cannot be imported + ValueError: If deploy function is not found + """ + try: + deploy_module = importlib.import_module(f"modules.{module_name}.deploy") + function_name = f"deploy_{module_name}_module" + + if deploy_func := getattr(deploy_module, function_name, None): + return deploy_func + + raise ValueError( + f"No deploy function named '{function_name}' found in modules.{module_name}.deploy" + ) + + except ImportError as e: + log.error(f"Failed to import module {module_name}: {str(e)}") + raise + except Exception as e: + log.error(f"Error discovering deploy function: {str(e)}") + raise + + def deploy_module(self, module_name: str) -> ModuleDeploymentResult: + """ + Deploys a single module with proper error handling. 
+ + Args: + module_name: Name of the module to deploy + + Returns: + ModuleDeploymentResult: Result of the deployment + + Raises: + ValueError: If module configuration is invalid + """ + try: + # Get module configuration + module_config, module_enabled = get_module_config( + module_name, + self.init_config.config, + self.init_config.default_versions + ) + + if not module_enabled: + log.info(f"Module {module_name} is not enabled, skipping deployment") + return ModuleDeploymentResult( + success=True, + version="", + resources=[], + metadata={"enabled": False} + ) + + # Import module dynamically + module = importlib.import_module(f"modules.{module_name}") + deploy_func = getattr(module, f"deploy_{module_name}_module") + + # Deploy with proper error handling + result = deploy_func( + module_config, + self.init_config.global_depends_on + ) + + # Track deployment + self.deployed_modules[module_name] = result + return result + + except Exception as e: + log.error(f"Failed to deploy module {module_name}: {str(e)}") + return ModuleDeploymentResult( + success=False, + version="", + resources=[], + errors=[str(e)] + ) + +def initialize_pulumi() -> InitializationConfig: + """ + Initializes Pulumi configuration, Kubernetes provider, and global resources. 
+ + Returns: + InitializationConfig: Initialized configuration object + + Raises: + Exception: If initialization fails + """ + try: + # Initialize basic configuration + config = pulumi.Config() + stack_name = pulumi.get_stack() + project_name = pulumi.get_project() + + # Load default versions + default_versions = load_default_versions(config) + versions: Dict[str, str] = {} + configurations: Dict[str, Dict[str, Any]] = {} + global_depends_on: List[pulumi.Resource] = [] + + # Initialize Kubernetes provider + kubernetes_config = config.get_object("kubernetes") or {} + kubernetes_context = kubernetes_config.get("context") + kubeconfig = kubernetes_config.get("kubeconfig") or os.getenv("KUBECONFIG") + + k8s_provider = Provider( + "k8s_provider", + kubeconfig=kubeconfig, + context=kubernetes_context, + ) + + # Get compliance configuration + compliance_config_dict = config.get_object("compliance") or {} + compliance_config = ComplianceConfig.merge(compliance_config_dict) + + # Initialize configuration object + init_config = initialize_config({ + "config": config, + "stack_name": stack_name, + "project_name": project_name, + "default_versions": default_versions, + "versions": versions, + "configurations": configurations, + "global_depends_on": global_depends_on, + "k8s_provider": k8s_provider, + "git_info": collect_git_info(), + "compliance_config": compliance_config + }) + + return init_config + + except Exception as e: + log.error(f"Initialization failed: {str(e)}") + raise + +def deploy_modules( + modules: List[str], + init_config: InitializationConfig +) -> None: + """ + Deploys a list of modules in order. 
+ + Args: + modules: List of module names to deploy + init_config: Initialization configuration + + Raises: + Exception: If deployment fails + """ + try: + deployment_manager = DeploymentManager(init_config) + + for module_name in modules: + log.info(f"Deploying module: {module_name}") + + result = deployment_manager.deploy_module(module_name) + + if not result.success: + log.error( + f"Failed to deploy module {module_name}: {', '.join(result.errors)}" + ) + continue + + # Update versions and configurations + if result.version: + init_config.update_versions(module_name, result.version) + + init_config.configurations[module_name] = { + "enabled": True, + "version": result.version, + **result.metadata + } + + except Exception as e: + log.error(f"Module deployment failed: {str(e)}") + raise diff --git a/pulumi/core/metadata.py b/core/metadata.py similarity index 56% rename from pulumi/core/metadata.py rename to core/metadata.py index 14a29b8..d1f6307 100644 --- a/pulumi/core/metadata.py +++ b/core/metadata.py @@ -1,8 +1,4 @@ # pulumi/core/metadata.py -# TODO: -# - enhance with support for propagation of labels annotations on AWS resources -# - enhance by adding additional data to global tag / label / annotation metadata -# - support adding git release semver to global tag / label / annotation metadata """ Metadata Management Module @@ -13,54 +9,75 @@ import os import re -import git import json import semver import threading -from typing import Dict, Optional - +from typing import Dict, Optional, ClassVar, Any, List, Tuple +from git import Repo, GitCommandError from pulumi import log -from .types import ComplianceConfig - +from .types import ComplianceConfig, InitializationConfig -# Singleton class to manage global metadata -# Globals are correctly chosen to enforce consistency across all modules and resources -# This class is thread-safe and used to store global labels and annotations class MetadataSingleton: - _instance = None - __lock = threading.Lock() - - def 
__new__(cls, *args, **kwargs): - if not cls._instance: - with cls.__lock: - if not cls._instance: - cls._instance = super(MetadataSingleton, cls).__new__(cls) - cls._instance._data = { - "_global_labels": {}, - "_global_annotations": {}, - } + """ + Thread-safe singleton class to manage global metadata. + Ensures consistent labels and annotations across all resources. + """ + _instance: ClassVar[Optional['MetadataSingleton']] = None + _lock: ClassVar[threading.Lock] = threading.Lock() + + def __init__(self) -> None: + """Initialize metadata storage.""" + self._global_labels: Dict[str, str] = {} + self._global_annotations: Dict[str, str] = {} + + def __new__(cls) -> 'MetadataSingleton': + """Ensure only one instance is created.""" + if cls._instance is None: + with cls._lock: + if cls._instance is None: + instance = super().__new__(cls) + instance.__init__() + cls._instance = instance return cls._instance + @property + def global_labels(self) -> Dict[str, str]: + """Get global labels.""" + return self._global_labels.copy() + + @property + def global_annotations(self) -> Dict[str, str]: + """Get global annotations.""" + return self._global_annotations.copy() + + def set_labels(self, labels: Dict[str, str]) -> None: + """Set global labels.""" + self._global_labels = labels.copy() + + def set_annotations(self, annotations: Dict[str, str]) -> None: + """Set global annotations.""" + self._global_annotations = annotations.copy() -def set_global_labels(labels: Dict[str, str]): + +def set_global_labels(labels: Dict[str, str]) -> None: """ Sets global labels. Args: - labels (Dict[str, str]): The global labels. + labels: The global labels to set. """ - MetadataSingleton()._data["_global_labels"] = labels + MetadataSingleton().set_labels(labels) -def set_global_annotations(annotations: Dict[str, str]): +def set_global_annotations(annotations: Dict[str, str]) -> None: """ Sets global annotations. Args: - annotations (Dict[str, str]): The global annotations. 
+ annotations: The global annotations to set. """ - MetadataSingleton()._data["_global_annotations"] = annotations + MetadataSingleton().set_annotations(annotations) def get_global_labels() -> Dict[str, str]: @@ -70,7 +87,7 @@ def get_global_labels() -> Dict[str, str]: Returns: Dict[str, str]: The global labels. """ - return MetadataSingleton()._data["_global_labels"] + return MetadataSingleton().global_labels def get_global_annotations() -> Dict[str, str]: @@ -80,7 +97,7 @@ def get_global_annotations() -> Dict[str, str]: Returns: Dict[str, str]: The global annotations. """ - return MetadataSingleton()._data["_global_annotations"] + return MetadataSingleton().global_annotations def generate_git_labels(git_info: Dict[str, str]) -> Dict[str, str]: @@ -88,18 +105,17 @@ def generate_git_labels(git_info: Dict[str, str]) -> Dict[str, str]: Generates git-related labels suitable for AWS tags. Args: - git_info (Dict[str, str]): The Git information. + git_info: The Git information. Returns: Dict[str, str]: The git-related labels. """ flattened_git_info = flatten_dict(git_info) + sanitized_labels: Dict[str, str] = {} - # Sanitize keys and values to conform to AWS tag requirements - sanitized_labels = {} for key, value in flattened_git_info.items(): sanitized_key = sanitize_tag_key(f"git.{key}") - sanitized_value = sanitize_tag_value(value) + sanitized_value = sanitize_tag_value(str(value)) sanitized_labels[sanitized_key] = sanitized_value return sanitized_labels @@ -110,7 +126,7 @@ def generate_git_annotations(git_info: Dict[str, str]) -> Dict[str, str]: Generates git-related annotations. Args: - git_info (Dict[str, str]): The Git information. + git_info: The Git information. Returns: Dict[str, str]: The git-related annotations. @@ -127,67 +143,55 @@ def generate_compliance_labels(compliance_config: ComplianceConfig) -> Dict[str, Generates compliance labels based on the given compliance configuration. 
Args: - compliance_config (ComplianceConfig): The compliance configuration object. + compliance_config: The compliance configuration object. Returns: Dict[str, str]: A dictionary of compliance labels. """ compliance_dict = compliance_config.dict() flattened_compliance = flatten_dict(compliance_dict, list_sep=":") + sanitized_labels: Dict[str, str] = {} - sanitized_labels = {} for key, value in flattened_compliance.items(): sanitized_key = sanitize_tag_key(key) - sanitized_value = sanitize_tag_value(value) + sanitized_value = sanitize_tag_value(str(value)) sanitized_labels[sanitized_key] = sanitized_value return sanitized_labels -def generate_compliance_annotations( - compliance_config: ComplianceConfig, -) -> Dict[str, str]: +def generate_compliance_annotations(compliance_config: ComplianceConfig) -> Dict[str, str]: """ Generates compliance annotations based on the given compliance configuration. Args: - compliance_config (ComplianceConfig): The compliance configuration object. + compliance_config: The compliance configuration object. Returns: Dict[str, str]: A dictionary of compliance annotations. 
""" + annotations: Dict[str, str] = {} - # TODO: enhance if logic to improve efficiency, DRY, readability and maintainability - annotations = {} if compliance_config.fisma.level: annotations["compliance.fisma.level"] = compliance_config.fisma.level if compliance_config.fisma.ato: annotations["compliance.fisma.ato"] = json.dumps(compliance_config.fisma.ato) if compliance_config.nist.controls: - annotations["compliance.nist.controls"] = json.dumps( - compliance_config.nist.controls - ) + annotations["compliance.nist.controls"] = json.dumps(compliance_config.nist.controls) if compliance_config.nist.auxiliary: - annotations["compliance.nist.auxiliary"] = json.dumps( - compliance_config.nist.auxiliary - ) + annotations["compliance.nist.auxiliary"] = json.dumps(compliance_config.nist.auxiliary) if compliance_config.nist.exceptions: - annotations["compliance.nist.exceptions"] = json.dumps( - compliance_config.nist.exceptions - ) + annotations["compliance.nist.exceptions"] = json.dumps(compliance_config.nist.exceptions) + return annotations -# Function to sanitize a label value to comply with Kubernetes `label` naming conventions -# https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#syntax-and-character-set -# TODO: -# - retool this feature as a more efficient implementation in `collect_git_info()` and related functions. def sanitize_label_value(value: str) -> str: """ Sanitizes a label value to comply with Kubernetes naming conventions. Args: - value (str): The value to sanitize. + value: The value to sanitize. Returns: str: The sanitized value. @@ -199,28 +203,36 @@ def sanitize_label_value(value: str) -> str: return sanitized[:63] -def flatten_dict(data, parent_key="", sep=".", list_sep=":") -> Dict[str, str]: +def flatten_dict( + data: Dict[str, Any], + parent_key: str = "", + sep: str = ".", + list_sep: str = ":" +) -> Dict[str, str]: """ Flattens a nested dictionary into a single-level dictionary with concatenated keys. 
Args: - data (dict): The dictionary to flatten. - parent_key (str): The base key string. - sep (str): The separator between keys. - list_sep (str): The separator between list items. + data: The dictionary to flatten. + parent_key: The base key string. + sep: The separator between keys. + list_sep: The separator between list items. Returns: Dict[str, str]: The flattened dictionary. """ - items = [] + items: List[Tuple[str, str]] = [] + for k, v in data.items(): new_key = f"{parent_key}{sep}{k}" if parent_key else k + if isinstance(v, dict): items.extend(flatten_dict(v, new_key, sep=sep, list_sep=list_sep).items()) elif isinstance(v, list): items.append((new_key, list_sep.join(map(str, v)))) elif v is not None: items.append((new_key, str(v))) + return dict(items) @@ -229,12 +241,11 @@ def sanitize_tag_key(key: str) -> str: Sanitizes a string to be used as an AWS tag key. Args: - key (str): The key to sanitize. + key: The key to sanitize. Returns: str: The sanitized key. """ - # AWS tag key must be 1-128 Unicode characters sanitized = re.sub(r"[^a-zA-Z0-9\s_.:/=+\-@]", "-", key) return sanitized[:128] @@ -244,17 +255,74 @@ def sanitize_tag_value(value: str) -> str: Sanitizes a string to be used as an AWS tag value. Args: - value (str): The value to sanitize. + value: The value to sanitize. Returns: str: The sanitized value. """ - # AWS tag value must be 0-256 Unicode characters - # Include colons ':' in the allowed characters sanitized = re.sub(r"[^a-zA-Z0-9\s_./:=+\-@]", "-", value) return sanitized[:256] +def get_remote_url(repo: Repo) -> str: + """ + Gets the remote URL using multiple fallback methods. 
+ + Args: + repo: GitPython Repo object + + Returns: + str: Remote URL or 'N/A' if not found + """ + try: + return next(remote.url for remote in repo.remotes if remote.name == "origin") + except (StopIteration, AttributeError, GitCommandError) as e: + log.warn(f"Failed to get remote URL via remotes: {str(e)}") + + try: + return repo.git.config("--get", "remote.origin.url") + except GitCommandError as e: + log.warn(f"Failed to get remote URL via git config: {str(e)}") + + for env_var in ["CI_REPOSITORY_URL", "GITHUB_REPOSITORY", "GIT_URL"]: + if url := os.getenv(env_var): + return url + + return "N/A" + + +def get_latest_semver_tag(repo: Repo) -> Optional[str]: + """ + Gets the latest semantic version tag from the repository. + + Args: + repo: GitPython Repo object + + Returns: + Optional[str]: Latest semver tag or None if no valid tags found + """ + try: + tags = [str(tag) for tag in repo.tags] + semver_tags: List[Tuple[semver.VersionInfo, str]] = [] + + for tag in tags: + version_str = tag.lstrip("v") + try: + version = semver.VersionInfo.parse(version_str) + semver_tags.append((version, tag)) + except (ValueError, GitCommandError) as e: + log.warn(f"Error parsing tag {tag}: {str(e)}") + continue + + if semver_tags: + return sorted(semver_tags, key=lambda x: x[0])[-1][1] + + except Exception as e: + log.warn(f"Error parsing semver tags: {str(e)}") + + return None + + def collect_git_info() -> Dict[str, str]: """ Collects Git repository information using GitPython. 
@@ -284,39 +352,30 @@ def collect_git_info() -> Dict[str, str]: } try: - # Initialize repo object - repo = git.Repo(search_parent_directories=True) + repo = Repo(search_parent_directories=True) - # Get remote URL (try multiple methods) try: remote_url = get_remote_url(repo) git_info["remote"] = remote_url except Exception as e: log.warn(f"Failed to get remote URL: {str(e)}") - # Get current branch try: git_info["branch"] = repo.active_branch.name - except TypeError: - # Handle detached HEAD state + except (TypeError, GitCommandError) as e: git_info["branch"] = "HEAD" - except Exception as e: log.warn(f"Failed to get branch name: {str(e)}") - # Get commit information try: commit = repo.head.commit - git_info.update( - { - "commit": commit.hexsha, - "commit_short": commit.hexsha[:8], - "commit_date": commit.committed_datetime.isoformat(), - } - ) + git_info.update({ + "commit": commit.hexsha, + "commit_short": commit.hexsha[:8], + "commit_date": commit.committed_datetime.isoformat(), + }) except Exception as e: log.warn(f"Failed to get commit information: {str(e)}") - # Get latest tag and release information try: latest_tag = get_latest_semver_tag(repo) if latest_tag: @@ -327,21 +386,16 @@ def collect_git_info() -> Dict[str, str]: except Exception as e: log.warn(f"Failed to get tag/release information: {str(e)}") - # Check if working tree is dirty git_info["dirty"] = str(repo.is_dirty()).lower() - log.info(f"Successfully collected git info: {git_info}") - except git.exc.InvalidGitRepositoryError: - log.warn("Not a git repository. Using default values.") except Exception as e: log.warn(f"Error collecting git information: {str(e)}") log.warn("Using default values for git information") return git_info - -def get_remote_url(repo: git.Repo) -> str: +def get_remote_url(repo: Repo) -> str: """ Gets the remote URL using multiple fallback methods. 
@@ -351,19 +405,17 @@ def get_remote_url(repo: git.Repo) -> str: Returns: str: Remote URL or 'N/A' if not found """ - # Try getting from origin remote try: return next(remote.url for remote in repo.remotes if remote.name == "origin") - except (StopIteration, AttributeError): + except (StopIteration, AttributeError, GitCommandError) as e: + log.warn(f"Failed to get remote URL: {str(e)}") pass - # Try getting from git config try: return repo.git.config("--get", "remote.origin.url") - except git.exc.GitCommandError: + except GitCommandError: pass - # Try environment variables (useful in CI/CD) for env_var in ["CI_REPOSITORY_URL", "GITHUB_REPOSITORY", "GIT_URL"]: if url := os.getenv(env_var): return url @@ -371,34 +423,23 @@ def get_remote_url(repo: git.Repo) -> str: return "N/A" -def get_latest_semver_tag(repo: git.Repo) -> Optional[str]: +def get_latest_semver_tag(repo: Repo) -> Optional[str]: """ Gets the latest semantic version tag from the repository. - Handles both 'v' prefixed and non-prefixed tags. 
- - Args: - repo: GitPython Repo object - - Returns: - Optional[str]: Latest semver tag or None if no valid tags found """ try: - # Get all tags tags = [str(tag) for tag in repo.tags] - - # Filter for semver tags (with or without 'v' prefix) semver_tags = [] + for tag in tags: - # Remove 'v' prefix if present version_str = tag.lstrip("v") try: - # Parse version and add to list if valid version = semver.VersionInfo.parse(version_str) semver_tags.append((version, tag)) - except ValueError: + except (ValueError, GitCommandError) as e: + log.warn(f"Error parsing tag {tag}: {str(e)}") continue - # Return the latest version tag if any found if semver_tags: return sorted(semver_tags, key=lambda x: x[0])[-1][1] @@ -420,16 +461,47 @@ def sanitize_git_info(git_info: Dict[str, str]) -> Dict[str, str]: Dict[str, str]: Sanitized git information """ sanitized = {} + for key, value in git_info.items(): - # Convert to lowercase and replace invalid characters sanitized_value = re.sub(r"[^a-z0-9-._]", "-", str(value).lower()) - - # Trim to maximum allowed length (63 chars for k8s labels) sanitized_value = sanitized_value[:63] - - # Remove leading/trailing non-alphanumeric characters sanitized_value = re.sub(r"^[^a-z0-9]+|[^a-z0-9]+$", "", sanitized_value) - sanitized[key] = sanitized_value return sanitized + + +def generate_aws_tags( + git_info: Dict[str, str], + compliance_config: ComplianceConfig +) -> Dict[str, str]: + """Generate AWS-specific tags.""" + tags = { + **generate_git_labels(git_info), + **generate_compliance_labels(compliance_config), + "managed-by": "pulumi", + "automation": "konductor" + } + + # Ensure tag values meet AWS requirements + return {k: sanitize_tag_value(str(v)) for k, v in tags.items()} + +def setup_global_metadata(init_config: InitializationConfig) -> None: + """ + Initialize global metadata for resources. 
+ + Args: + init_config: Initialization configuration object + """ + try: + # Collect git information + git_info = collect_git_info() + init_config.git_info = git_info + + # Set global resource metadata + set_global_labels(init_config.metadata.labels) + set_global_annotations(init_config.metadata.annotations) + + except Exception as e: + log.error(f"Failed to setup global metadata: {str(e)}") + raise diff --git a/core/resource_helpers.py b/core/resource_helpers.py new file mode 100644 index 0000000..ada3204 --- /dev/null +++ b/core/resource_helpers.py @@ -0,0 +1,452 @@ +# pulumi/core/resource_helpers.py + +""" +Resource Helper Functions Module + +This module provides helper functions for creating and managing Pulumi resources, +with proper type safety, resource option handling, and error management. +""" + +import os +import tempfile +import pulumi +import pulumi_kubernetes as k8s +from typing import Optional, Dict, Any, List, Union, Callable, cast +from pulumi import ResourceOptions, Resource, Output +from pulumi_kubernetes.core.v1 import Namespace, Secret +from pulumi_kubernetes.meta.v1 import ObjectMetaArgs +from pulumi_kubernetes.helm.v3 import Release, ReleaseArgs, Chart +from pulumi_kubernetes.apiextensions import CustomResource +from pulumi_kubernetes.yaml import ConfigFile + +from .metadata import get_global_labels, get_global_annotations +from .utils import set_resource_metadata + +import importlib +import logging as log +from pulumi_kubernetes import Provider + + +def create_namespace( + name: str, + labels: Optional[Dict[str, str]] = None, + annotations: Optional[Dict[str, str]] = None, + finalizers: Optional[List[str]] = None, + custom_timeouts: Optional[Dict[str, str]] = None, + opts: Optional[ResourceOptions] = None, + k8s_provider: Optional[k8s.Provider] = None, + parent: Optional[Resource] = None, + depends_on: Optional[List[Resource]] = None, +) -> Namespace: + """ + Creates a Kubernetes Namespace with global labels and annotations. 
+ + Args: + name: The name of the namespace + labels: Additional labels to apply + annotations: Additional annotations to apply + finalizers: Finalizers for the namespace + custom_timeouts: Custom timeouts for resource operations + opts: Pulumi resource options + k8s_provider: Kubernetes provider + parent: Parent resource + depends_on: Resources this resource depends on + + Returns: + Namespace: The created Namespace resource + + Raises: + pulumi.RunError: If namespace creation fails + """ + try: + # Initialize default values + labels = labels or {} + annotations = annotations or {} + custom_timeouts = custom_timeouts or { + "create": "5m", + "update": "10m", + "delete": "10m" + } + depends_on = depends_on or [] + + # Merge global metadata + global_labels = get_global_labels() + global_annotations = get_global_annotations() + labels.update(global_labels) + annotations.update(global_annotations) + + # Create metadata + metadata = ObjectMetaArgs( + name=name, + labels=labels, + annotations=annotations, + finalizers=finalizers, + ) + + # Merge resource options + resource_opts = ResourceOptions.merge( + ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + custom_timeouts=pulumi.CustomTimeouts( + create=custom_timeouts.get("create", "5m"), + update=custom_timeouts.get("update", "10m"), + delete=custom_timeouts.get("delete", "10m"), + ), + ), + opts or ResourceOptions() + ) + + # Create namespace + return Namespace( + name, + metadata=metadata, + opts=resource_opts, + ) + + except Exception as e: + raise pulumi.RunError(f"Failed to create namespace '{name}': {str(e)}") from e + + +def create_custom_resource( + name: str, + api_version: str, + kind: str, + metadata: Dict[str, Any], + spec: Dict[str, Any], + opts: Optional[ResourceOptions] = None, + k8s_provider: Optional[k8s.Provider] = None, + depends_on: Optional[List[Resource]] = None, + parent: Optional[Resource] = None, +) -> CustomResource: + """ + Creates a Kubernetes CustomResource 
with global labels and annotations. + + Args: + name: The name of the custom resource + api_version: The API version of the custom resource + kind: The kind of the custom resource + metadata: The metadata for the custom resource + spec: The spec for the custom resource + opts: Pulumi resource options + k8s_provider: Kubernetes provider + depends_on: Resources this custom resource depends on + parent: Parent resource + + Returns: + CustomResource: The created CustomResource + + Raises: + pulumi.RunError: If custom resource creation fails + """ + try: + # Initialize defaults + opts = opts or ResourceOptions() + depends_on = depends_on or [] + + # Get global metadata + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + def custom_resource_transform(args: pulumi.ResourceTransformationArgs) -> pulumi.ResourceTransformationResult: + """Transform resource to include global metadata.""" + props = args.props + if "metadata" in props: + set_resource_metadata( + props["metadata"], + global_labels, + global_annotations + ) + return pulumi.ResourceTransformationResult(props, args.opts) + + # Merge resource options + resource_opts = ResourceOptions.merge( + ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + transformations=[custom_resource_transform], + ), + opts + ) + + # Create custom resource + return CustomResource( + resource_name=name, + api_version=api_version, + kind=kind, + metadata=metadata, + spec=spec, + opts=resource_opts, + ) + + except Exception as e: + raise pulumi.RunError(f"Failed to create custom resource '{name}': {str(e)}") from e + + +def create_helm_release( + name: str, + chart: Union[str, Chart], + values: Optional[Dict[str, Any]] = None, + version: Optional[str] = None, + namespace: Optional[str] = None, + repository: Optional[str] = None, + repository_opts: Optional[Dict[str, Any]] = None, + transformations: Optional[List[Callable[[pulumi.ResourceTransformationArgs], 
Optional[pulumi.ResourceTransformationResult]]]] = None, + opts: Optional[ResourceOptions] = None, + k8s_provider: Optional[k8s.Provider] = None, + depends_on: Optional[List[Resource]] = None, + parent: Optional[Resource] = None, +) -> Release: + """ + Creates a Helm Release with global labels and annotations. + + Args: + name: The release name + chart: The chart name or Chart object + values: The values for the chart + version: The version of the chart + namespace: The namespace to install the release into + repository: The repository URL + repository_opts: Additional repository options + transformations: Additional transformations + opts: Pulumi resource options + k8s_provider: Kubernetes provider + depends_on: Resources this release depends on + parent: Parent resource + + Returns: + Release: The created Helm release + + Raises: + pulumi.RunError: If helm release creation fails + """ + try: + # Initialize defaults + opts = opts or ResourceOptions() + transformations = transformations or [] + depends_on = depends_on or [] + values = values or {} + repository_opts = repository_opts or {} + + # Get global metadata + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + def helm_resource_transform(args: pulumi.ResourceTransformationArgs) -> pulumi.ResourceTransformationResult: + """Transform helm resources to include global metadata.""" + props = args.props + if "metadata" in props: + set_resource_metadata( + props["metadata"], + global_labels, + global_annotations + ) + elif "spec" in props and isinstance(props["spec"], dict): + if "metadata" in props["spec"]: + set_resource_metadata( + props["spec"]["metadata"], + global_labels, + global_annotations + ) + return pulumi.ResourceTransformationResult(props, args.opts) + + transformations.append(helm_resource_transform) + + # Merge resource options + resource_opts = ResourceOptions.merge( + ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + 
transformations=transformations, + ), + opts + ) + + # Create release args + release_args = ReleaseArgs( + chart=chart, + version=version, + namespace=namespace, + repository=repository, + repository_opts=repository_opts, + values=values, + ) + + return Release(name, release_args, opts=resource_opts) + + except Exception as e: + raise pulumi.RunError(f"Failed to create helm release '{name}': {str(e)}") from e + + +def create_secret( + name: str, + namespace: str, + string_data: Dict[str, str], + opts: Optional[ResourceOptions] = None, + k8s_provider: Optional[k8s.Provider] = None, + depends_on: Optional[List[Resource]] = None, + parent: Optional[Resource] = None, +) -> Secret: + """ + Creates a Kubernetes Secret with global labels and annotations. + + Args: + name: The name of the secret + namespace: The namespace for the secret + string_data: The secret data as strings + opts: Pulumi resource options + k8s_provider: Kubernetes provider + depends_on: Resources this secret depends on + parent: Parent resource + + Returns: + Secret: The created Secret + + Raises: + pulumi.RunError: If secret creation fails + """ + try: + # Initialize defaults + opts = opts or ResourceOptions() + depends_on = depends_on or [] + + # Get global metadata + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + # Create metadata + metadata = ObjectMetaArgs( + name=name, + namespace=namespace, + labels=global_labels, + annotations=global_annotations, + ) + + # Merge resource options + resource_opts = ResourceOptions.merge( + ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + ), + opts + ) + + return Secret( + name, + metadata=metadata, + string_data=string_data, + opts=resource_opts, + ) + + except Exception as e: + raise pulumi.RunError(f"Failed to create secret '{name}': {str(e)}") from e + + +def create_config_file( + name: str, + file_path: str, + opts: Optional[ResourceOptions] = None, + transformations: 
Optional[List[Callable[[pulumi.ResourceTransformationArgs], Optional[pulumi.ResourceTransformationResult]]]] = None, + k8s_provider: Optional[k8s.Provider] = None, + depends_on: Optional[List[Resource]] = None, + parent: Optional[Resource] = None, +) -> ConfigFile: + """ + Creates Kubernetes resources from a YAML config file with global labels and annotations. + + Args: + name: The resource name + file_path: The path to the YAML file + opts: Pulumi resource options + transformations: Additional transformations + k8s_provider: Kubernetes provider + depends_on: Resources these resources depend on + parent: Parent resource + + Returns: + ConfigFile: The created resources + + Raises: + pulumi.RunError: If config file creation fails + """ + try: + # Initialize defaults + opts = opts or ResourceOptions() + transformations = transformations or [] + depends_on = depends_on or [] + + # Get global metadata + global_labels = get_global_labels() + global_annotations = get_global_annotations() + + def config_file_transform(args: pulumi.ResourceTransformationArgs) -> pulumi.ResourceTransformationResult: + """Transform config file resources to include global metadata.""" + props = args.props + if "metadata" in props: + set_resource_metadata( + props["metadata"], + global_labels, + global_annotations + ) + elif "spec" in props and isinstance(props["spec"], dict): + if "metadata" in props["spec"]: + set_resource_metadata( + props["spec"]["metadata"], + global_labels, + global_annotations + ) + return pulumi.ResourceTransformationResult(props, args.opts) + + transformations.append(config_file_transform) + + # Merge resource options + resource_opts = ResourceOptions.merge( + ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + transformations=transformations, + ), + opts + ) + + return ConfigFile(name, file_path, opts=resource_opts) + + except Exception as e: + raise pulumi.RunError(f"Failed to create config file '{name}': {str(e)}") from e + + +def 
create_aws_resource( + name: str, + resource_type: str, + args: Dict[str, Any], + opts: Optional[ResourceOptions] = None, + provider: Optional[Provider] = None +) -> pulumi.Resource: + """Create AWS resource with proper error handling.""" + try: + # Ensure required fields + if not name or not resource_type: + raise ValueError("Name and resource_type are required") + + # Get resource class + module = importlib.import_module("pulumi_aws") + resource_class = getattr(module, resource_type) + + # Create resource + return resource_class( + name, + **args, + opts=ResourceOptions.merge( + ResourceOptions(provider=provider), + opts or ResourceOptions() + ) + ) + + except Exception as e: + log.error(f"Failed to create AWS resource {name}: {str(e)}") + raise diff --git a/core/types.py b/core/types.py new file mode 100644 index 0000000..674a031 --- /dev/null +++ b/core/types.py @@ -0,0 +1,443 @@ +# pulumi/core/types.py + +""" +Types and Data Structures Module + +This module defines all shared data classes and types used across all modules. +It provides type-safe configuration structures using Pydantic models and TypedDict. +""" + +from typing import Dict, List, Optional, Any, TypedDict, Protocol +from pydantic import BaseModel, Field, validator +from datetime import datetime +import pulumi +import pulumi_kubernetes as k8s + + +class NamespaceConfig(BaseModel): + """ + Configuration for Kubernetes namespace creation. 
+ + Attributes: + name: Name of the namespace + labels: Kubernetes labels to apply + annotations: Kubernetes annotations to apply + finalizers: List of finalizer strings + protect: Whether to protect the resource from deletion + retain_on_delete: Whether to retain the resource on stack deletion + ignore_changes: List of fields to ignore during updates + custom_timeouts: Custom timeout values for operations + """ + name: str + labels: Dict[str, str] = Field(default_factory=lambda: {"ccio.v1/app": "kargo"}) + annotations: Dict[str, str] = Field(default_factory=dict) + finalizers: List[str] = Field(default_factory=lambda: ["kubernetes"]) + protect: bool = False + retain_on_delete: bool = False + ignore_changes: List[str] = Field( + default_factory=lambda: ["metadata", "spec"] + ) + custom_timeouts: Dict[str, str] = Field( + default_factory=lambda: { + "create": "5m", + "update": "10m", + "delete": "10m" + } + ) + + +class FismaConfig(BaseModel): + """ + FISMA compliance configuration. + + Attributes: + enabled: Whether FISMA compliance is enabled + level: FISMA impact level + ato: Authority to Operate details + """ + enabled: bool = False + level: Optional[str] = None + ato: Dict[str, str] = Field(default_factory=dict) + + @validator("enabled", pre=True) + def parse_enabled(cls, v: Any) -> bool: + """Convert various input types to boolean.""" + if isinstance(v, str): + return v.lower() == "true" + return bool(v) + + +class NistConfig(BaseModel): + """ + NIST compliance configuration. 
+ + Attributes: + enabled: Whether NIST controls are enabled + controls: List of NIST control identifiers + auxiliary: Additional NIST controls + exceptions: NIST control exceptions + """ + enabled: bool = False + controls: List[str] = Field(default_factory=list) + auxiliary: List[str] = Field(default_factory=list) + exceptions: List[str] = Field(default_factory=list) + + @validator("enabled", pre=True) + def parse_enabled(cls, v: Any) -> bool: + """Convert various input types to boolean.""" + if isinstance(v, str): + return v.lower() == "true" + return bool(v) + + +class ScipConfig(BaseModel): + """ + SCIP-specific configuration. + + Attributes: + environment: Target environment identifier + ownership: Resource ownership metadata + provider: Provider-specific configuration + """ + environment: Optional[str] = None + ownership: Dict[str, Any] = Field(default_factory=dict) + provider: Dict[str, Any] = Field(default_factory=dict) + + +class ComplianceConfig(BaseModel): + """ + Comprehensive compliance configuration. + + Attributes: + fisma: FISMA compliance settings + nist: NIST compliance settings + scip: SCIP-specific settings + """ + fisma: FismaConfig = Field(default_factory=FismaConfig) + nist: NistConfig = Field(default_factory=NistConfig) + scip: ScipConfig = Field(default_factory=ScipConfig) + + @classmethod + def merge(cls, user_config: Dict[str, Any]) -> "ComplianceConfig": + """ + Merge user configuration with default configuration. + + Args: + user_config: User-provided configuration dictionary + + Returns: + ComplianceConfig: Merged configuration object + """ + fisma_config = FismaConfig(**(user_config.get("fisma", {}) or {})) + nist_config = NistConfig(**(user_config.get("nist", {}) or {})) + scip_config = ScipConfig(**(user_config.get("scip", {}) or {})) + + return cls( + fisma=fisma_config, + nist=nist_config, + scip=scip_config + ) + + def dict(self, *args: Any, **kwargs: Any) -> Dict[str, Any]: + """ + Convert the configuration to a dictionary. 
+ + Returns: + Dict[str, Any]: Configuration as a dictionary + """ + base_dict = super().dict(*args, **kwargs) + + # Ensure nested models are also converted to dictionaries + base_dict["fisma"] = self.fisma.dict() + base_dict["nist"] = self.nist.dict() + base_dict["scip"] = self.scip.dict() + + return base_dict + + +class ResourceMetadata(BaseModel): + """ + Common resource metadata. + + Attributes: + created_at: Resource creation timestamp + updated_at: Last update timestamp + labels: Resource labels + annotations: Resource annotations + """ + created_at: datetime = Field(default_factory=datetime.utcnow) + updated_at: datetime = Field(default_factory=datetime.utcnow) + labels: Dict[str, str] = Field(default_factory=dict) + annotations: Dict[str, str] = Field(default_factory=dict) + + @validator("updated_at", pre=True, always=True) + def update_timestamp(cls, v: Any, values: Dict[str, Any]) -> datetime: + """Ensure updated_at is always current.""" + return datetime.utcnow() + + +class ModuleBase(BaseModel): + """ + Base class for all module configurations. + + Attributes: + enabled: Whether the module is enabled + version: Module version + metadata: Resource metadata + """ + enabled: bool = False + version: Optional[str] = None + metadata: ResourceMetadata = Field(default_factory=ResourceMetadata) + + class Config: + """Pydantic model configuration.""" + arbitrary_types_allowed = True + json_encoders = { + datetime: lambda v: v.isoformat() + } + + +class ModuleDefaults(TypedDict): + """ + Default configuration for modules. + + Attributes: + enabled: Whether the module is enabled by default + version: Optional default version + config: Additional module configuration + """ + enabled: bool + version: Optional[str] + config: Dict[str, Any] + + +class InitializationConfig(BaseModel): + """ + Configuration for core module initialization. + + This class encapsulates all necessary configuration and state needed + for initializing and managing the core module components. 
+ + Attributes: + config: Pulumi configuration object + stack_name: Name of the current Pulumi stack + project_name: Name of the Pulumi project + default_versions: Default versions for all modules + versions: Current versions of deployed modules + configurations: Module-specific configurations + global_depends_on: Global resource dependencies + k8s_provider: Kubernetes provider instance + git_info: Git repository information + compliance_config: Compliance configuration + metadata: Resource metadata + """ + config: Any # Pulumi.Config can't be type-hinted directly + stack_name: str + project_name: str + default_versions: Dict[str, Any] + versions: Dict[str, str] = Field(default_factory=dict) + configurations: Dict[str, Dict[str, Any]] = Field(default_factory=dict) + global_depends_on: List[Any] = Field(default_factory=list) # Pulumi.Resource list + k8s_provider: Optional[Any] = None # k8s.Provider + git_info: Dict[str, str] = Field(default_factory=dict) + compliance_config: ComplianceConfig = Field(default_factory=ComplianceConfig) + metadata: ResourceMetadata = Field(default_factory=ResourceMetadata) + + @validator("config") + def validate_pulumi_config(cls, v: Any) -> Any: + """Validate that config is a Pulumi.Config instance.""" + if not isinstance(v, pulumi.Config): + raise ValueError("config must be an instance of pulumi.Config") + return v + + @validator("k8s_provider") + def validate_k8s_provider(cls, v: Any) -> Any: + """Validate that k8s_provider is a k8s.Provider instance if provided.""" + if v is not None and not isinstance(v, k8s.Provider): + raise ValueError("k8s_provider must be an instance of k8s.Provider") + return v + + def update_versions(self, module_name: str, version: str) -> None: + """Update version information for a module.""" + self.versions[module_name] = version + + def add_dependency(self, resource: Any) -> None: + """Add a resource to global dependencies.""" + if resource not in self.global_depends_on: + 
self.global_depends_on.append(resource) + + def get_module_config(self, module_name: str) -> Dict[str, Any]: + """Get configuration for a specific module.""" + return self.configurations.get(module_name, {}) + + class Config: + """Pydantic model configuration.""" + arbitrary_types_allowed = True + json_encoders = { + datetime: lambda v: v.isoformat(), + pulumi.Config: lambda v: str(v), + k8s.Provider: lambda v: str(v) + } + + +# Default module configuration +DEFAULT_MODULE_CONFIG: Dict[str, ModuleDefaults] = { + "aws": {"enabled": False, "version": None, "config": {}}, + "cert_manager": {"enabled": True, "version": None, "config": {}}, + "kubevirt": {"enabled": True, "version": None, "config": {}}, + "multus": {"enabled": True, "version": None, "config": {}}, + "hostpath_provisioner": {"enabled": True, "version": None, "config": {}}, + "containerized_data_importer": {"enabled": True, "version": None, "config": {}}, + "prometheus": {"enabled": True, "version": None, "config": {}} +} + + +class ModuleDeploymentResult(BaseModel): + """ + Results from a module deployment operation. 
+ + Attributes: + success: Whether the deployment was successful + version: Deployed module version + resources: List of created resource IDs + errors: Any errors that occurred + metadata: Additional deployment metadata + """ + success: bool + version: str + resources: List[str] = Field(default_factory=list) + errors: List[str] = Field(default_factory=list) + metadata: Dict[str, Any] = Field(default_factory=dict) + + def add_error(self, error: str) -> None: + """Add an error message.""" + self.errors.append(error) + + def add_metadata(self, key: str, value: Any) -> None: + """Add metadata information.""" + self.metadata[key] = value + + +class ModuleRegistry(BaseModel): + """Registry for available modules and their configurations.""" + modules: Dict[str, ModuleBase] + providers: Dict[str, Any] + dependencies: Dict[str, List[str]] + + def register_module(self, name: str, module: ModuleBase) -> None: + """Register a module with the registry.""" + self.modules[name] = module + + def get_module(self, name: str) -> Optional[ModuleBase]: + """Get a module by name.""" + return self.modules.get(name) + +class ConfigurationValidator: + """Validates module configurations against their schemas.""" + + def __init__(self, registry: ModuleRegistry): + self.registry = registry + + def validate_module_config( + self, + module_name: str, + config: Dict[str, Any] + ) -> List[str]: + """ + Validates module configuration. + Returns list of validation errors. 
+ """ + errors = [] + module = self.registry.get_module(module_name) + if not module: + errors.append(f"Module {module_name} not registered") + return errors + + try: + module.validate_config(config) + except ValidationError as e: + errors.extend(str(error) for error in e.errors()) + return errors + +class DependencyResolver: + """Resolves module deployment order based on dependencies.""" + + def __init__(self, registry: ModuleRegistry): + self.registry = registry + + def resolve_deployment_order( + self, + modules: List[str] + ) -> List[str]: + """ + Returns modules in correct deployment order. + Raises CircularDependencyError if circular dependency detected. + """ + # TODO: Implementation using topological sort + pass + +class ModuleInterface(Protocol): + """Protocol defining required module interface.""" + + def validate_config(self, config: Dict[str, Any]) -> List[str]: + """Validate module configuration.""" + ... + + def deploy(self, ctx: DeploymentContext) -> ModuleDeploymentResult: + """Deploy module resources.""" + ... + + def get_dependencies(self) -> List[str]: + """Get module dependencies.""" + ... + +class AWSDeployer: + def deploy( + self, + dependencies: Optional[List[pulumi.Resource]], + managers: AWSManagers + ) -> Tuple[str, pulumi.Resource, Dict[str, Any]]: + """ + Deploys AWS infrastructure using provided managers. 
+ + Args: + dependencies: Optional resource dependencies + managers: Dictionary of AWS service managers + + Returns: + Tuple containing: + - Version string + - Main infrastructure resource + - Output dictionary + """ + try: + # Initialize core infrastructure + org_resource, org_data = managers["organization"].get_or_create() + + # Get organization root ID + root_id = managers["organization"].get_root_id(org_data) + + # Deploy security controls + security_outputs = managers["security"].deploy_security_controls() + + # Deploy networking + network_outputs = managers["networking"].deploy_network_infrastructure() + + # Deploy resources + resource_outputs = managers["resources"].deploy_resources() + + # Combine outputs + outputs = { + **security_outputs, + **network_outputs, + **resource_outputs, + "organization_id": org_resource.id, + "organization_arn": org_resource.arn, + "root_id": root_id + } + + return "1.0.0", org_resource, outputs + + except Exception as e: + log.error(f"Deployment failed: {str(e)}") + raise diff --git a/core/utils.py b/core/utils.py new file mode 100644 index 0000000..041fab6 --- /dev/null +++ b/core/utils.py @@ -0,0 +1,370 @@ +# pulumi/core/utils.py + +""" +Utility Functions Module + +This module provides generic, reusable utility functions for Pulumi resource management. +Includes resource transformations, Helm interactions, and infrastructure utilities. 
+""" + +import re +import os +import tempfile +import json +from typing import Optional, Dict, Any, List, Union, Callable, TypeVar, cast +import requests +import yaml +import logging +from packaging.version import parse as parse_version, InvalidVersion, Version +from pulumi import ResourceOptions, Resource, ResourceTransformationArgs, ResourceTransformationResult, runtime +import pulumi_kubernetes as k8s +from pulumi_kubernetes.meta.v1 import ObjectMetaArgs +import time +import pulumi +from pulumi import log + +# Configure logging with structured format +logging.basicConfig( + level=logging.INFO, + format='%(asctime)s - %(levelname)s - %(message)s' +) + +# Type variables for generic functions +T = TypeVar('T') +MetadataType = Union[Dict[str, Any], ObjectMetaArgs] + +def set_resource_metadata( + metadata: MetadataType, + global_labels: Dict[str, str], + global_annotations: Dict[str, str] +) -> None: + """ + Updates resource metadata with global labels and annotations. + Handles both dict and ObjectMetaArgs metadata types. 
+ + Args: + metadata: Resource metadata to update + global_labels: Global labels to apply + global_annotations: Global annotations to apply + + Raises: + TypeError: If metadata is of an unsupported type + """ + try: + if isinstance(metadata, dict): + metadata.setdefault("labels", {}).update(global_labels) + metadata.setdefault("annotations", {}).update(global_annotations) + elif isinstance(metadata, ObjectMetaArgs): + if metadata.labels is None: + metadata.labels = {} + metadata.labels.update(global_labels) + if metadata.annotations is None: + metadata.annotations = {} + metadata.annotations.update(global_annotations) + else: + raise TypeError(f"Unsupported metadata type: {type(metadata)}") + except Exception as e: + logging.error(f"Failed to update resource metadata: {str(e)}") + raise + + +def generate_global_transformations( + global_labels: Dict[str, str], + global_annotations: Dict[str, str] +) -> None: + """ + Registers global transformations for all Pulumi resources. + Ensures consistent metadata across all resources. + + Args: + global_labels: Global labels to apply + global_annotations: Global annotations to apply + """ + def global_transform( + args: pulumi.ResourceTransformationArgs, + ) -> pulumi.ResourceTransformationResult: + """ + Global transformation function for Pulumi resources. + Applies metadata consistently across all resources. 
+ + Args: + args: Resource transformation arguments + + Returns: + ResourceTransformationResult: Transformed resource properties + """ + props = args.props + + try: + if "metadata" in props: + set_resource_metadata(props["metadata"], global_labels, global_annotations) + elif "spec" in props and isinstance(props["spec"], dict): + if "metadata" in props["spec"]: + set_resource_metadata( + props["spec"]["metadata"], + global_labels, + global_annotations + ) + + return ResourceTransformationResult(props, args.opts) + except Exception as e: + logging.error(f"Error in global transform: {str(e)}") + return ResourceTransformationResult(props, args.opts) + + pulumi.runtime.register_stack_transformation(global_transform) + + +def get_latest_helm_chart_version( + repo_url: str, + chart_name: str, + timeout: int = 30, + verify_ssl: bool = True, + max_retries: int = 3, +) -> str: + """ + Fetches the latest stable version of a Helm chart. + Includes retry logic and proper error handling. + + Args: + repo_url: The base URL of the Helm repository + chart_name: The name of the Helm chart + timeout: Request timeout in seconds + verify_ssl: Whether to verify SSL certificates + max_retries: Number of retry attempts + + Returns: + str: The latest stable version or error message + + Raises: + requests.RequestException: If the request fails + yaml.YAMLError: If parsing the index fails + """ + for attempt in range(max_retries): + try: + index_url = repo_url.rstrip("/") + "/index.yaml" + logging.info(f"Fetching Helm repository index from URL: {index_url}") + + response = requests.get( + index_url, + timeout=timeout, + verify=verify_ssl + ) + response.raise_for_status() + + index = yaml.safe_load(response.content) + + if chart_name not in index.get("entries", {}): + logging.warning(f"No chart named '{chart_name}' found in repository") + return "Chart not found" + + chart_versions = index["entries"][chart_name] + stable_versions = [ + v for v in chart_versions + if 
is_stable_version(v["version"]) + ] + + if not stable_versions: + logging.warning(f"No stable versions found for chart '{chart_name}'") + return "No stable versions found" + + latest_chart = max( + stable_versions, + key=lambda x: parse_version(x["version"]) + ) + + version = latest_chart["version"].lstrip("v") + logging.info(f"Found latest version {version} for chart {chart_name}") + return version + + except (requests.RequestException, yaml.YAMLError) as e: + if attempt == max_retries - 1: + raise + logging.warning(f"Attempt {attempt + 1} failed, retrying: {str(e)}") + time.sleep(2 ** attempt) + + +def is_stable_version(version_str: str) -> bool: + """ + Determines if a version string represents a stable release. + Handles various version formats and edge cases. + + Args: + version_str: The version string to check + + Returns: + bool: True if the version is stable + """ + try: + version = parse_version(version_str) + return ( + isinstance(version, Version) and + not version.is_prerelease and + not version.is_devrelease and + not version.is_postrelease + ) + except InvalidVersion: + return False + + +def extract_repo_name(remote_url: str) -> str: + """ + Extracts the repository name from a Git remote URL. + Handles various Git URL formats. 
+ + Args: + remote_url: The Git remote URL + + Returns: + str: The repository name or original URL if parsing fails + """ + try: + # Handle SSH URLs + if remote_url.startswith("git@"): + parts = remote_url.split(":") + if len(parts) == 2: + return parts[1].rstrip(".git") + + # Handle HTTPS URLs + match = re.search(r"[:/]([^/:]+/[^/\.]+)(\.git)?$", remote_url) + if match: + return match.group(1) + + return remote_url + except Exception as e: + logging.warning(f"Error extracting repo name from {remote_url}: {str(e)}") + return remote_url + + +def wait_for_crds( + crd_names: List[str], + k8s_provider: k8s.Provider, + depends_on: List[Resource], + parent: Resource, + timeout: int = 300 +) -> List[Resource]: + """ + Waits for CRDs to be present and ensures dependencies. + Includes timeout and proper error handling. + + Args: + crd_names: List of CRD names to wait for + k8s_provider: The Kubernetes provider + depends_on: List of dependencies + parent: The parent resource + timeout: Timeout in seconds + + Returns: + List[Resource]: The CRD resources or dummy CRDs during preview + + Raises: + TimeoutError: If CRDs don't become ready within timeout + pulumi.ResourceError: If CRD creation fails + """ + crds: List[Resource] = [] + + for crd_name in crd_names: + try: + crd = k8s.apiextensions.v1.CustomResourceDefinition.get( + resource_name=f"crd-{crd_name}", + id=crd_name, + opts=ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + custom_timeouts=pulumi.CustomTimeouts( + create=f"{timeout}s", + delete="60s" + ), + ), + ) + crds.append(crd) + except Exception: + if pulumi.runtime.is_dry_run(): + logging.info(f"CRD {crd_name} not found, creating dummy CRD") + dummy_crd = create_dummy_crd( + crd_name, + k8s_provider, + depends_on, + parent + ) + if dummy_crd: + crds.append(dummy_crd) + + return crds + + +def create_dummy_crd( + crd_name: str, + k8s_provider: k8s.Provider, + depends_on: List[Resource], + parent: Resource +) -> 
Optional[k8s.yaml.ConfigFile]: + """ + Creates a dummy CRD for preview runs. + Ensures proper cleanup of temporary files. + + Args: + crd_name: The name of the CRD + k8s_provider: The Kubernetes provider + depends_on: List of dependencies + parent: The parent resource + + Returns: + Optional[k8s.yaml.ConfigFile]: The dummy CRD resource + """ + parts = crd_name.split(".") + plural = parts[0] + group = ".".join(parts[1:]) + kind = "".join(word.title() for word in plural.split("_")) + + dummy_crd_yaml = f""" +apiVersion: apiextensions.k8s.io/v1 +kind: CustomResourceDefinition +metadata: + name: {plural}.{group} +spec: + group: {group} + names: + plural: {plural} + singular: {plural.lower()} + kind: {kind} + shortNames: [{plural[:3].lower()}] + scope: Namespaced + versions: + - name: v1 + served: true + storage: true + schema: + openAPIV3Schema: + type: object + properties: + spec: + type: object + x-kubernetes-preserve-unknown-fields: true + """ + + temp_file = None + try: + with tempfile.NamedTemporaryFile( + mode="w", + suffix=".yaml", + delete=False + ) as temp_file: + temp_file.write(dummy_crd_yaml) + temp_file_path = temp_file.name + + return k8s.yaml.ConfigFile( + f"dummy-crd-{crd_name}", + file=temp_file_path, + opts=ResourceOptions( + provider=k8s_provider, + depends_on=depends_on, + parent=parent, + ), + ) + except Exception as e: + logging.error(f"Error creating dummy CRD: {str(e)}") + return None + finally: + if temp_file and os.path.exists(temp_file.name): + os.unlink(temp_file.name) diff --git a/pulumi/default_versions.json b/default_versions.json similarity index 100% rename from pulumi/default_versions.json rename to default_versions.json diff --git a/docs/developer_guide/module_refactoring.md b/docs/developer_guide/module_refactoring.md new file mode 100644 index 0000000..304610e --- /dev/null +++ b/docs/developer_guide/module_refactoring.md @@ -0,0 +1,229 @@ +# Module Refactoring Guide + +## Overview +This document outlines the standardized approach 
for refactoring Pulumi infrastructure modules to follow Python best practices while maintaining infrastructure-as-code principles. The goal is to create a consistent, maintainable, and testable module structure across the codebase. + +## Core Principles + +### 1. Clear Module Interface +- Each module must expose its public API through `__init__.py` +- Use `__all__` to explicitly declare public interfaces +- Provide type hints for all public interfaces +- Document the module's purpose and usage in the module docstring + +### 2. Separation of Concerns +Modules should separate functionality into distinct files: +- `__init__.py`: Public API and entry points +- `types.py`: Type definitions and configuration classes +- Resource-specific implementation files (e.g., `provider.py`, `resources.py`) +- No direct resource creation in `__init__.py` + +### 3. Object-Oriented Design +- Use classes to encapsulate related functionality +- Prefer composition over inheritance +- Implement clear class responsibilities +- Use properties for computed or lazy-loaded values + +### 4. Type Safety +- Use type hints consistently throughout the codebase +- Leverage Pydantic models for configuration validation +- Define clear interfaces using Protocol classes where appropriate +- Use TypedDict for structured dictionary types + +## Standard Module Structure + +```bash +module_name/ +├── __init__.py +├── types.py +├── provider.py +├── resources.py +├── [component_name].py +└── README.md +``` + +### File Responsibilities + +#### __init__.py +- Expose the public API +- Define the module's entry points +- Import all public interfaces and submodules +- Use `__all__` to manage the public API + +```python +""" +Module docstring describing the purpose and functionality of the module. 
+""" +from typing import List, Optional, Tuple +from .types import ModuleConfig +from .resources import ResourceManager + +__all__ = [ + "ModuleConfig", + "ResourceManager", + "create_infrastructure" +] + +def create_infrastructure( + config: ModuleConfig, + dependencies: Optional[List[pulumi.Resource]] = None +) -> Tuple[str, pulumi.Resource, Dict[str, Any]]: + """ + Create infrastructure with the given configuration. + """ + pass +``` + +#### types.py + +```python +from typing import TypedDict, Optional +from pydantic import BaseModel + +class ModuleConfig(BaseModel): + """ + Module configuration. + """ + pass +``` + +#### resources.py + +```python +from pulumi import ProviderResource + +class ResourceManager: + """ + Manages the creation and configuration of module resources. + """ + def __init__(self, provider: ProviderResource): + self.provider = provider + + def create_resource(self, config: ModuleConfig) -> pulumi.Resource: + """ + Create a resource with the given configuration. + """ + pass +``` + +## Implementation Requirements + +### 1. Configuration Management +- Use Pydantic models for configuration validation +- Support merging of user configs with defaults +- Validate configuration at initialization time +- Support environment variable overrides where appropriate + +### 2. Resource Management +- Implement idempotent resource creation +- Handle resource dependencies explicitly +- Support resource tagging and metadata +- Implement proper cleanup and error handling + +### 3. Provider Integration +- Abstract provider-specific details +- Support multiple provider configurations +- Handle provider authentication securely +- Support cross-provider dependencies + +### 4. Testing Support +- Design classes for testability +- Support mocking of external dependencies +- Enable unit testing of configuration +- Support integration testing of resource creation + +## Migration Process + +1. 
**Analysis Phase** + - Review existing module functionality + - Identify public interfaces + - Map resource dependencies + - Document current configuration options + +2. **Refactoring Phase** + - Create new file structure + - Implement type definitions + - Create resource management classes + - Migrate existing functionality + +3. **Testing Phase** + - Write unit tests + - Verify existing functionality + - Test error conditions + - Validate configuration handling + +4. **Documentation Phase** + - Update module documentation + - Add docstrings + - Create usage examples + - Document breaking changes + +## Best Practices + +### Code Organization +- Group related functionality into classes +- Use private methods for implementation details +- Implement clear error handling +- Follow PEP 8 style guidelines + +### Documentation +- Include docstrings for all public interfaces +- Document configuration options +- Provide usage examples +- Document any breaking changes + +### Error Handling +- Use custom exception classes +- Provide meaningful error messages +- Handle resource creation failures +- Implement proper cleanup + +### Testing +- Write unit tests for configuration +- Test resource creation logic +- Implement integration tests +- Test error conditions + +## Breaking Changes +When refactoring modules, maintain backward compatibility: +- Keep existing entry points functional +- Support old configuration formats +- Document migration paths +- Version breaking changes appropriately + +## Example Implementation +See the AWS module implementation as a reference: +- `pulumi/modules/aws/__init__.py` +- `pulumi/modules/aws/types.py` +- `pulumi/modules/aws/resources.py` +- `pulumi/modules/aws/provider.py` + +## Validation Checklist + +- [ ] Clear public API in `__init__.py` +- [ ] Type definitions in `types.py` +- [ ] Resource management classes implemented +- [ ] Configuration validation +- [ ] Error handling +- [ ] Documentation +- [ ] Unit tests +- [ ] Integration tests +- 
[ ] Breaking changes documented +- [ ] Migration guide provided + +## Next Steps + +1. Complete AWS module refactoring +2. Review and validate changes +3. Create test suite +4. Document changes +5. Apply pattern to remaining modules +6. Validate full codebase +7. Update global documentation + +## References + +- [Python Package Structure](https://docs.python.org/3/tutorial/modules.html) +- [Type Hints PEP 484](https://www.python.org/dev/peps/pep-0484/) +- [Pulumi Architecture](https://www.pulumi.com/docs/intro/concepts/how-pulumi-works/) +- [Python Style Guide](https://www.python.org/dev/peps/pep-0008/) diff --git a/docs/developer_guide/modules/core/README.md b/docs/developer_guide/modules/core/README.md new file mode 100644 index 0000000..5b66c01 --- /dev/null +++ b/docs/developer_guide/modules/core/README.md @@ -0,0 +1,351 @@ +# Konductor Core Module Documentation + +## Introduction + +The **Konductor Core Module** serves as the foundational framework for Infrastructure as Code (IaC) using Pulumi and Python. It provides a robust, type-safe, and maintainable architecture that enables teams to work independently while ensuring consistency and compliance across infrastructure deployments. By abstracting common IaC patterns, the core module makes your codebase more DRY (Don't Repeat Yourself), reusable, and dynamic. + +This documentation is intended for a wide audience, including junior, senior, and principal developers, DevOps practitioners, executive leadership, and the open-source community. 
+ +## Table of Contents + +- [Architecture Overview](#architecture-overview) +- [Core Module Components](#core-module-components) + - [Directory Structure](#directory-structure) + - [Design Philosophy](#design-philosophy) +- [Detailed Component Descriptions](#detailed-component-descriptions) + - [`__init__.py`](#__init__py) + - [`config.py`](#configpy) + - [Why Is Configuration Complex?](#why-is-configuration-complex) + - [`deployment.py`](#deploymentpy) + - [`metadata.py`](#metadatapy) + - [`resource_helpers.py`](#resource_helperspy) + - [`types.py`](#typespy) + - [`utils.py`](#utilspy) +- [Kubernetes Module Centralized Version Control](#kubernetes-module-centralized-version-control) +- [Main Entry Point `__main__.py`](#main-entry-point-__main__py) +- [When to Use Core Module vs. Module-Specific Code](#when-to-use-core-module-vs-module-specific-code) +- [Best Practices](#best-practices) +- [Development Guidelines](#development-guidelines) +- [Conclusion](#conclusion) + +## Architecture Overview + +### Directory Structure + +The core module is organized as follows: + +``` +pulumi/core/ +├── __init__.py # Module initialization and exports +├── config.py # Advanced configuration management system +├── deployment.py # Deployment orchestration and lifecycle management +├── metadata.py # Global metadata and tagging utilities +├── resource_helpers.py # Common resource creation and management functions +├── types.py # Shared type definitions and schemas +└── utils.py # General utility functions +``` + +### Design Philosophy + +The core module implements several key principles: + +1. **Separation of Concerns**: Each component has a single, well-defined responsibility. +2. **Type Safety**: Comprehensive type checking throughout the codebase to prevent runtime errors. +3. **Configuration Isolation**: Modules can define their own configuration schemas independently. +4. **Resource Abstraction**: Centralizes common resource patterns for consistency and reuse. +5. 
**Compliance Integration**: Built-in support for security and compliance requirements. +6. **Team Independence**: Enables different teams to maintain independent configurations without affecting others. + +## Detailed Component Descriptions + +### `__init__.py` + +This file initializes the core module and exports essential classes and functions for external use. It serves as the entry point for the module, making it easier for other parts of the program to import and utilize core functionalities. + +### `config.py` + +**Purpose**: Manages the complex configuration hierarchy used across the entire IaC codebase. + +**Key Features**: + +- **Layered Configuration**: Merges configurations from global defaults, stack-specific settings, environment variables, and module-specific overrides. +- **Validation**: Ensures that configurations meet predefined schemas and compliance requirements. +- **Flexibility**: Supports multiple deployment environments and scenarios. +- **Team Independence**: Allows each module team to maintain independent configuration specifications. + +#### Why Is Configuration Complex? + +The configuration system is intentionally complex to accommodate: + +- **Multi-Team Development**: Enables teams to define their own configuration schemas without impacting others. +- **Version Control**: Each module maintains its own application versioning, allowing for independent updates. +- **Compliance Requirements**: Centralized validation enforces security and compliance standards. +- **Environment-Specific Configurations**: Supports varying configurations across many teams with their own development, staging, and production environments. +- **Default Value Management**: Provides a mechanism for default settings that can be overridden as needed. + +**Example**: + +```python +class ConfigurationManager: + """ + Manages the complex configuration hierarchy: + 1. Default values + 2. Stack-specific overrides + 3. Environment variables + 4. 
User-provided configurations + 5. Module-specific settings + """ + + def get_config(self, module_name: str) -> Dict[str, Any]: + # Logic to merge configurations + pass +``` + +### `deployment.py` + +**Purpose**: Orchestrates the deployment process, managing the lifecycle of resources and modules. + +**Key Features**: + +- **Automatic Module Discovery**: Dynamically loads and initializes modules from the `pulumi/modules/` directory. +- **Dependency Resolution**: Ensures resources are created in the correct order based on dependencies. +- **Parallel Deployment Capabilities**: Optimizes deployment time by parallelizing independent resource creations. +- **Comprehensive Error Handling**: Provides robust mechanisms to handle failures and rollbacks. +- **State Management**: Interfaces with Pulumi's state management to track resource states. + +**Example**: + +```python +class DeploymentManager: + """ + Manages the deployment lifecycle: + - Module discovery and loading + - Dependency resolution + - Resource creation ordering + - Error handling and recovery + - State management + """ + + def deploy(self): + # Logic to orchestrate deployment + pass +``` + +### `metadata.py` + +**Purpose**: Handles global metadata and labeling for resources. + +**Key Features**: + +- **Compliance Labels**: Automatically generates and distributes required compliance and audit tags to resources. +- **Git Information Integration**: Incorporates Git metadata (e.g., commit hashes) into resource tags for traceability. +- **Resource Tagging Standards**: Enforces consistent tagging across all modules leaving module maintainers free to implement provider specific tagging implementations. +- **Audit Trail Maintenance**: Facilitates auditing by returning reportable metadata. 
+ +**Example**: + +```python +class MetadataManager: + """ + Manages global metadata: + - Compliance labels + - Git information + - Resource tagging + - Audit trail + """ + + def get_metadata(self) -> Dict[str, str]: + # Returns standardized metadata for resources + pass +``` + +### `resource_helpers.py` + +**Purpose**: Centralizes common resource creation patterns and utilities. + +**Key Features**: + +- **Consistent Metadata Application**: Ensures all resources have the necessary metadata and tags. +- **Standardized Error Handling**: Provides consistent mechanisms for handling resource creation errors. +- **Resource Dependencies**: Simplifies the management of resource dependencies and ordering. +- **Cleanup Procedures**: Implements standardized procedures for resource deletion and cleanup. + +**Example**: + +```python +class ResourceManager: + """ + Provides standardized resource creation patterns: + - Consistent metadata application + - Error handling + - Resource dependencies + - Cleanup procedures + """ + + def create_resource(self, config: Dict[str, Any]) -> pulumi.Resource: + # Logic to create a resource with standardized settings + pass +``` + +### `types.py` + +**Purpose**: Defines shared type definitions and schemas used across modules. + +**Key Features**: + +- **Type Definitions**: Provides base classes and type hints for configurations and resources. +- **Schema Validation**: Ensures that configurations adhere to expected structures. +- **Reusability**: Facilitates consistent typing across different modules. + +**Example**: + +```python +from typing import TypedDict + +class BaseConfig(TypedDict): + """Base configuration structure for all modules.""" + enabled: bool + version: str + parameters: Dict[str, Any] + +class ResourceConfig(TypedDict): + """Standard resource configuration structure.""" + name: str + type: str + metadata: Dict[str, Any] +``` + +### `utils.py` + +**Purpose**: Contains general utility functions that support other components. 
+ +**Key Features**: + +- **Common Utilities**: Functions for tasks like merging configurations, handling strings, etc. +- **Helper Functions**: Small, reusable functions that are used across multiple modules. +- **Performance Optimization**: Includes functions that enhance performance and efficiency. + +**Example**: + +```python +def merge_configurations(*configs: Dict[str, Any]) -> Dict[str, Any]: + """Merges multiple configuration dictionaries into one.""" + # Logic to merge dictionaries + pass +``` + +## Kubernetes Module Centralized Version Control + +**Purpose**: The Kubernetes module centralizes version control to ensure consistency and reliability across all deployments. + +**Reasons for Centralization**: + +- **Consistency**: Guarantees that all modules and environments can opt in to a centralized, common version control system. +- **Simplified Upgrades**: Streamlines the process of maintaining component versions at scale. +- **Compliance and Security**: Ensures that only approved and secure versions are used, adhering to organizational policies. +- **Reduced Conflicts**: Minimizes the risk of version incompatibilities between different modules. + +By centralizing version control in the core module, teams can focus on their specific functionalities without worrying about underlying Kubernetes version discrepancies. + +## Main Entry Point `__main__.py` + +The `__main__.py` file serves as the simple entry point for the Pulumi program. It is designed to be straightforward, allowing different module teams to maintain independent configuration specifications without interference. + +**Key Features**: + +- **Simplified Orchestration**: Delegates the deployment process to the `DeploymentManager` in `deployment.py`. +- **Team Autonomy**: Each team's module can be developed and deployed independently. +- **Preventing Cross-Team Interference**: Changes in one module's deployment do not affect others. 
+ +**Example**: + +```python +import pulumi +from core.deployment import DeploymentManager + +# Initialize the deployment manager +deployment_manager = DeploymentManager() + +# Execute the deployment process +deployment_manager.deploy() +``` + +## When to Use Core Module vs. Module-Specific Code + +### Core Module Code + +**Use the core module when**: + +- The functionality is **used by multiple modules**. +- It **implements organizational standards** or compliance requirements. +- It **provides common resource patterns** that promote consistency. +- You need to **abstract repetitive code** to make it DRY. + +**Example**: + +```python +# Core module - get module configuration +def get_module_configuration(config: Dict[str, Any]) -> Dict[str, Any]: + """Returns the module configuration.""" + pass +``` + +### Module-Specific Code + +**Keep code in individual modules when**: + +- It's **specific to one module's functionality**. +- It implements **module-specific business logic**. +- It handles **module-specific resource types** not used elsewhere. +- It doesn't need to be shared across modules. + +**Example**: + +```python +# AWS module - reusable but only within AWS module resource patterns +def create_compliant_s3_bucket(name: str, config: Dict[str, Any]) -> s3.Bucket: + """Creates an S3 bucket with standard compliance controls.""" + # Logic to create the bucket + pass +``` + +## Best Practices + +- **Maintain Type Safety**: Use type hints and `TypedDict` to prevent runtime errors. +- **Follow the DRY Principle**: Abstract common code into the core module to avoid duplication. +- **Isolate Configurations**: Keep module configurations independent to prevent conflicts. +- **Document Thoroughly**: Provide clear docstrings and comments for all functions and classes. +- **Implement Comprehensive Error Handling**: Use custom exceptions and handle errors gracefully. +- **Use Centralized Metadata**: Apply consistent tagging and labeling using the `metadata.py` utilities. 
+- **Adhere to Compliance Standards**: Leverage the core module's compliance features to meet organizational policies. + +## Development Guidelines + +- **Adding New Core Features**: + + - **Evaluate the Need**: + - Is the functionality used by multiple modules? + - Does it align with organizational standards? + - Should it be centralized for consistency? + + - **Design**: + - Create clear type definitions. + - Plan for error handling and edge cases. + - Consider backward compatibility. + + - **Implementation**: + - Write type-safe, clean code. + - Add comprehensive unit tests. + - Document the new feature thoroughly. + +- **Testing**: + + - **Unit Tests**: Test individual components for correctness and type safety. + - **Integration Tests**: Verify interactions between components and modules. + - **Compliance Tests**: Ensure new features meet security and compliance requirements. + +- **Contributing**: + + - **Propose Changes**: Discuss your ideas with maintainers before implementation. + - **Follow Code Standards**: Adhere to the project's coding guidelines. + - **Submit Pull Requests**: Provide detailed descriptions and await code reviews. diff --git a/docs/infrastructure_as_code.md b/docs/infrastructure_as_code.md new file mode 100644 index 0000000..e590bbd --- /dev/null +++ b/docs/infrastructure_as_code.md @@ -0,0 +1,131 @@ +# Python Developer Guidelines for Infrastructure as Code (IaC) + +## Table of Contents + +1. [Introduction](#introduction) +2. [Understanding Infrastructure as Code](#understanding-infrastructure-as-code) +3. [State-Based Orchestration Principles](#state-based-orchestration-principles) +4. [Python Coding Patterns for IaC](#python-coding-patterns-for-iac) +5. [Modular Design and Reusability](#modular-design-and-reusability) +6. [State Management and Idempotency](#state-management-and-idempotency) +7. [Resource Lifecycle Management](#resource-lifecycle-management) +8. [Best Practices](#best-practices) +9. 
[Practical Examples](#practical-examples) +10. [Conclusion](#conclusion) +11. [Further Reading](#further-reading) + +## Introduction + +This document provides comprehensive guidelines for Python developers aiming to master Infrastructure as Code (IaC) principles, focusing on coding patterns and state-based orchestration. It caters to: + +- **Junior Developers**: Proficient in Python but new to IaC and modular design. +- **Senior Developers**: Experienced in cloud automation but unfamiliar with declarative resource management in IaC. +- **Principal Developers**: Skilled in advanced Python architectures but new to IaC in general-purpose languages. +- **AI Coding Assistants**: Seeking high-quality, detailed documentation with maximal semantic density. + +## Understanding Infrastructure as Code + +**Infrastructure as Code (IaC)** is the practice of provisioning and managing computing infrastructure through machine-readable definition files, rather than manual hardware configuration or interactive configuration tools. + +- **Declarative vs. Imperative**: Declarative IaC specifies *what* the desired state is, while imperative IaC specifies *how* to achieve it. +- **Benefits of IaC**: + - **Reproducibility**: Ensures consistent environments across deployments. + - **Version Control**: Infrastructure definitions can be versioned like application code. + - **Automation**: Reduces manual errors and speeds up deployment. + +## State-Based Orchestration Principles + +State-based orchestration manages resources by comparing the desired state (as defined in code) with the current state of the infrastructure. + +- **Desired State Configuration**: Define the intended state without specifying the steps to achieve it. +- **Idempotency**: Applying the same configuration multiple times yields the same result. +- **State Synchronization**: Tools track resource states to determine necessary actions. 
+ +## Python Coding Patterns for IaC + +Using Python for IaC combines the versatility of a general-purpose language with infrastructure management. + +- **Advantages**: + - **Expressiveness**: Leverage Python's syntax and libraries. + - **Modularity**: Organize code into reusable modules. + - **Integration**: Utilize existing Python ecosystems and tools. + +### Key Patterns: + +- **Abstraction**: Encapsulate infrastructure components into classes and functions. +- **Encapsulation**: Hide implementation details, exposing only necessary interfaces. +- **Composition**: Build complex systems by combining simpler components. + +## Modular Design and Reusability + +Modular design promotes maintainability and scalability. + +- **Modules**: Self-contained units of code representing infrastructure components. +- **Reusability**: Write modules once and use them across different projects. +- **Encapsulation**: Prevent external code from depending on internal module details. + +### Implementation Strategies: + +- **Use Classes and Functions**: Encapsulate logic for resource creation. +- **Parameterization**: Allow customization through input parameters. +- **Naming Conventions**: Adopt consistent naming for clarity. + +## State Management and Idempotency + +Managing state is crucial for predictable infrastructure behavior. + +- **State Files**: Store metadata about deployed resources. +- **Idempotent Operations**: Ensure that repeated executions don't produce unintended changes. +- **Change Detection**: Compare desired and actual states to determine necessary updates. + +### Best Practices: + +- **Avoid Mutable Global State**: Use function parameters and return values. +- **Explicit State Definitions**: Clearly define resource states in code. +- **State Isolation**: Separate state management from business logic. + +## Resource Lifecycle Management + +Manage resources by defining or removing their configurations. 
+ +- **Creation**: Define resources in code; the orchestration tool provisions them. +- **Deletion**: Remove resource definitions; the tool destroys them automatically. +- **No Explicit Destroy Logic**: Rely on the orchestration tool's state management. + +### Benefits: + +- **Simplicity**: Focus on desired state without handling low-level details. +- **Consistency**: Reduce errors by automating resource cleanup. +- **Efficiency**: Save time by not writing repetitive destroy code. + +## Best Practices + +- **Version Control Everything**: Keep all code, including infrastructure definitions, in a VCS. +- **Write Tests**: Implement unit and integration tests for your IaC code. +- **Documentation**: Maintain clear and concise documentation for modules and functions. +- **Security**: Protect sensitive data using secrets management tools. +- **Code Reviews**: Regularly review code to ensure adherence to standards. + +## Practical Examples + +### Example 1: Creating an AWS S3 Bucket with Pulumi + +```python +import pulumi +from pulumi_aws import s3 + +# Create an AWS S3 bucket +bucket = s3.Bucket('my-bucket') + +# Export the bucket name +pulumi.export('bucket_name', bucket.id) +``` + +### Example 2: Deleting a Resource by Removing Its Definition + +- **Before Deletion**: Resource is defined in code. +- **After Deletion**: Remove the resource code; run the orchestration tool to update the infrastructure. + +## Conclusion + +Mastering IaC with Python involves understanding state-based orchestration, adopting proper coding patterns, and emphasizing modularity and idempotency. By defining desired states and relying on orchestration tools, developers can efficiently manage infrastructure lifecycles without explicit destroy logic. 
diff --git a/docs/pulumi_iac.md b/docs/pulumi_iac.md new file mode 100644 index 0000000..3dccb91 --- /dev/null +++ b/docs/pulumi_iac.md @@ -0,0 +1,168 @@ +## Implementing Infrastructure as Code with Pulumi and GitOps + +### Introduction to Pulumi + +**Pulumi** is an open-source infrastructure as code (IaC) tool that enables developers to define and manage cloud resources using general-purpose programming languages like Python. Unlike traditional IaC tools that use domain-specific languages (DSLs), Pulumi leverages the full power of Python, allowing for advanced abstractions, code reuse, and integration with existing software development workflows. + +#### Why Choose Pulumi for IaC with Python? + +- **Unified Language**: Use Python for both application code and infrastructure definitions, reducing context switching. +- **Rich Ecosystem**: Access Python's extensive libraries and frameworks to enhance infrastructure code. +- **Advanced Abstractions**: Implement complex logic, loops, and conditional statements natively. +- **Team Collaboration**: Align development and operations teams by using a common language and tooling. + +#### Benefits Across Stakeholders + +- **Developers**: Simplify infrastructure management with familiar programming constructs. +- **DevOps Practitioners**: Streamline deployment processes and integrate with CI/CD pipelines. +- **Executive Leadership**: Accelerate time-to-market, enhance reliability, and optimize costs through automation. + +### Developing Pulumi Python Infrastructure as Code + +#### Getting Started with Pulumi and Python + +1. **Install Pulumi CLI**: Download and install the Pulumi command-line interface. +2. **Configure Cloud Provider Credentials**: Set up authentication for your target cloud platform (e.g., AWS, Azure, GCP). +3. **Initialize a New Project**: + + ```bash + pulumi new aws-python + ``` + +4. **Define Infrastructure in `__main__.py`**: Write Python code to declare cloud resources. 
+ +#### Defining Infrastructure Resources + +Use Pulumi's Python SDK to create and configure resources. + +```python +import pulumi +from pulumi_aws import s3 + +# Create an S3 bucket +bucket = s3.Bucket('my-bucket', acl='private') + +# Export the bucket name +pulumi.export('bucket_name', bucket.id) +``` + +- **Resource Arguments**: Pass parameters to customize resources. +- **Outputs**: Export resource attributes for use in other components or stacks. + +#### State Management in Pulumi + +Pulumi maintains a **state file** that tracks the desired and actual state of resources. + +- **Backends**: Store state locally or in remote backends like Pulumi Service or AWS S3. +- **State Updates**: Pulumi compares the code with the state file to determine necessary changes. +- **Concurrent Access**: Locking mechanisms prevent simultaneous modifications. + +#### Modularization and Reusability in Pulumi + +Organize code into modules for better maintainability. + +- **Create Reusable Components**: Encapsulate resource definitions in classes or functions. + + ```python + class WebServer(pulumi.ComponentResource): + def __init__(self, name, opts=None): + super().__init__('custom:resource:WebServer', name, {}, opts) + # Define resources here + self.register_outputs({}) + ``` + +- **Parameterization**: Allow modules to accept inputs for flexibility. +- **Packaging**: Distribute modules as Python packages within your organization. + +### GitOps Workflow with Pulumi and Python + +#### Understanding GitOps + +**GitOps** is a workflow that uses Git repositories as the single source of truth for declarative infrastructure and applications. Changes to the infrastructure are made via code commits, triggering automated deployment processes. + +#### Implementing GitOps with Pulumi + +1. **Version Control**: Store all Pulumi code in a Git repository. +2. **Branching Strategy**: Use feature branches for development and pull requests for code reviews. +3. 
**Automated Pipelines**: Set up CI/CD pipelines to deploy changes upon merge. + + - **Continuous Integration (CI)**: Linting, testing, and validating infrastructure code. + - **Continuous Deployment (CD)**: Automatically apply infrastructure changes using Pulumi. + +4. **Pull Request Workflows**: + + - **Code Review**: Ensure code quality and adherence to standards. + - **Approval Gates**: Implement manual approvals for critical environments. + +#### CI/CD Pipeline Configuration + +- **Pipeline Steps**: + 1. **Checkout Code**: Retrieve the latest code from the repository. + 2. **Install Dependencies**: Set up the Python environment and install Pulumi packages. + 3. **Login to Pulumi Backend**: Authenticate with the state backend. + 4. **Preview Changes**: Run `pulumi preview` to show potential changes. + 5. **Apply Changes**: Execute `pulumi up` to update infrastructure. + +- **Sample Pipeline Configuration** (e.g., using GitHub Actions): + + ```yaml + name: Pulumi CI/CD + + on: + push: + branches: + - main + + jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - name: Set up Python + uses: actions/setup-python@v2 + with: + python-version: '3.x' + - name: Install Dependencies + run: pip install -r requirements.txt + - name: Install Pulumi + run: curl -fsSL https://get.pulumi.com | sh + - name: Login to Pulumi Backend + run: pulumi login + - name: Preview Changes + run: pulumi preview + - name: Apply Changes + run: pulumi up --yes + ``` + +#### Best Practices for GitOps with Pulumi + +- **Immutable Infrastructure**: Avoid manual changes to deployed resources. +- **Declarative Definitions**: Ensure all infrastructure configurations are code-driven. +- **Audit Trails**: Utilize Git history for change tracking and compliance. +- **Rollback Mechanisms**: Revert to previous states by checking out earlier commits. 
+ +### Advantages for Executive Leadership + +#### Accelerated Delivery and Agility + +- **Faster Deployments**: Automation reduces time between development and production. +- **Market Responsiveness**: Quickly adapt infrastructure to changing business needs. + +#### Risk Mitigation and Compliance + +- **Consistency**: Code-driven deployments minimize human error. +- **Traceability**: Detailed logs and version control aid in audits and compliance checks. + +#### Cost Optimization + +- **Resource Efficiency**: Automate scaling and teardown of resources to match demand. +- **Operational Overhead Reduction**: Streamlined processes reduce manual labor costs. + +#### Strategic Alignment + +- **DevOps Culture**: Foster collaboration between development and operations teams. + +- **Innovation Enablement**: Free up teams to focus on value-adding activities rather than manual tasks. + +### Conclusion + +Integrating Pulumi with GitOps workflows empowers organizations to manage infrastructure with the same rigor and agility as application code. By utilizing Python for IaC, teams benefit from a cohesive development experience, advanced abstractions, and seamless integration with existing tools and processes. This approach not only enhances technical efficiency but also delivers strategic advantages that align with organizational goals. diff --git a/modules/aws/__init__.py b/modules/aws/__init__.py new file mode 100644 index 0000000..6bdc6ca --- /dev/null +++ b/modules/aws/__init__.py @@ -0,0 +1,91 @@ +# ./pulumi/modules/aws/__init__.py +"""
AWS Cloud Infrastructure Module

Provides AWS infrastructure management capabilities including organizations,
networking, and resource provisioning with built-in compliance controls.
+""" +from typing import List, Optional, Tuple, Dict, Any, TYPE_CHECKING +import pulumi + +from .types import AWSConfig +from .provider import AWSProvider +from .organization import AWSOrganization +from .resources import ResourceManager +from .networking import NetworkManager +from .iam import IAMManager +from .eks import EksManager +from .security import SecurityManager +from .exceptions import ResourceCreationError, ConfigurationError + +if TYPE_CHECKING: + from pulumi import Resource + +__all__ = [ + 'AWSProvider', + 'AWSOrganization', + 'ResourceManager', + 'NetworkManager', + 'IAMManager', + 'EksManager', + 'SecurityManager', + 'AWSConfig', + 'create_aws_infrastructure', + 'ResourceCreationError', + 'ConfigurationError' +] + +def create_aws_infrastructure( + config: AWSConfig, + dependencies: Optional[List[Resource]] = None +) -> Tuple[str, Resource, Dict[str, Any]]: + """ + Creates AWS infrastructure based on the provided configuration. + + This is the main entry point for AWS infrastructure creation. It orchestrates + the deployment of all AWS resources including organizations, networking, + security controls, and workload resources. 
+ + Args: + config: AWS configuration settings including organization, networking, + security, and workload configurations + dependencies: Optional list of resources this deployment depends on + + Returns: + Tuple containing: + - Version string + - Main infrastructure resource (typically the organization) + - Dictionary of outputs including resource IDs and ARNs + + Raises: + ValueError: If configuration is invalid + ResourceCreationError: If resource creation fails + """ + try: + # Initialize provider with configuration + provider = AWSProvider(config) + + # Create managers in dependency order + security = SecurityManager(provider) + networking = NetworkManager(provider) + organization = AWSOrganization(provider) + resources = ResourceManager(provider) + iam = IAMManager(provider) + eks = EksManager(provider) + + # Deploy infrastructure + return provider.deploy( + dependencies, + managers={ + "security": security, + "networking": networking, + "organization": organization, + "resources": resources, + "iam": iam, + "eks": eks + } + ) + + except Exception as e: + pulumi.log.error(f"Failed to create AWS infrastructure: {str(e)}") + raise diff --git a/pulumi/modules/aws/config.py b/modules/aws/config.py similarity index 63% rename from pulumi/modules/aws/config.py rename to modules/aws/config.py index 50ce57f..701ce34 100644 --- a/pulumi/modules/aws/config.py +++ b/modules/aws/config.py @@ -16,7 +16,7 @@ import os import json -from typing import Dict, Any, Optional +from typing import Dict, Any, Optional, TYPE_CHECKING import pulumi from pulumi import log, Config, ResourceTransformationResult, ResourceTransformationArgs from pulumi_aws import Provider @@ -24,9 +24,10 @@ get_global_labels, generate_compliance_labels, generate_git_labels, + collect_git_info, ) from core.types import ComplianceConfig -from .types import AWSConfig, TenantAccountConfig +from .types import AWSConfig, TenantAccountConfig, validate_config from .taggable import TAGGABLE_RESOURCES # Constants 
@@ -91,9 +92,6 @@ def global_transform( pulumi.runtime.register_stack_transformation(global_transform) -# pulumi/modules/aws/config.py - - def generate_tags( config: AWSConfig, compliance_config: ComplianceConfig, git_info: Dict[str, str] ) -> Dict[str, str]: @@ -121,7 +119,7 @@ def generate_tags( } # Log generated tags for visibility - # pulumi.log.info(f"Generated AWS tags: {json.dumps(aws_module_tags, indent=2)}") + log.info(f"Generated AWS tags: {json.dumps(aws_module_tags, indent=2)}") # Register the global transformation generate_global_transformations(aws_module_tags) @@ -177,3 +175,109 @@ def load_tenant_account_configs() -> Dict[str, TenantAccountConfig]: ) return tenant_accounts + +def merge_configurations( + base_config: Dict[str, Any], override_config: Dict[str, Any] +) -> Dict[str, Any]: + """ + Merges two configuration dictionaries with override taking precedence. + + Args: + base_config: Base configuration dictionary. + override_config: Override configuration dictionary. + + Returns: + Dict[str, Any]: Merged configuration dictionary. + """ + merged = base_config.copy() + for key, value in override_config.items(): + if ( + key in merged + and isinstance(merged[key], dict) + and isinstance(value, dict) + ): + merged[key] = merge_configurations(merged[key], value) + else: + merged[key] = value + return merged + + +def get_default_config() -> Dict[str, Any]: + """ + Returns default AWS configuration settings. + + Returns: + Dict[str, Any]: Default configuration dictionary. + """ + return { + "region": "us-west-2", + "profile": "default", + "control_tower": { + "enabled": False, + "organizational_unit_name": "LandingZone", + }, + "global_tags": { + "managed-by": "konductor", + "environment": "production", + }, + } + + +def load_environment_overrides() -> Dict[str, Any]: + """ + Loads configuration overrides from environment variables. + + Returns: + Dict[str, Any]: Configuration overrides from environment. 
+ """ + overrides = {} + + # Map environment variables to configuration keys + env_mapping = { + "AWS_REGION": "region", + "AWS_PROFILE": "profile", + "AWS_ACCOUNT_ID": "account_id", + } + + for env_var, config_key in env_mapping.items(): + if value := os.getenv(env_var): + overrides[config_key] = value + + return overrides + +def setup_aws_configuration() -> AWSConfig: + """ + Sets up the complete AWS configuration by combining defaults, + Pulumi config, and environment variables. + + Returns: + AWSConfig: Complete AWS configuration object. + + Raises: + ValueError: If required configuration is missing or invalid. + """ + try: + # Load configurations in order of precedence + default_config = get_default_config() + pulumi_config = load_aws_config() + env_overrides = load_environment_overrides() + + # Merge configurations + merged_config = merge_configurations( + default_config, + pulumi_config.dict() + ) + final_config = merge_configurations( + merged_config, + env_overrides + ) + + # Create and validate config object + config = AWSConfig.merge(final_config) + validate_config(config) + + return config + + except Exception as e: + log.error(f"Failed to setup AWS configuration: {str(e)}") + raise diff --git a/pulumi/modules/aws/deploy.py b/modules/aws/deploy.py similarity index 70% rename from pulumi/modules/aws/deploy.py rename to modules/aws/deploy.py index c681880..90c8fe4 100644 --- a/pulumi/modules/aws/deploy.py +++ b/modules/aws/deploy.py @@ -6,14 +6,21 @@ This script initializes the AWS provider, retrieves the STS caller identity, and ensures compliance metadata is propagated as tags. 
""" - +from typing import List, Dict, Tuple, Optional, Any, TYPE_CHECKING import pulumi import pulumi_aws as aws from pulumi import ResourceOptions, log -from typing import List, Dict, Tuple +from core.metadata import collect_git_info +from core.types import ComplianceConfig from .types import AWSConfig, TenantAccountConfig -from .config import initialize_aws_provider, generate_tags, load_tenant_account_configs +from .config import ( + initialize_aws_provider, + generate_tags, + load_tenant_account_configs +) +from .exceptions import ResourceCreationError +from .provider import AWSProvider from .resources import ( create_s3_bucket, create_organization, @@ -26,16 +33,20 @@ get_or_create_organization, get_organization_root_id, ) -from core.metadata import collect_git_info -from core.types import ComplianceConfig +from .security import SecurityManager +from .networking import NetworkManager + +if TYPE_CHECKING: + from pulumi import Resource MODULE_NAME = "aws" MODULE_VERSION = "0.0.1" def deploy_aws_module( - config: AWSConfig, global_depends_on: List[pulumi.Resource] -) -> Tuple[str, pulumi.Resource]: + config: AWSConfig, + global_depends_on: List[pulumi.Resource] +) -> Tuple[str, pulumi.Resource, Dict[str, Any]]: """ Deploys the AWS module resources. @@ -45,6 +56,13 @@ def deploy_aws_module( Returns: Tuple[str, pulumi.Resource]: A tuple containing the module version and the main AWS resource deployed. 
+ + TODO: + - Enhance error handling with custom exception types + - Add rollback mechanisms for failed deployments + - Implement deployment status tracking + - Add deployment metrics collection + - Enhance logging with structured logging """ try: # Initialize AWS Provider @@ -85,7 +103,10 @@ def deploy_aws_module( if ou_applications: tenant_configs = load_tenant_account_configs() tenant_accounts = create_tenant_accounts( - organization, ou_applications, tenant_configs, aws_provider + organization, + ou_applications, + tenant_configs, + aws_provider ) # Deploy resources for each tenant @@ -100,9 +121,28 @@ def deploy_aws_module( tenant_config = tenant_configs.get(tenant_account.name) if tenant_config: deploy_tenant_resources( - tenant_provider, tenant_account, tenant_config + tenant_provider, + tenant_account, + tenant_config ) + # Deploy EKS if enabled + if config.eks and config.eks.enabled: + eks_cluster = provider.eks.create_cluster( + config.eks, + vpc.id, + [subnet.id for subnet in private_subnets], + opts=ResourceOptions( + provider=provider.provider, + depends_on=[vpc, *private_subnets] + ) + ) + module_outputs["eks_cluster"] = { + "name": eks_cluster.name, + "endpoint": eks_cluster.endpoint, + "version": eks_cluster.version + } + # Return Dictionary of AWS Module Resources to global configuration dictionary module_outputs = { "ops_data_bucket": s3_bucket.id, diff --git a/modules/aws/eks.py b/modules/aws/eks.py new file mode 100644 index 0000000..e40006d --- /dev/null +++ b/modules/aws/eks.py @@ -0,0 +1,307 @@ +# pulumi/modules/aws/eks.py + +""" +AWS EKS Management Module + +Handles creation and management of EKS clusters including: +- Cluster creation and configuration +- Node group management +- Add-on deployment +- Security and networking integration +""" + +from typing import Dict, List, Optional, Any, TYPE_CHECKING +import json +import pulumi +import pulumi_aws as aws +from pulumi import ResourceOptions, log + +if TYPE_CHECKING: + from .types import 
EksConfig, EksNodeGroupConfig, EksAddonConfig + from .provider import AWSProvider + +class EksManager: + """ + Manages EKS clusters and related resources. + + This class handles: + - Cluster provisioning + - Node group management + - Add-on deployment + - IAM integration + """ + + def __init__(self, provider: 'AWSProvider'): + """ + Initialize EKS manager. + + Args: + provider: AWSProvider instance for resource management + """ + self.provider = provider + + def create_cluster( + self, + config: 'EksConfig', + vpc_id: str, + subnet_ids: List[str], + opts: Optional[ResourceOptions] = None + ) -> aws.eks.Cluster: + """ + Creates an EKS cluster with specified configuration. + + Args: + config: EKS configuration + vpc_id: VPC ID for the cluster + subnet_ids: Subnet IDs for the cluster + opts: Optional resource options + + Returns: + aws.eks.Cluster: Created EKS cluster + """ + # Create cluster role + cluster_role = self._create_cluster_role() + + # Create KMS key for secrets encryption if enabled + kms_key = None + if config.enable_secrets_encryption: + kms_key = self.provider.security.create_kms_key( + f"eks-{config.cluster_name}-secrets", + description=f"KMS key for EKS cluster {config.cluster_name} secrets", + opts=opts + ) + + # Create the cluster + cluster = aws.eks.Cluster( + config.cluster_name, + name=config.cluster_name, + role_arn=cluster_role.arn, + version=config.kubernetes_version, + vpc_config=aws.eks.ClusterVpcConfigArgs( + subnet_ids=subnet_ids, + endpoint_private_access=config.endpoint_private_access, + endpoint_public_access=config.endpoint_public_access, + ), + encryption_config=[aws.eks.ClusterEncryptionConfigArgs( + provider=aws.eks.ProviderArgs( + key_arn=kms_key.arn + ), + resources=["secrets"] + )] if config.enable_secrets_encryption else None, + enabled_cluster_log_types=[ + "api", + "audit", + "authenticator", + "controllerManager", + "scheduler" + ], + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + 
provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + # Enable IRSA if configured + if config.enable_irsa: + self._enable_irsa(cluster, config.cluster_name, opts) + + # Create node groups + for node_group_config in config.node_groups: + self.create_node_group( + cluster, + node_group_config, + subnet_ids, + opts + ) + + # Deploy add-ons + if config.addons: + self._deploy_addons(cluster, config.addons, opts) + + return cluster + + def create_node_group( + self, + cluster: aws.eks.Cluster, + config: 'EksNodeGroupConfig', + subnet_ids: List[str], + opts: Optional[ResourceOptions] = None + ) -> aws.eks.NodeGroup: + """ + Creates an EKS node group. + + Args: + cluster: EKS cluster + config: Node group configuration + subnet_ids: Subnet IDs for the node group + opts: Optional resource options + + Returns: + aws.eks.NodeGroup: Created node group + """ + # Create node role + node_role = self._create_node_role() + + # Create launch template + launch_template = self._create_launch_template( + cluster.name, + config, + opts + ) + + # Create the node group + return aws.eks.NodeGroup( + f"{cluster.name}-{config.name}", + cluster_name=cluster.name, + node_group_name=config.name, + node_role_arn=node_role.arn, + subnet_ids=subnet_ids, + scaling_config=aws.eks.NodeGroupScalingConfigArgs( + desired_size=config.desired_size, + max_size=config.max_size, + min_size=config.min_size + ), + instance_types=[config.instance_type], + capacity_type=config.capacity_type, + ami_type=config.ami_type, + disk_size=config.disk_size, + labels=config.labels, + tags=self.provider.get_tags(), + launch_template=aws.eks.NodeGroupLaunchTemplateArgs( + id=launch_template.id, + version=launch_template.latest_version + ), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True, + parent=cluster + ), + opts + ) + ) + + def _create_cluster_role(self) -> aws.iam.Role: + """Creates IAM role for EKS cluster.""" + return self.provider.iam.create_role( + 
"eks-cluster", + assume_role_policy={ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": "eks.amazonaws.com" + }, + "Action": "sts:AssumeRole" + }] + }, + policies=[ + "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + "arn:aws:iam::aws:policy/AmazonEKSServicePolicy" + ] + ) + + def _create_node_role(self) -> aws.iam.Role: + """Creates IAM role for EKS nodes.""" + return self.provider.iam.create_role( + "eks-node", + assume_role_policy={ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": "ec2.amazonaws.com" + }, + "Action": "sts:AssumeRole" + }] + }, + policies=[ + "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly" + ] + ) + + def _create_launch_template( + self, + cluster_name: str, + config: 'EksNodeGroupConfig', + opts: Optional[ResourceOptions] = None + ) -> aws.ec2.LaunchTemplate: + """Creates launch template for node group.""" + return aws.ec2.LaunchTemplate( + f"{cluster_name}-{config.name}", + name_prefix=f"{cluster_name}-{config.name}", + block_device_mappings=[aws.ec2.LaunchTemplateBlockDeviceMappingArgs( + device_name="/dev/xvda", + ebs=aws.ec2.LaunchTemplateBlockDeviceMappingEbsArgs( + volume_size=config.disk_size, + volume_type="gp3", + encrypted=True + ) + )], + metadata_options=aws.ec2.LaunchTemplateMetadataOptionsArgs( + http_endpoint="enabled", + http_tokens="required", + http_put_response_hop_limit=2 + ), + monitoring=aws.ec2.LaunchTemplateMonitoringArgs( + enabled=True + ), + tags=self.provider.get_tags(), + opts=opts + ) + + def _enable_irsa( + self, + cluster: aws.eks.Cluster, + cluster_name: str, + opts: Optional[ResourceOptions] = None + ) -> None: + """Enables IAM Roles for Service Accounts.""" + # Create OpenID Connect Provider + oidc_url = cluster.identities[0].oidcs[0].issuer + oidc_provider = aws.iam.OpenIdConnectProvider( + 
f"{cluster_name}-oidc", + client_id_lists=["sts.amazonaws.com"], + thumbprint_lists=["9e99a48a9960b14926bb7f3b02e22da2b0ab7280"], + url=oidc_url, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + parent=cluster + ), + opts + ) + ) + + def _deploy_addons( + self, + cluster: aws.eks.Cluster, + addons: 'EksAddonConfig', + opts: Optional[ResourceOptions] = None + ) -> None: + """Deploys EKS add-ons.""" + if addons.vpc_cni: + aws.eks.Addon( + f"{cluster.name}-vpc-cni", + cluster_name=cluster.name, + addon_name="vpc-cni", + resolve_conflicts="OVERWRITE", + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + parent=cluster + ), + opts + ) + ) + + # Add other add-ons similarly + # TODO: Implement remaining add-ons diff --git a/modules/aws/exceptions.py b/modules/aws/exceptions.py new file mode 100644 index 0000000..900648a --- /dev/null +++ b/modules/aws/exceptions.py @@ -0,0 +1,21 @@ +# pulumi/modules/aws/exceptions.py + +class AWSModuleError(Exception): + """Base exception for AWS module errors.""" + pass + +class ResourceCreationError(AWSModuleError): + """Raised when resource creation fails.""" + pass + +class ConfigurationError(AWSModuleError): + """Raised when configuration is invalid.""" + pass + +class ComplianceError(AWSModuleError): + """Raised when compliance requirements are not met.""" + pass + +class DeploymentError(AWSModuleError): + """Deployment execution error.""" + pass diff --git a/modules/aws/iam.py b/modules/aws/iam.py new file mode 100644 index 0000000..9de5ecf --- /dev/null +++ b/modules/aws/iam.py @@ -0,0 +1,392 @@ +# pulumi/modules/aws/iam.py + +""" +AWS IAM Management Module + +Handles creation and management of IAM resources including: +- Users, Groups, and Roles +- Policies and Policy Attachments +- Cross-account access roles +- Service-linked roles +""" + +from typing import Dict, List, Optional, Any, 
TYPE_CHECKING +import json +import pulumi +import pulumi_aws as aws +from pulumi import ResourceOptions, log + +if TYPE_CHECKING: + from .types import IAMUserConfig + from .provider import AWSProvider + +class IAMManager: + """ + Manages AWS IAM resources and operations. + + This class handles: + - User and group management + - Role and policy management + - Cross-account access configuration + - Service role management + """ + + def __init__(self, provider: 'AWSProvider'): + """ + Initialize IAM manager. + + Args: + provider: AWSProvider instance for resource management + """ + self.provider = provider + + def create_user( + self, + config: IAMUserConfig, + opts: Optional[ResourceOptions] = None + ) -> aws.iam.User: + """ + Creates an IAM user with associated groups and policies. + + Args: + config: IAM user configuration + opts: Optional resource options + + Returns: + aws.iam.User: Created IAM user resource + """ + if opts is None: + opts = ResourceOptions() + + # Create the IAM user + user = aws.iam.User( + f"user-{config.name}", + name=config.name, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + # Create login profile if email is provided + if config.email: + aws.iam.UserLoginProfile( + f"login-{config.name}", + user=user.name, + password_reset_required=True, + opts=ResourceOptions( + provider=self.provider.provider, + parent=user, + protect=True + ) + ) + + # Attach user to groups + for group_name in config.groups: + self.add_user_to_group(user, group_name) + + # Attach policies + for policy_arn in config.policies: + self.attach_user_policy(user, policy_arn) + + return user + + def create_group( + self, + name: str, + policies: Optional[List[str]] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.iam.Group: + """ + Creates an IAM group with attached policies. 
+ + Args: + name: Group name + policies: List of policy ARNs to attach + opts: Optional resource options + + Returns: + aws.iam.Group: Created IAM group + """ + if opts is None: + opts = ResourceOptions() + + group = aws.iam.Group( + f"group-{name}", + name=name, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + if policies: + for policy_arn in policies: + aws.iam.GroupPolicyAttachment( + f"policy-{name}-{policy_arn.split('/')[-1]}", + group=group.name, + policy_arn=policy_arn, + opts=ResourceOptions( + provider=self.provider.provider, + parent=group, + protect=True + ) + ) + + return group + + def create_role( + self, + name: str, + assume_role_policy: Dict[str, Any], + policies: Optional[List[str]] = None, + description: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.iam.Role: + """ + Creates an IAM role with specified trust and permission policies. + + Args: + name: Role name + assume_role_policy: Trust policy document + policies: List of policy ARNs to attach + description: Optional role description + opts: Optional resource options + + Returns: + aws.iam.Role: Created IAM role + """ + if opts is None: + opts = ResourceOptions() + + role = aws.iam.Role( + f"role-{name}", + name=name, + assume_role_policy=json.dumps(assume_role_policy), + description=description, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + if policies: + for policy_arn in policies: + aws.iam.RolePolicyAttachment( + f"policy-{name}-{policy_arn.split('/')[-1]}", + role=role.name, + policy_arn=policy_arn, + opts=ResourceOptions( + provider=self.provider.provider, + parent=role, + protect=True + ) + ) + + return role + + def create_policy( + self, + name: str, + policy_document: Dict[str, Any], + description: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> 
aws.iam.Policy: + """ + Creates an IAM policy. + + Args: + name: Policy name + policy_document: Policy document + description: Optional policy description + opts: Optional resource options + + Returns: + aws.iam.Policy: Created IAM policy + """ + if opts is None: + opts = ResourceOptions() + + return aws.iam.Policy( + f"policy-{name}", + name=name, + policy=json.dumps(policy_document), + description=description, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_service_linked_role( + self, + aws_service_name: str, + description: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.iam.ServiceLinkedRole: + """ + Creates a service-linked role for AWS services. + + Args: + aws_service_name: AWS service name (e.g., 'eks.amazonaws.com') + description: Optional role description + opts: Optional resource options + + Returns: + aws.iam.ServiceLinkedRole: Created service-linked role + """ + if opts is None: + opts = ResourceOptions() + + return aws.iam.ServiceLinkedRole( + f"slr-{aws_service_name.split('.')[0]}", + aws_service_name=aws_service_name, + description=description, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_cross_account_role( + self, + name: str, + trusted_account_id: str, + policies: List[str], + opts: Optional[ResourceOptions] = None + ) -> aws.iam.Role: + """ + Creates a role for cross-account access. 
+ + Args: + name: Role name + trusted_account_id: AWS account ID to trust + policies: List of policy ARNs to attach + opts: Optional resource options + + Returns: + aws.iam.Role: Created cross-account role + """ + assume_role_policy = { + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "AWS": f"arn:aws:iam::{trusted_account_id}:root" + }, + "Action": "sts:AssumeRole" + }] + } + + return self.create_role( + name=name, + assume_role_policy=assume_role_policy, + policies=policies, + description=f"Cross-account access role for {trusted_account_id}", + opts=opts + ) + + def add_user_to_group( + self, + user: aws.iam.User, + group_name: str + ) -> aws.iam.UserGroupMembership: + """ + Adds a user to an IAM group. + + Args: + user: IAM user resource + group_name: Name of the group + + Returns: + aws.iam.UserGroupMembership: Group membership resource + """ + return aws.iam.UserGroupMembership( + f"membership-{user.name}-{group_name}", + user=user.name, + groups=[group_name], + opts=ResourceOptions( + provider=self.provider.provider, + parent=user, + protect=True + ) + ) + + def attach_user_policy( + self, + user: aws.iam.User, + policy_arn: str + ) -> aws.iam.UserPolicyAttachment: + """ + Attaches a policy to an IAM user. + + Args: + user: IAM user resource + policy_arn: ARN of the policy to attach + + Returns: + aws.iam.UserPolicyAttachment: Policy attachment resource + """ + return aws.iam.UserPolicyAttachment( + f"policy-{user.name}-{policy_arn.split('/')[-1]}", + user=user.name, + policy_arn=policy_arn, + opts=ResourceOptions( + provider=self.provider.provider, + parent=user, + protect=True + ) + ) + + def create_instance_profile( + self, + name: str, + role: aws.iam.Role, + opts: Optional[ResourceOptions] = None + ) -> aws.iam.InstanceProfile: + """ + Creates an instance profile for EC2 instances. 
+ + Args: + name: Profile name + role: IAM role to associate + opts: Optional resource options + + Returns: + aws.iam.InstanceProfile: Created instance profile + """ + if opts is None: + opts = ResourceOptions() + + return aws.iam.InstanceProfile( + f"profile-{name}", + name=name, + role=role.name, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + parent=role, + protect=True + ), + opts + ) + ) diff --git a/modules/aws/networking.py b/modules/aws/networking.py new file mode 100644 index 0000000..8a4cf9a --- /dev/null +++ b/modules/aws/networking.py @@ -0,0 +1,554 @@ +# pulumi/modules/aws/networking.py + +""" +AWS Networking Management Module + +Handles creation and management of AWS networking resources including: +- VPCs and subnets +- Route tables and routes +- Security groups and rules +- Internet and NAT gateways +- Network ACLs +- VPC endpoints +""" + +from typing import Dict, List, Optional, Any, Tuple, TYPE_CHECKING +import pulumi +import pulumi_aws as aws +from pulumi import ResourceOptions, log +from .security import SecurityManager + +if TYPE_CHECKING: + from .types import NetworkConfig + from .provider import AWSProvider + +class NetworkManager: + """ + Manages AWS networking resources and operations. + + This class handles: + - VPC and subnet management + - Routing configuration + - Security group management + - Gateway provisioning + - Network ACL configuration + """ + + def __init__(self, provider: 'AWSProvider'): + """ + Initialize Network manager. + + Args: + provider: AWSProvider instance for resource management + """ + self.provider = provider + + def create_vpc( + self, + name: str, + cidr_block: str, + enable_dns_hostnames: bool = True, + enable_dns_support: bool = True, + instance_tenancy: str = "default", + opts: Optional[ResourceOptions] = None + ) -> aws.ec2.Vpc: + """ + Creates a VPC with the specified configuration. 
+ + Args: + name: VPC name + cidr_block: CIDR block for the VPC + enable_dns_hostnames: Enable DNS hostnames + enable_dns_support: Enable DNS support + instance_tenancy: Default instance tenancy + opts: Optional resource options + + Returns: + aws.ec2.Vpc: Created VPC resource + """ + # Add flow logs configuration + def enable_vpc_flow_logs(self, vpc: aws.ec2.Vpc) -> aws.ec2.FlowLog: + log_group = aws.cloudwatch.LogGroup(...) + return aws.ec2.FlowLog( + f"flow-log-{vpc.id}", + vpc_id=vpc.id, + traffic_type="ALL", + log_destination=log_group.arn, + opts=ResourceOptions( + provider=self.provider.provider, + parent=vpc + ) + ) + + if opts is None: + opts = ResourceOptions() + + vpc = aws.ec2.Vpc( + f"vpc-{name}", + cidr_block=cidr_block, + enable_dns_hostnames=enable_dns_hostnames, + enable_dns_support=enable_dns_support, + instance_tenancy=instance_tenancy, + tags={ + **self.provider.get_tags(), + "Name": f"vpc-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + # Enable VPC flow logs by default + self.enable_vpc_flow_logs(vpc) + + return vpc + + def enable_vpc_flow_logs( + self, + vpc: aws.ec2.Vpc, + retention_days: int = 7 + ) -> aws.ec2.FlowLog: + """ + Enables VPC flow logs. 
+ + Args: + vpc: VPC resource + retention_days: Log retention period in days + + Returns: + aws.ec2.FlowLog: Flow log resource + """ + # Create log group for flow logs + log_group = aws.cloudwatch.LogGroup( + f"flow-logs-{vpc.id}", + retention_in_days=retention_days, + tags=self.provider.get_tags(), + opts=ResourceOptions( + provider=self.provider.provider, + parent=vpc + ) + ) + + # Create IAM role for flow logs + assume_role_policy = { + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Principal": { + "Service": "vpc-flow-logs.amazonaws.com" + }, + "Action": "sts:AssumeRole" + }] + } + + role = aws.iam.Role( + f"flow-logs-role-{vpc.id}", + assume_role_policy=pulumi.Output.from_input(assume_role_policy).apply(lambda x: pulumi.Output.json_dumps(x)), + tags=self.provider.get_tags(), + opts=ResourceOptions( + provider=self.provider.provider, + parent=vpc + ) + ) + + # Attach policy to role + aws.iam.RolePolicy( + f"flow-logs-policy-{vpc.id}", + role=role.id, + policy=pulumi.Output.all(log_group_arn=log_group.arn).apply( + lambda args: pulumi.Output.json_dumps({ + "Version": "2012-10-17", + "Statement": [{ + "Effect": "Allow", + "Action": [ + "logs:CreateLogGroup", + "logs:CreateLogStream", + "logs:PutLogEvents", + "logs:DescribeLogGroups", + "logs:DescribeLogStreams" + ], + "Resource": [ + args["log_group_arn"], + f"{args['log_group_arn']}:*" + ] + }] + }) + ), + opts=ResourceOptions( + provider=self.provider.provider, + parent=role + ) + ) + + # Create flow log + return aws.ec2.FlowLog( + f"flow-log-{vpc.id}", + vpc_id=vpc.id, + traffic_type="ALL", + iam_role_arn=role.arn, + log_destination=log_group.arn, + tags=self.provider.get_tags(), + opts=ResourceOptions( + provider=self.provider.provider, + parent=vpc + ) + ) + + def create_subnet( + self, + name: str, + vpc_id: pulumi.Input[str], + cidr_block: str, + availability_zone: str, + map_public_ip: bool = False, + opts: Optional[ResourceOptions] = None + ) -> aws.ec2.Subnet: + """ + Creates a 
subnet in the specified VPC. + + Args: + name: Subnet name + vpc_id: VPC ID + cidr_block: CIDR block for the subnet + availability_zone: AZ for the subnet + map_public_ip: Auto-assign public IPs + opts: Optional resource options + + Returns: + aws.ec2.Subnet: Created subnet resource + """ + if opts is None: + opts = ResourceOptions() + + return aws.ec2.Subnet( + f"subnet-{name}", + vpc_id=vpc_id, + cidr_block=cidr_block, + availability_zone=availability_zone, + map_public_ip_on_launch=map_public_ip, + tags={ + **self.provider.get_tags(), + "Name": f"subnet-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_internet_gateway( + self, + name: str, + vpc_id: pulumi.Input[str], + opts: Optional[ResourceOptions] = None + ) -> aws.ec2.InternetGateway: + """ + Creates and attaches an internet gateway to a VPC. + + Args: + name: Gateway name + vpc_id: VPC ID + opts: Optional resource options + + Returns: + aws.ec2.InternetGateway: Created internet gateway + """ + if opts is None: + opts = ResourceOptions() + + return aws.ec2.InternetGateway( + f"igw-{name}", + vpc_id=vpc_id, + tags={ + **self.provider.get_tags(), + "Name": f"igw-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_nat_gateway( + self, + name: str, + subnet_id: pulumi.Input[str], + opts: Optional[ResourceOptions] = None + ) -> Tuple[aws.ec2.Eip, aws.ec2.NatGateway]: + """ + Creates a NAT gateway with an Elastic IP. 
+ + Args: + name: Gateway name + subnet_id: Subnet ID for the NAT gateway + opts: Optional resource options + + Returns: + Tuple containing: + - Elastic IP resource + - NAT Gateway resource + """ + if opts is None: + opts = ResourceOptions() + + eip = aws.ec2.Eip( + f"eip-{name}", + vpc=True, + tags={ + **self.provider.get_tags(), + "Name": f"eip-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + nat_gateway = aws.ec2.NatGateway( + f"nat-{name}", + subnet_id=subnet_id, + allocation_id=eip.id, + tags={ + **self.provider.get_tags(), + "Name": f"nat-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True, + depends_on=[eip] + ), + opts + ) + ) + + return eip, nat_gateway + + def create_route_table( + self, + name: str, + vpc_id: pulumi.Input[str], + routes: Optional[List[Dict[str, Any]]] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.ec2.RouteTable: + """ + Creates a route table with specified routes. 
+ + Args: + name: Route table name + vpc_id: VPC ID + routes: List of route configurations + opts: Optional resource options + + Returns: + aws.ec2.RouteTable: Created route table + """ + if opts is None: + opts = ResourceOptions() + + route_table = aws.ec2.RouteTable( + f"rt-{name}", + vpc_id=vpc_id, + tags={ + **self.provider.get_tags(), + "Name": f"rt-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + if routes: + for idx, route in enumerate(routes): + aws.ec2.Route( + f"route-{name}-{idx}", + route_table_id=route_table.id, + destination_cidr_block=route.get("destination_cidr_block"), + gateway_id=route.get("gateway_id"), + nat_gateway_id=route.get("nat_gateway_id"), + opts=ResourceOptions( + provider=self.provider.provider, + parent=route_table + ) + ) + + return route_table + + def create_security_group( + self, + name: str, + vpc_id: pulumi.Input[str], + description: str, + ingress_rules: Optional[List[Dict[str, Any]]] = None, + egress_rules: Optional[List[Dict[str, Any]]] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.ec2.SecurityGroup: + """ + Creates a security group with specified rules. 
+ + Args: + name: Security group name + vpc_id: VPC ID + description: Security group description + ingress_rules: List of ingress rule configurations + egress_rules: List of egress rule configurations + opts: Optional resource options + + Returns: + aws.ec2.SecurityGroup: Created security group + """ + if opts is None: + opts = ResourceOptions() + + security_group = aws.ec2.SecurityGroup( + f"sg-{name}", + vpc_id=vpc_id, + description=description, + tags={ + **self.provider.get_tags(), + "Name": f"sg-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + if ingress_rules: + for idx, rule in enumerate(ingress_rules): + aws.ec2.SecurityGroupRule( + f"sgr-{name}-ingress-{idx}", + type="ingress", + security_group_id=security_group.id, + protocol=rule.get("protocol", "tcp"), + from_port=rule.get("from_port"), + to_port=rule.get("to_port"), + cidr_blocks=rule.get("cidr_blocks"), + source_security_group_id=rule.get("source_security_group_id"), + opts=ResourceOptions( + provider=self.provider.provider, + parent=security_group + ) + ) + + if egress_rules: + for idx, rule in enumerate(egress_rules): + aws.ec2.SecurityGroupRule( + f"sgr-{name}-egress-{idx}", + type="egress", + security_group_id=security_group.id, + protocol=rule.get("protocol", "-1"), + from_port=rule.get("from_port", 0), + to_port=rule.get("to_port", 0), + cidr_blocks=rule.get("cidr_blocks", ["0.0.0.0/0"]), + opts=ResourceOptions( + provider=self.provider.provider, + parent=security_group + ) + ) + + return security_group + + def create_vpc_endpoint( + self, + name: str, + vpc_id: pulumi.Input[str], + service_name: str, + subnet_ids: Optional[List[pulumi.Input[str]]] = None, + security_group_ids: Optional[List[pulumi.Input[str]]] = None, + vpc_endpoint_type: str = "Interface", + private_dns_enabled: bool = True, + opts: Optional[ResourceOptions] = None + ) -> aws.ec2.VpcEndpoint: + """ + Creates a VPC endpoint for AWS services. 
+ + Args: + name: Endpoint name + vpc_id: VPC ID + service_name: AWS service name + subnet_ids: List of subnet IDs for the endpoint + security_group_ids: List of security group IDs + vpc_endpoint_type: Endpoint type (Interface/Gateway) + private_dns_enabled: Enable private DNS + opts: Optional resource options + + Returns: + aws.ec2.VpcEndpoint: Created VPC endpoint + """ + if opts is None: + opts = ResourceOptions() + + return aws.ec2.VpcEndpoint( + f"vpce-{name}", + vpc_id=vpc_id, + service_name=service_name, + vpc_endpoint_type=vpc_endpoint_type, + subnet_ids=subnet_ids, + security_group_ids=security_group_ids, + private_dns_enabled=private_dns_enabled, + tags={ + **self.provider.get_tags(), + "Name": f"vpce-{name}" + }, + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def deploy_network_infrastructure(self) -> Dict[str, Any]: + """Deploys networking infrastructure and returns outputs.""" + try: + # Create VPC + vpc = self.create_vpc( + "main", + self.provider.config.network.vpc_cidr + ) + + # Create subnets + subnets = [] + for i, az in enumerate(self.provider.config.network.availability_zones): + subnet = self.create_subnet( + f"subnet-{i}", + vpc.id, + self.provider.config.network.subnet_cidrs["private"][i], + az, + opts=ResourceOptions( + provider=self.provider.provider, + parent=vpc + ) + ) + subnets.append(subnet) + + return { + "vpc_id": vpc.id, + "subnet_ids": [s.id for s in subnets] + } + + except Exception as e: + log.error(f"Failed to deploy network infrastructure: {str(e)}") + raise diff --git a/modules/aws/organization.py b/modules/aws/organization.py new file mode 100644 index 0000000..478cc72 --- /dev/null +++ b/modules/aws/organization.py @@ -0,0 +1,424 @@ +# pulumi/modules/aws/organization.py + +"""AWS Organizations management and operations.""" +from typing import Dict, List, Optional, Tuple, Any, TYPE_CHECKING +import pulumi +import pulumi_aws as aws +from pulumi import 
ResourceOptions, log + +if TYPE_CHECKING: + from .types import TenantAccountConfig + from .provider import AWSProvider + from .resources import create_tenant_account, assume_role_in_tenant_account, deploy_tenant_resources + +class AWSOrganization: + """ + Manages AWS Organizations resources and operations. + + This class handles: + - Organization creation and management + - Organizational Unit (OU) operations + - Account management + - Control Tower integration + """ + + def __init__(self, provider: 'AWSProvider'): + """ + Initialize AWS Organizations manager. + + Args: + provider: AWSProvider instance for resource management + """ + self.provider = provider + self._organization: Optional[aws.organizations.Organization] = None + self._org_data: Optional[aws.organizations.GetOrganizationResult] = None + + def get_or_create(self) -> Tuple[aws.organizations.Organization, aws.organizations.GetOrganizationResult]: + """ + Retrieves existing AWS Organization or creates a new one. + + Returns: + Tuple containing: + - Organization resource + - Organization data + + Raises: + Exception: If unable to retrieve or create organization + """ + try: + # Try to get existing organization + org_data = aws.organizations.get_organization( + opts=pulumi.InvokeOptions(provider=self.provider.provider + ) + log.info(f"Found existing Organization with ID: {org_data.id}") + + # Create resource reference to existing organization + organization = aws.organizations.Organization.get( + "existing_organization", + id=org_data.id, + opts=ResourceOptions( + provider=self.provider.provider, + protect=True + ) + ) + return organization, org_data + + except Exception as e: + log.warn(f"No existing organization found, creating new: {str(e)}") + + # Create new organization with all features enabled + organization = aws.organizations.Organization( + "aws_organization", + feature_set="ALL", + aws_service_access_principals=[ + "cloudtrail.amazonaws.com", + "config.amazonaws.com", + "sso.amazonaws.com" + ], 
+ enabled_policy_types=[ + "SERVICE_CONTROL_POLICY", + "TAG_POLICY" + ], + opts=ResourceOptions( + provider=self.provider.provider, + protect=True + ) + ) + + # Get organization data after creation + org_data = aws.organizations.get_organization( + opts=pulumi.InvokeOptions(provider=self.provider.provider) + ) + + return organization, org_data + + def get_root_id(self, org_data: aws.organizations.GetOrganizationResult) -> str: + """ + Gets the root ID from organization data. + + Args: + org_data: Organization data containing roots information + + Returns: + str: The root ID + + Raises: + Exception: If no roots found + """ + try: + if org_data.roots: + root = org_data.roots[0] + root_id = root.id + log.info(f"Organization Root ID: {root_id}") + return root_id + else: + raise Exception("No roots found in the organization") + except Exception as e: + log.error(f"Error fetching organization roots: {str(e)}") + raise + + def create_units( + self, + organization: aws.organizations.Organization, + root_id: str, + unit_names: List[str] + ) -> Dict[str, aws.organizations.OrganizationalUnit]: + """ + Creates Organizational Units under the organization root. 
+ + Args: + organization: The AWS Organization resource + root_id: The root ID to create OUs under + unit_names: List of OU names to create + + Returns: + Dict[str, OrganizationalUnit]: Created OUs mapped by name + + Raises: + ValueError: If root_id is invalid + """ + if not root_id: + raise ValueError("Root ID is required to create Organizational Units") + + organizational_units = {} + + for unit_name in unit_names: + # Create OU with standard naming + ou = aws.organizations.OrganizationalUnit( + f"ou_{unit_name.lower()}", + name=unit_name, + parent_id=root_id, + tags=self.provider.get_tags(), + opts=ResourceOptions( + provider=self.provider.provider, + parent=organization, + protect=True + ) + ) + organizational_units[unit_name] = ou + + # Create default policies for the OU + self._create_ou_policies(ou, unit_name) + + return organizational_units + + def _create_ou_policies( + self, + ou: aws.organizations.OrganizationalUnit, + ou_name: str + ) -> None: + """ + Creates default policies for an Organizational Unit. + + Args: + ou: The OU to create policies for + ou_name: Name of the OU for policy naming + """ + # Create Service Control Policy + scp = aws.organizations.Policy( + f"scp_{ou_name.lower()}", + content=self._get_default_scp_content(ou_name), + name=f"{ou_name}-BaselinePolicy", + type="SERVICE_CONTROL_POLICY", + tags=self.provider.get_tags(), + opts=ResourceOptions( + provider=self.provider.provider, + parent=ou, + protect=True + ) + ) + + # Attach policy to OU + aws.organizations.PolicyAttachment( + f"scp_attachment_{ou_name.lower()}", + policy_id=scp.id, + target_id=ou.id, + opts=ResourceOptions( + provider=self.provider.provider, + parent=scp, + protect=True + ) + ) + + def _get_default_scp_content(self, ou_name: str) -> str: + """ + Gets default SCP content based on OU type. 
+ + Args: + ou_name: Name of the OU to determine policy content + + Returns: + str: JSON policy content + """ + if ou_name == "Security": + return """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "RequireIMDSv2", + "Effect": "Deny", + "Action": "ec2:RunInstances", + "Resource": "arn:aws:ec2:*:*:instance/*", + "Condition": { + "StringNotEquals": { + "ec2:MetadataHttpTokens": "required" + } + } + } + ] + }""" + elif ou_name == "Workloads": + return """{ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "DenyUnencryptedVolumes", + "Effect": "Deny", + "Action": "ec2:CreateVolume", + "Resource": "*", + "Condition": { + "Bool": { + "aws:SecureTransport": "false" + } + } + } + ] + }""" + else: + return """{ + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "*", + "Resource": "*" + } + ] + }""" + + def create_account( + self, + name: str, + email: str, + parent_id: str, + role_name: str = "OrganizationAccountAccessRole" + ) -> aws.organizations.Account: + """ + Creates a new AWS account in the organization. + + Args: + name: Account name + email: Account root email + parent_id: Parent OU ID + role_name: IAM role name for account access + + Returns: + aws.organizations.Account: Created account resource + """ + account = aws.organizations.Account( + f"account_{name.lower()}", + name=name, + email=email, + parent_id=parent_id, + role_name=role_name, + tags=self.provider.get_tags(), + opts=ResourceOptions( + provider=self.provider.provider, + protect=True + ) + ) + + # Wait for account to be created before returning + account.id.apply(lambda id: log.info(f"Created account {name} with ID: {id}")) + + return account + + def move_account( + self, + account: aws.organizations.Account, + source_parent: str, + destination_parent: str + ) -> None: + """ + Moves an AWS account between organizational units. 
+ + Args: + account: Account to move + source_parent: Source parent ID + destination_parent: Destination parent ID + """ + aws.organizations.AccountParent( + f"move_{account.name}", + account_id=account.id, + parent_id=destination_parent, + opts=ResourceOptions( + provider=self.provider.provider, + parent=account, + protect=True, + replace_on_changes=["parent_id"] + ) + ) + + def enable_aws_service_access( + self, + service_principal: str + ) -> aws.organizations.DelegatedService: + """ + Enables AWS service access in the organization. + + Args: + service_principal: Service principal to enable (e.g. 'config.amazonaws.com') + + Returns: + aws.organizations.DelegatedService: Service access resource + """ + return aws.organizations.DelegatedService( + f"service_access_{service_principal.split('.')[0]}", + service_principal=service_principal, + opts=ResourceOptions( + provider=self.provider.provider, + protect=True + ) + ) + + def enable_policy_type( + self, + policy_type: str + ) -> aws.organizations.OrganizationalPolicyAttachment: + """ + Enables a policy type in the organization. + + Args: + policy_type: Type of policy to enable (e.g. 'SERVICE_CONTROL_POLICY') + + Returns: + aws.organizations.OrganizationalPolicyAttachment: Policy type enablement + """ + return aws.organizations.OrganizationalPolicyAttachment( + f"enable_policy_{policy_type.lower()}", + policy_type=policy_type, + target_id=self._org_data.roots[0].id if self._org_data else None, + opts=ResourceOptions( + provider=self.provider.provider, + protect=True + ) + ) + +def deploy_tenant_infrastructure( + tenant_config: TenantAccountConfig, + parent_provider: aws.Provider, + organization_id: str, + depends_on: Optional[List[pulumi.Resource]] = None +) -> Dict[str, Any]: + """ + Deploys infrastructure for a tenant account. 
+ + Args: + tenant_config: Tenant account configuration + parent_provider: Parent AWS provider + organization_id: AWS Organization ID + depends_on: Optional resource dependencies + + Returns: + Dict[str, Any]: Tenant infrastructure outputs + + TODO: + - Implement tenant-specific compliance controls + - Add tenant resource monitoring + - Enhance tenant isolation mechanisms + - Add tenant cost tracking + - Implement tenant backup strategies + """ + try: + # Create tenant account + tenant_account = create_tenant_account( + tenant_config, + parent_provider, + organization_id, + depends_on + ) + + # Assume role in tenant account + tenant_provider = assume_role_in_tenant_account( + tenant_account, + "OrganizationAccountAccessRole", + tenant_config.region, + parent_provider + ) + + # Deploy tenant resources + tenant_resources = deploy_tenant_resources( + tenant_provider, + tenant_account, + tenant_config + ) + + return { + "account_id": tenant_account.id, + "account_arn": tenant_account.arn, + "resources": tenant_resources + } + + except Exception as e: + pulumi.log.error(f"Error deploying tenant infrastructure: {str(e)}") + raise diff --git a/modules/aws/provider.py b/modules/aws/provider.py new file mode 100644 index 0000000..e191ccc --- /dev/null +++ b/modules/aws/provider.py @@ -0,0 +1,352 @@ +"""AWS Provider management and initialization.""" +from typing import Optional, Dict, Any, List, Tuple, TYPE_CHECKING +import os +import pulumi +import pulumi_aws as aws +from pulumi import ResourceOptions, log + +from core.metadata import collect_git_info, generate_compliance_labels, generate_git_labels +from .types import AWSConfig, AWSManagers, AWSDeployer +from .exceptions import ComplianceError, ResourceCreationError +from .security import SecurityManager, setup_cloudtrail +from .networking import NetworkManager +from .organization import AWSOrganization +from .resources import ResourceManager +from .eks import EksManager +from .deploy import AWSDeployer + +if 
TYPE_CHECKING: + from pulumi import Resource + from .organization import AWSOrganization + from .resources import ResourceManager + +class AWSProvider: + """ + Manages AWS provider initialization and global configuration. + + This class handles: + - Provider initialization and credentials + - Global tag management + - Resource deployment orchestration + - Compliance metadata integration + """ + + def __init__(self, config: AWSConfig): + """ + Initialize AWS provider with configuration. + + Args: + config (AWSConfig): AWS configuration settings + """ + self.config = config + self._provider: Optional[aws.Provider] = None + self._tags: Dict[str, str] = {} + self._organization: Optional[AWSOrganization] = None + self._resources: Optional[ResourceManager] = None + self._eks: Optional[EksManager] = None + + @property + def provider(self) -> aws.Provider: + """Get or create the AWS provider instance.""" + if not self._provider: + self._provider = self._initialize_provider() + return self._provider + + @property + def organization(self) -> AWSOrganization: + """Get or create the AWS Organizations manager.""" + if not self._organization: + self._organization = AWSOrganization(self) + return self._organization + + @property + def resources(self) -> ResourceManager: + """Get or create the Resource manager.""" + if not self._resources: + self._resources = ResourceManager(self) + return self._resources + + @property + def eks(self) -> EksManager: + """Get or create the EKS manager.""" + if not self._eks: + self._eks = EksManager(self) + return self._eks + + def _initialize_provider(self) -> aws.Provider: + """ + Initialize the AWS provider with credentials and region. 
+ + Returns: + aws.Provider: Initialized AWS provider + """ + aws_config = pulumi.Config("aws") + + # Get credentials with fallback chain + access_key = ( + os.getenv("AWS_ACCESS_KEY_ID") or + aws_config.get("access_key_id") or + None + ) + secret_key = ( + os.getenv("AWS_SECRET_ACCESS_KEY") or + aws_config.get("secret_access_key") or + None + ) + + # Create provider with credentials and configuration + return aws.Provider( + "awsProvider", + access_key=access_key, + secret_key=secret_key, + profile=self.config.profile, + region=self.config.region, + # Add default tags to all resources + default_tags={"tags": self.get_tags()}, + opts=ResourceOptions( + protect=True, # Protect provider from accidental deletion + delete_before_replace=False, + ), + ) + + def get_tags(self) -> Dict[str, str]: + """ + Get global AWS resource tags including compliance metadata. + + Returns: + Dict[str, str]: Combined tags from compliance and git metadata + """ + if not self._tags: + # Get git metadata + git_info = collect_git_info() + + # Generate compliance labels + compliance_labels = generate_compliance_labels(self.config.compliance) + git_labels = generate_git_labels(git_info) + + # Combine all tags + self._tags = { + **compliance_labels, + **git_labels, + "managed-by": "konductor", + "environment": self.config.profile, + "region": self.config.region, + "account-id": self.config.account_id, + } + + log.info(f"Generated AWS resource tags: {self._tags}") + + return self._tags + + def deploy( + self, + dependencies: Optional[List[pulumi.Resource]], + managers: AWSManagers + ) -> Tuple[str, pulumi.Resource, Dict[str, Any]]: + """ + Deploys AWS infrastructure using provided managers. + Pulumi automatically handles dependencies and validation. 
+ """ + try: + # Initialize core infrastructure + org_resource, org_data = managers["organization"].get_or_create() + + # Get organization root ID + root_id = managers["organization"].get_root_id(org_data) + + # Deploy security controls - dependencies handled automatically + security_outputs = managers["security"].deploy_security_controls() + + # Deploy networking - dependencies handled automatically + network_outputs = managers["networking"].deploy_network_infrastructure() + + # Deploy resources - dependencies handled automatically + resource_outputs = managers["resources"].deploy_resources() + + # Outputs are validated automatically by Pulumi + outputs = { + **security_outputs, + **network_outputs, + **resource_outputs, + "organization_id": org_resource.id, + "organization_arn": org_resource.arn, + "root_id": root_id + } + + return "1.0.0", org_resource, outputs + + except Exception as e: + log.error(f"Deployment failed: {str(e)}") + raise + + def validate_config(self) -> None: + """ + Validate the AWS provider configuration. 
+ + Raises: + ValueError: If configuration is invalid + """ + if not self.config.account_id: + raise ValueError("AWS account ID is required") + + if not self.config.region: + raise ValueError("AWS region is required") + + if self.config.control_tower.enabled: + if not self.config.control_tower.execution_role_arn: + raise ValueError("Control Tower execution role ARN is required when enabled") + + def validate_compliance( + self, + resource: pulumi.Resource, + compliance_config: Dict[str, Any] + ) -> None: + """Validates resource compliance with requirements.""" + try: + # Validate NIST controls + if "nist" in compliance_config: + self._validate_nist_controls(resource, compliance_config["nist"]) + + # Validate FISMA requirements + if "fisma" in compliance_config: + self._validate_fisma_requirements(resource, compliance_config["fisma"]) + + # Validate custom controls + if "custom" in compliance_config: + self._validate_custom_controls(resource, compliance_config["custom"]) + + except Exception as e: + raise ComplianceError(f"Compliance validation failed: {str(e)}") + + def _validate_nist_controls(self, resource: pulumi.Resource, controls: Dict[str, Any]) -> None: + """Validate NIST controls.""" + pass # TODO: Implement NIST validation + + def _validate_fisma_requirements(self, resource: pulumi.Resource, requirements: Dict[str, Any]) -> None: + """Validate FISMA requirements.""" + pass # TODO: Implement FISMA validation + + def _validate_custom_controls(self, resource: pulumi.Resource, controls: Dict[str, Any]) -> None: + """Validate custom controls.""" + pass # TODO: Implement custom validation + + +def deploy_security_controls( + config: AWSConfig, + provider: aws.Provider, + depends_on: Optional[List[pulumi.Resource]] = None +) -> Dict[str, Any]: + """ + Deploys security controls and compliance mechanisms. 
+ + Args: + config: AWS configuration + provider: AWS provider + depends_on: Optional resource dependencies + + Returns: + Dict[str, Any]: Security control outputs + + TODO: + - Implement automated security assessments + - Add security control validation + - Enhance audit logging + - Add automated remediation + - Implement security metrics collection + """ + try: + # Enable AWS Security Hub + security_hub = aws.securityhub.Account( + "security-hub", + enable_default_standards=True, + opts=ResourceOptions( + provider=provider, + depends_on=depends_on, + protect=True + ) + ) + + # Enable GuardDuty + guard_duty = aws.guardduty.Detector( + "guard-duty", + enable=True, + finding_publishing_frequency="ONE_HOUR", + opts=ResourceOptions( + provider=provider, + depends_on=depends_on, + protect=True + ) + ) + + # Configure CloudTrail + cloud_trail = setup_cloudtrail( + config, + provider, + depends_on + ) + + return { + "security_hub_id": security_hub.id, + "guard_duty_id": guard_duty.id, + "cloud_trail_arn": cloud_trail.arn + } + + except Exception as e: + pulumi.log.error(f"Error deploying security controls: {str(e)}") + raise + + +def setup_cloudtrail( + config: AWSConfig, + provider: aws.Provider, + depends_on: Optional[List[pulumi.Resource]] = None +) -> aws.cloudtrail.Trail: + """ + Sets up AWS CloudTrail for audit logging. 
+ + Args: + config: AWS configuration + provider: AWS provider + depends_on: Optional resource dependencies + + Returns: + aws.cloudtrail.Trail: Configured CloudTrail + + TODO: + - Enhance log encryption + - Add log validation + - Implement log analysis + - Add automated alerting + - Enhance retention policies + """ + try: + # Create S3 bucket for CloudTrail logs + trail_bucket = aws.s3.Bucket( + "cloudtrail-logs", + force_destroy=True, + opts=ResourceOptions( + provider=provider, + depends_on=depends_on, + protect=True + ) + ) + + # Create CloudTrail + trail = aws.cloudtrail.Trail( + "audit-trail", + s3_bucket_name=trail_bucket.id, + include_global_service_events=True, + is_multi_region_trail=True, + enable_logging=True, + opts=ResourceOptions( + provider=provider, + depends_on=[trail_bucket], + protect=True + ) + ) + + return trail + + except Exception as e: + pulumi.log.error(f"Error setting up CloudTrail: {str(e)}") + raise diff --git a/modules/aws/resources.py b/modules/aws/resources.py new file mode 100644 index 0000000..e09cee4 --- /dev/null +++ b/modules/aws/resources.py @@ -0,0 +1,438 @@ +# pulumi/modules/aws/resources.py + +""" +AWS Resource Management Module + +Handles creation and management of AWS resources including: +- S3 buckets and objects +- EC2 instances and volumes +- IAM roles and policies +- VPC and networking components +- Security groups and rules +""" + +from typing import Dict, List, Optional, Any, Tuple, TYPE_CHECKING +import pulumi +import pulumi_aws as aws +from pulumi import ResourceOptions, log + +from .types import AWSConfig +from .exceptions import ResourceCreationError +from .security import SecurityManager + +if TYPE_CHECKING: + from .provider import AWSProvider + from pulumi import Resource + +class ResourceManager: + """ + Manages AWS resources and operations. 
+ + This class handles: + - Resource creation and configuration + - Resource tagging and metadata + - Resource protection settings + - Resource dependencies + """ + + def __init__(self, provider: 'AWSProvider'): + """ + Initialize Resource manager. + + Args: + provider: AWSProvider instance for resource management + """ + self.provider = provider + + def create_s3_bucket( + self, + name: str, + versioning: bool = True, + encryption: bool = True, + public_access_block: bool = True, + opts: Optional[ResourceOptions] = None + ) -> aws.s3.Bucket: + """ + Creates an S3 bucket with standard security configurations. + + Args: + name: Bucket name + versioning: Enable versioning + encryption: Enable encryption + public_access_block: Block public access + opts: Optional resource options + + Returns: + aws.s3.Bucket: Created S3 bucket + """ + if opts is None: + opts = ResourceOptions() + + # Create the bucket + bucket = aws.s3.Bucket( + name, + versioning=aws.s3.BucketVersioningArgs( + enabled=versioning + ) if versioning else None, + server_side_encryption_configuration=aws.s3.BucketServerSideEncryptionConfigurationArgs( + rule=aws.s3.BucketServerSideEncryptionConfigurationRuleArgs( + apply_server_side_encryption_by_default=aws.s3.BucketServerSideEncryptionConfigurationRuleApplyServerSideEncryptionByDefaultArgs( + sse_algorithm="AES256" + ) + ) + ) if encryption else None, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + # Block public access if enabled + if public_access_block: + aws.s3.BucketPublicAccessBlock( + f"{name}-public-access-block", + bucket=bucket.id, + block_public_acls=True, + block_public_policy=True, + ignore_public_acls=True, + restrict_public_buckets=True, + opts=ResourceOptions( + provider=self.provider.provider, + parent=bucket, + protect=True + ) + ) + + return bucket + + def create_kms_key( + self, + name: str, + description: str, + 
deletion_window: int = 30, + enable_key_rotation: bool = True, + opts: Optional[ResourceOptions] = None + ) -> aws.kms.Key: + """ + Creates a KMS key with standard configuration. + + Args: + name: Key name + description: Key description + deletion_window: Key deletion window in days + enable_key_rotation: Enable automatic key rotation + opts: Optional resource options + + Returns: + aws.kms.Key: Created KMS key + """ + if opts is None: + opts = ResourceOptions() + + key = aws.kms.Key( + name, + description=description, + deletion_window_in_days=deletion_window, + enable_key_rotation=enable_key_rotation, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + # Create an alias for the key + aws.kms.Alias( + f"{name}-alias", + name=f"alias/{name}", + target_key_id=key.id, + opts=ResourceOptions( + provider=self.provider.provider, + parent=key, + protect=True + ) + ) + + return key + + def create_cloudwatch_log_group( + self, + name: str, + retention_days: int = 30, + kms_key_id: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.cloudwatch.LogGroup: + """ + Creates a CloudWatch Log Group. + + Args: + name: Log group name + retention_days: Log retention period + kms_key_id: Optional KMS key for encryption + opts: Optional resource options + + Returns: + aws.cloudwatch.LogGroup: Created log group + """ + if opts is None: + opts = ResourceOptions() + + return aws.cloudwatch.LogGroup( + name, + retention_in_days=retention_days, + kms_key_id=kms_key_id, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_sns_topic( + self, + name: str, + kms_master_key_id: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.sns.Topic: + """ + Creates an SNS topic. 
+ + Args: + name: Topic name + kms_master_key_id: Optional KMS key for encryption + opts: Optional resource options + + Returns: + aws.sns.Topic: Created SNS topic + """ + if opts is None: + opts = ResourceOptions() + + return aws.sns.Topic( + name, + kms_master_key_id=kms_master_key_id, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_sqs_queue( + self, + name: str, + visibility_timeout_seconds: int = 30, + message_retention_seconds: int = 345600, + kms_master_key_id: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.sqs.Queue: + """ + Creates an SQS queue. + + Args: + name: Queue name + visibility_timeout_seconds: Message visibility timeout + message_retention_seconds: Message retention period + kms_master_key_id: Optional KMS key for encryption + opts: Optional resource options + + Returns: + aws.sqs.Queue: Created SQS queue + """ + if opts is None: + opts = ResourceOptions() + + return aws.sqs.Queue( + name, + visibility_timeout_seconds=visibility_timeout_seconds, + message_retention_seconds=message_retention_seconds, + kms_master_key_id=kms_master_key_id, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_dynamodb_table( + self, + name: str, + hash_key: str, + range_key: Optional[str] = None, + attributes: List[Dict[str, str]] = None, + billing_mode: str = "PAY_PER_REQUEST", + opts: Optional[ResourceOptions] = None + ) -> aws.dynamodb.Table: + """ + Creates a DynamoDB table. 
+ + Args: + name: Table name + hash_key: Partition key name + range_key: Optional sort key name + attributes: List of attribute definitions + billing_mode: Billing mode (PAY_PER_REQUEST or PROVISIONED) + opts: Optional resource options + + Returns: + aws.dynamodb.Table: Created DynamoDB table + """ + if opts is None: + opts = ResourceOptions() + + if attributes is None: + attributes = [{"name": hash_key, "type": "S"}] + if range_key: + attributes.append({"name": range_key, "type": "S"}) + + return aws.dynamodb.Table( + name, + attributes=[ + aws.dynamodb.TableAttributeArgs(**attr) + for attr in attributes + ], + hash_key=hash_key, + range_key=range_key, + billing_mode=billing_mode, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_ecr_repository( + self, + name: str, + image_tag_mutability: str = "IMMUTABLE", + scan_on_push: bool = True, + opts: Optional[ResourceOptions] = None + ) -> aws.ecr.Repository: + """ + Creates an ECR repository. + + Args: + name: Repository name + image_tag_mutability: Tag mutability setting + scan_on_push: Enable image scanning on push + opts: Optional resource options + + Returns: + aws.ecr.Repository: Created ECR repository + """ + if opts is None: + opts = ResourceOptions() + + return aws.ecr.Repository( + name, + image_tag_mutability=image_tag_mutability, + image_scanning_configuration=aws.ecr.RepositoryImageScanningConfigurationArgs( + scan_on_push=scan_on_push + ), + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_backup_vault( + self, + name: str, + kms_key_arn: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.backup.Vault: + """ + Creates an AWS Backup vault. 
+ + Args: + name: Vault name + kms_key_arn: Optional KMS key ARN for encryption + opts: Optional resource options + + Returns: + aws.backup.Vault: Created backup vault + """ + if opts is None: + opts = ResourceOptions() + + return aws.backup.Vault( + name, + kms_key_arn=kms_key_arn, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_backup_plan( + self, + name: str, + vault_name: str, + schedule: str, + retention_days: int = 30, + opts: Optional[ResourceOptions] = None + ) -> aws.backup.Plan: + """ + Creates an AWS Backup plan. + + Args: + name: Plan name + vault_name: Backup vault name + schedule: Backup schedule expression + retention_days: Backup retention period + opts: Optional resource options + + Returns: + aws.backup.Plan: Created backup plan + """ + if opts is None: + opts = ResourceOptions() + + backup_plan: aws.backup.Plan = aws.backup.Plan( + name, + rules=[aws.backup.PlanRuleArgs( + rule_name=f"{name}-rule", + target_vault_name=vault_name, + schedule=schedule, + lifecycle=aws.backup.PlanRuleLifecycleArgs( + delete_after=retention_days + ) + )], + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + return backup_plan diff --git a/modules/aws/security.py b/modules/aws/security.py new file mode 100644 index 0000000..be3b05b --- /dev/null +++ b/modules/aws/security.py @@ -0,0 +1,497 @@ +# pulumi/modules/aws/security.py + +""" +AWS Security Management Module + +Handles creation and management of AWS security resources including: +- KMS keys and key policies +- Security group management +- WAF configurations +- Certificate management +- Secret management +- Security Hub enablement +""" + +from typing import Dict, List, Optional, Any, Union, TYPE_CHECKING +import json +import pulumi +import pulumi_aws as aws +from pulumi import ResourceOptions, 
log +from .iam import IAMManager + +if TYPE_CHECKING: + from .types import SecurityConfig + from .provider import AWSProvider + +class SecurityManager: + """ + Manages AWS security resources and operations. + + This class handles: + - KMS key management + - Security group configurations + - WAF and security rules + - Certificate and secret management + - Security Hub and GuardDuty + """ + + def __init__(self, provider: 'AWSProvider'): + """ + Initialize Security manager. + + Args: + provider: AWSProvider instance for resource management + """ + self.provider = provider + + def create_kms_key( + self, + name: str, + description: str, + key_usage: str = "ENCRYPT_DECRYPT", + deletion_window: int = 30, + enable_key_rotation: bool = True, + policy: Optional[Dict[str, Any]] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.kms.Key: + """ + Creates a KMS key with specified configuration. + + Args: + name: Key name + description: Key description + key_usage: Key usage type + deletion_window: Key deletion window in days + enable_key_rotation: Enable automatic key rotation + policy: Key policy document + opts: Optional resource options + + Returns: + aws.kms.Key: Created KMS key + """ + if opts is None: + opts = ResourceOptions() + + # Create the KMS key + key = aws.kms.Key( + f"key-{name}", + description=description, + deletion_window_in_days=deletion_window, + enable_key_rotation=enable_key_rotation, + key_usage=key_usage, + policy=json.dumps(policy) if policy else None, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + # Create an alias for the key + aws.kms.Alias( + f"alias-{name}", + name=f"alias/{name}", + target_key_id=key.id, + opts=ResourceOptions( + provider=self.provider.provider, + parent=key, + protect=True + ) + ) + + return key + + def create_certificate( + self, + domain_name: str, + validation_method: str = "DNS", + subject_alternative_names: 
Optional[List[str]] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.acm.Certificate: + """ + Creates an ACM certificate. + + Args: + domain_name: Primary domain name + validation_method: Certificate validation method + subject_alternative_names: Additional domain names + opts: Optional resource options + + Returns: + aws.acm.Certificate: Created certificate + """ + if opts is None: + opts = ResourceOptions() + + certificate = aws.acm.Certificate( + f"cert-{domain_name}", + domain_name=domain_name, + validation_method=validation_method, + subject_alternative_names=subject_alternative_names, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + return certificate + + def create_secret( + self, + name: str, + secret_string: Union[str, Dict[str, Any]], + description: Optional[str] = None, + kms_key_id: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.secretsmanager.Secret: + """ + Creates a Secrets Manager secret. 
+ + Args: + name: Secret name + secret_string: Secret value or dictionary + description: Secret description + kms_key_id: KMS key ID for encryption + opts: Optional resource options + + Returns: + aws.secretsmanager.Secret: Created secret + """ + if opts is None: + opts = ResourceOptions() + + # Create the secret + secret = aws.secretsmanager.Secret( + f"secret-{name}", + name=name, + description=description, + kms_key_id=kms_key_id, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + # Create the secret version + secret_string_value = ( + json.dumps(secret_string) + if isinstance(secret_string, dict) + else secret_string + ) + + aws.secretsmanager.SecretVersion( + f"secret-version-{name}", + secret_id=secret.id, + secret_string=secret_string_value, + opts=ResourceOptions( + provider=self.provider.provider, + parent=secret, + protect=True + ) + ) + + return secret + + def enable_security_hub( + self, + enable_default_standards: bool = True, + control_findings_visible: bool = True, + opts: Optional[ResourceOptions] = None + ) -> aws.securityhub.Account: + """ + Enables AWS Security Hub for the account. 
+ + Args: + enable_default_standards: Enable default security standards + control_findings_visible: Make findings visible + opts: Optional resource options + + Returns: + aws.securityhub.Account: Security Hub account configuration + """ + if opts is None: + opts = ResourceOptions() + + return aws.securityhub.Account( + "security-hub", + enable_default_standards=enable_default_standards, + control_findings_visible=control_findings_visible, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def enable_guardduty( + self, + enable_s3_logs: bool = True, + opts: Optional[ResourceOptions] = None + ) -> aws.guardduty.Detector: + """ + Enables Amazon GuardDuty for the account. + + Args: + enable_s3_logs: Enable S3 log monitoring + opts: Optional resource options + + Returns: + aws.guardduty.Detector: GuardDuty detector + """ + if opts is None: + opts = ResourceOptions() + + return aws.guardduty.Detector( + "guardduty-detector", + enable=True, + finding_publishing_frequency="ONE_HOUR", + datasources=aws.guardduty.DetectorDatasourcesArgs( + s3_logs=aws.guardduty.DetectorDatasourcesS3LogsArgs( + enable=enable_s3_logs + ) + ), + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_waf_web_acl( + self, + name: str, + rules: List[Dict[str, Any]], + description: Optional[str] = None, + scope: str = "REGIONAL", + opts: Optional[ResourceOptions] = None + ) -> aws.wafv2.WebAcl: + """ + Creates a WAFv2 Web ACL. 
+ + Args: + name: Web ACL name + rules: List of WAF rules + description: Web ACL description + scope: WAF scope (REGIONAL or CLOUDFRONT) + opts: Optional resource options + + Returns: + aws.wafv2.WebAcl: Created Web ACL + """ + if opts is None: + opts = ResourceOptions() + + return aws.wafv2.WebAcl( + f"waf-{name}", + name=name, + description=description, + scope=scope, + default_action=aws.wafv2.WebAclDefaultActionArgs( + allow=aws.wafv2.WebAclDefaultActionAllowArgs() + ), + rules=rules, + visibility_config=aws.wafv2.WebAclVisibilityConfigArgs( + cloudwatch_metrics_enabled=True, + metric_name=f"waf-{name}-metric", + sampled_requests_enabled=True + ), + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_cloudwatch_log_group( + self, + name: str, + retention_days: int = 30, + kms_key_id: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.cloudwatch.LogGroup: + """ + Creates a CloudWatch Log Group. + + Args: + name: Log group name + retention_days: Log retention period + kms_key_id: KMS key for encryption + opts: Optional resource options + + Returns: + aws.cloudwatch.LogGroup: Created log group + """ + if opts is None: + opts = ResourceOptions() + + return aws.cloudwatch.LogGroup( + f"log-group-{name}", + name=name, + retention_in_days=retention_days, + kms_key_id=kms_key_id, + tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def create_cloudtrail( + self, + name: str, + s3_bucket_name: str, + include_global_events: bool = True, + is_multi_region: bool = True, + kms_key_id: Optional[str] = None, + log_group_name: Optional[str] = None, + opts: Optional[ResourceOptions] = None + ) -> aws.cloudtrail.Trail: + """ + Creates a CloudTrail trail. 
+
+        Args:
+            name: Trail name
+            s3_bucket_name: S3 bucket for logs
+            include_global_events: Include global service events
+            is_multi_region: Enable multi-region trail
+            kms_key_id: KMS key for encryption
+            log_group_name: CloudWatch log group name; when omitted, the
+                trail is created without CloudWatch Logs delivery
+            opts: Optional resource options
+
+        Returns:
+            aws.cloudtrail.Trail: Created trail
+        """
+        if opts is None:
+            opts = ResourceOptions()
+
+        # CloudWatch Logs delivery is optional. The log group, the IAM role
+        # CloudTrail assumes, and the role policy are only created when a
+        # log group name was requested.
+        # NOTE(fix): previously the role and its policy were created
+        # unconditionally and referenced `log_group.arn` / `parent=log_group`,
+        # raising AttributeError whenever log_group_name was None.
+        log_group = None
+        role = None
+        if log_group_name:
+            log_group = self.create_cloudwatch_log_group(
+                log_group_name,
+                opts=ResourceOptions(
+                    provider=self.provider.provider,
+                    parent=opts.parent if opts else None
+                )
+            )
+
+            # Trust policy letting CloudTrail assume the delivery role.
+            assume_role_policy = {
+                "Version": "2012-10-17",
+                "Statement": [{
+                    "Effect": "Allow",
+                    "Principal": {
+                        "Service": "cloudtrail.amazonaws.com"
+                    },
+                    "Action": "sts:AssumeRole"
+                }]
+            }
+
+            role = aws.iam.Role(
+                f"cloudtrail-cloudwatch-role-{name}",
+                assume_role_policy=json.dumps(assume_role_policy),
+                tags=self.provider.get_tags(),
+                opts=ResourceOptions(
+                    provider=self.provider.provider,
+                    parent=log_group
+                )
+            )
+
+            # Scope the role to writing streams/events into this group only.
+            aws.iam.RolePolicy(
+                f"cloudtrail-cloudwatch-policy-{name}",
+                role=role.id,
+                policy=pulumi.Output.all(log_group_arn=log_group.arn).apply(
+                    lambda args: json.dumps({
+                        "Version": "2012-10-17",
+                        "Statement": [{
+                            "Effect": "Allow",
+                            "Action": [
+                                "logs:CreateLogStream",
+                                "logs:PutLogEvents"
+                            ],
+                            "Resource": f"{args['log_group_arn']}:*"
+                        }]
+                    })
+                ),
+                opts=ResourceOptions(
+                    provider=self.provider.provider,
+                    parent=role
+                )
+            )
+
+        # Create the trail
+        return aws.cloudtrail.Trail(
+            f"trail-{name}",
+            name=name,
+            s3_bucket_name=s3_bucket_name,
+            include_global_service_events=include_global_events,
+            is_multi_region_trail=is_multi_region,
+            kms_key_id=kms_key_id,
+            cloud_watch_logs_group_arn=log_group.arn if log_group else None,
+            cloud_watch_logs_role_arn=role.arn if log_group else None,
+
tags=self.provider.get_tags(), + opts=ResourceOptions.merge( + ResourceOptions( + provider=self.provider.provider, + protect=True + ), + opts + ) + ) + + def deploy_security_controls(self) -> Dict[str, Any]: + """Deploys security controls and returns outputs.""" + try: + # Enable Security Hub + security_hub = aws.securityhub.Account( + "security-hub", + enable_default_standards=True, + opts=ResourceOptions( + provider=self.provider.provider, + protect=True + ) + ) + + # Enable GuardDuty + guard_duty = aws.guardduty.Detector( + "guard-duty", + enable=True, + finding_publishing_frequency="ONE_HOUR", + opts=ResourceOptions( + provider=self.provider.provider, + protect=True + ) + ) + + return { + "security_hub_id": security_hub.id, + "guard_duty_id": guard_duty.id + } + + except Exception as e: + log.error(f"Failed to deploy security controls: {str(e)}") + raise diff --git a/pulumi/modules/aws/taggable.py b/modules/aws/taggable.py similarity index 100% rename from pulumi/modules/aws/taggable.py rename to modules/aws/taggable.py diff --git a/modules/aws/types.py b/modules/aws/types.py new file mode 100644 index 0000000..7c0cb70 --- /dev/null +++ b/modules/aws/types.py @@ -0,0 +1,407 @@ +# pulumi/modules/aws/types.py + +""" +AWS Module Configuration Types + +Defines data classes for AWS module configurations using Pydantic for type safety and validation. +Ensures integration of compliance configurations. + +Classes: +- IAMUserConfig: IAM user configuration. +- ControlTowerConfig: AWS Control Tower configuration. +- TenantAccountConfig: Tenant account configuration. +- GlobalTags: Global tags for resources. +- AWSConfig: Aggregated AWS configurations, including compliance settings. +- EksNodeGroupConfig: Configuration for EKS node groups. +- EksAddonConfig: Configuration for EKS add-ons. +- EksConfig: Configuration for EKS clusters. 
+""" + +from __future__ import annotations +from typing import List, Dict, Optional, Any, Union, TypedDict, Tuple, TYPE_CHECKING +from pydantic import BaseModel, Field, validator, root_validator +from core.types import ComplianceConfig +import pulumi +import pulumi_aws as aws +import ipaddress + +if TYPE_CHECKING: + from .security import SecurityManager + from .networking import NetworkManager + from .organization import AWSOrganization + from .resources import ResourceManager + from .iam import IAMManager + from .eks import EksManager + from .types import AWSConfig, AWSManagers + from .provider import AWSProvider + +class IAMUserConfig(BaseModel): + """Configuration for an IAM User in AWS.""" + + name: str = Field(..., description="Name of the IAM user.") + email: str = Field(..., description="Email address of the IAM user.") + groups: List[str] = Field( + default_factory=list, description="IAM groups the user belongs to." + ) + policies: List[str] = Field( + default_factory=list, description="IAM policy ARNs attached to the user." + ) + tags: Dict[str, str] = Field( + default_factory=dict, description="Tags to apply to the IAM user." + ) + path: Optional[str] = Field(default="/", description="IAM user path.") + permissions_boundary: Optional[str] = Field( + None, description="ARN of the policy to set as permissions boundary." 
+ ) + +class SecurityGroupRule(BaseModel): + """Configuration for a security group rule.""" + type: str = Field(..., description="Rule type (ingress/egress)") + protocol: str = Field(..., description="Network protocol") + from_port: int = Field(..., description="Starting port range") + to_port: int = Field(..., description="Ending port range") + cidr_blocks: Optional[List[str]] = Field(None, description="CIDR blocks") + security_group_id: Optional[str] = Field(None, description="Source/destination security group") + description: Optional[str] = Field(None, description="Rule description") + +class NetworkConfig(BaseModel): + """Network configuration for AWS resources.""" + vpc_cidr: str = Field(..., description="VPC CIDR block") + subnet_cidrs: Dict[str, List[str]] = Field( + ..., description="Subnet CIDR blocks by type (public/private)" + ) + availability_zones: List[str] = Field(..., description="Availability zones to use") + enable_nat_gateway: bool = Field(True, description="Enable NAT Gateway") + enable_vpn_gateway: bool = Field(False, description="Enable VPN Gateway") + enable_flow_logs: bool = Field(True, description="Enable VPC Flow Logs") + tags: Dict[str, str] = Field(default_factory=dict, description="Network resource tags") + + @validator("vpc_cidr") + def validate_vpc_cidr(cls, v): + try: + ipaddress.ip_network(v) + except ValueError: + raise ValueError(f"Invalid VPC CIDR: {v}") + return v + +class ControlTowerConfig(BaseModel): + """Configuration for AWS Control Tower.""" + + enabled: bool = Field(default=False, description="Enable AWS Control Tower.") + organizational_unit_name: str = Field( + default="LandingZone", description="Name of the Organizational Unit." + ) + execution_role_name: str = Field( + default="AWSControlTowerExecution", description="Name of the execution role." + ) + execution_role_arn: Optional[str] = Field( + None, description="ARN of the execution role." 
+    )
+    admin_role_name: str = Field(
+        default="AWSControlTowerAdmin", description="Name of the admin role."
+    )
+    admin_role_arn: Optional[str] = Field(None, description="ARN of the admin role.")
+    audit_role_name: str = Field(
+        default="AWSControlTowerAudit", description="Name of the audit role."
+    )
+    audit_role_arn: Optional[str] = Field(None, description="ARN of the audit role.")
+    log_archive_bucket: Optional[str] = Field(
+        None, description="Name of the log archive bucket."
+    )
+
+    @root_validator
+    def validate_control_tower_fields(cls, values):
+        # NOTE(fix): this was previously @validator("enabled", pre=True).
+        # Pydantic v1 validates fields in declaration order, so a field
+        # validator on `enabled` (declared first) never sees the *_arn
+        # fields in `values` — enabling Control Tower always raised.
+        # A root_validator runs after all fields are populated.
+        if values.get("enabled"):
+            required_fields = ["execution_role_arn", "admin_role_arn"]
+            missing = [field for field in required_fields if not values.get(field)]
+            if missing:
+                raise ValueError(
+                    f"Missing fields for Control Tower: {', '.join(missing)}"
+                )
+        return values
+
+class SecurityConfig(BaseModel):
+    """Security configuration for AWS resources."""
+    enable_security_hub: bool = Field(True, description="Enable Security Hub")
+    enable_guard_duty: bool = Field(True, description="Enable GuardDuty")
+    enable_config: bool = Field(True, description="Enable AWS Config")
+    enable_cloudtrail: bool = Field(True, description="Enable CloudTrail")
+    kms_deletion_window: int = Field(30, description="KMS key deletion window in days")
+    enable_key_rotation: bool = Field(True, description="Enable KMS key rotation")
+    security_group_rules: List[SecurityGroupRule] = Field(
+        default_factory=list,
+        description="Security group rules"
+    )
+
+    @validator("security_group_rules")
+    def validate_security_rules(cls, v):
+        for rule in v:
+            if rule.type not in ["ingress", "egress"]:
+                raise ValueError(f"Invalid rule type: {rule.type}")
+            if not rule.cidr_blocks and not rule.security_group_id:
+                raise ValueError("Either CIDR blocks or security group ID required")
+        return v
+
+class TenantAccountConfig(BaseModel):
+    """Configuration for a Tenant Account within AWS."""
+
+    name: str = Field(..., description="Name of the tenant account.")
+
email: str = Field( + ..., description="Email address associated with the tenant account." + ) + administrators: List[str] = Field( + default_factory=list, description="Administrators of the tenant account." + ) + users: List[str] = Field( + default_factory=list, description="Users of the tenant account." + ) + features: List[str] = Field( + default_factory=list, description="Enabled features for the tenant account." + ) + network: Optional[NetworkConfig] = Field( + None, description="Network configuration for the tenant." + ) + security: Optional[SecurityConfig] = Field( + None, description="Security configuration for the tenant." + ) + aws: Dict[str, Any] = Field( + default_factory=dict, + description="AWS-specific configuration for the tenant account.", + ) + tags: Dict[str, str] = Field( + default_factory=dict, description="Tags for resources in the tenant account." + ) + +class GlobalTags(BaseModel): + """Global tags to apply to all AWS resources.""" + + project: str = Field(default="konductor", description="Project name.") + managed_by: str = Field( + default="NASA_SCIP_OPERATIONS", description="Managed by identifier." + ) + environment: str = Field( + default="production", description="Environment identifier." + ) + cost_center: Optional[str] = Field(None, description="Cost center identifier.") + data_classification: Optional[str] = Field( + None, description="Data classification level." + ) + +class BackupConfig(BaseModel): + """Configuration for AWS backup policies.""" + enabled: bool = Field(True, description="Enable AWS Backup") + retention_days: int = Field(30, description="Backup retention period") + schedule_expression: str = Field("cron(0 5 ? 
* * *)", description="Backup schedule") + copy_actions: Optional[List[Dict[str, Any]]] = Field( + None, description="Cross-region/account copy actions" + ) + + @validator("retention_days") + def validate_retention_days(cls, v): + if v < 1: + raise ValueError("Retention days must be positive") + return v + + @validator("schedule_expression") + def validate_schedule(cls, v): + if not v.startswith("cron(") or not v.endswith(")"): + raise ValueError("Invalid cron expression format") + return v + +class MonitoringConfig(BaseModel): + """Configuration for AWS monitoring.""" + enable_enhanced_monitoring: bool = Field(True, description="Enable enhanced monitoring") + metrics_collection_interval: int = Field(60, description="Metrics collection interval") + log_retention_days: int = Field(90, description="Log retention period") + alarm_notification_topic: Optional[str] = Field(None, description="SNS topic for alarms") + +class EksNodeGroupConfig(BaseModel): + """Configuration for EKS node groups.""" + name: str = Field(..., description="Node group name") + instance_type: str = Field(default="t3.medium", description="EC2 instance type") + desired_size: int = Field(default=2, description="Desired number of nodes") + min_size: int = Field(default=1, description="Minimum number of nodes") + max_size: int = Field(default=3, description="Maximum number of nodes") + disk_size: int = Field(default=50, description="Node disk size in GB") + ami_type: str = Field(default="AL2_x86_64", description="AMI type") + capacity_type: str = Field(default="ON_DEMAND", description="Capacity type (ON_DEMAND/SPOT)") + labels: Dict[str, str] = Field(default_factory=dict, description="Kubernetes labels") + taints: Optional[List[Dict[str, str]]] = Field(None, description="Kubernetes taints") + +class EksAddonConfig(BaseModel): + """Configuration for EKS add-ons.""" + vpc_cni: bool = Field(default=True, description="Enable AWS VPC CNI") + coredns: bool = Field(default=True, description="Enable CoreDNS") 
+ kube_proxy: bool = Field(default=True, description="Enable kube-proxy") + aws_load_balancer_controller: bool = Field(default=True, description="Enable AWS Load Balancer Controller") + cluster_autoscaler: bool = Field(default=True, description="Enable Cluster Autoscaler") + metrics_server: bool = Field(default=True, description="Enable Metrics Server") + aws_for_fluent_bit: bool = Field(default=True, description="Enable AWS for Fluent Bit") + +class EksConfig(BaseModel): + """Configuration for EKS clusters.""" + enabled: bool = Field(default=False, description="Enable EKS deployment") + cluster_name: str = Field(..., description="EKS cluster name") + kubernetes_version: str = Field(default="1.26", description="Kubernetes version") + endpoint_private_access: bool = Field(default=True, description="Enable private endpoint") + endpoint_public_access: bool = Field(default=False, description="Enable public endpoint") + node_groups: List[EksNodeGroupConfig] = Field(default_factory=list, description="Node group configurations") + addons: EksAddonConfig = Field(default_factory=EksAddonConfig, description="Add-on configurations") + enable_irsa: bool = Field(default=True, description="Enable IAM Roles for Service Accounts") + enable_secrets_encryption: bool = Field(default=True, description="Enable secrets encryption") + enable_vpc_cni_prefix_delegation: bool = Field(default=True, description="Enable VPC CNI prefix delegation") + + @validator("kubernetes_version") + def validate_k8s_version(cls, v): + valid_versions = ["1.24", "1.25", "1.26", "1.27"] + if v not in valid_versions: + raise ValueError(f"Invalid Kubernetes version: {v}") + return v + + @validator("node_groups") + def validate_node_groups(cls, v): + if not v: + raise ValueError("At least one node group required") + return v + +class AWSConfig(BaseModel): + """Aggregated configuration class for AWS module settings.""" + + enabled: bool = Field(default=True, description="Enable the AWS module.") + profile: str = 
Field(default="main", description="AWS CLI profile to use.") + region: str = Field(default="us-west-2", description="AWS region for deployment.") + account_id: str = Field(..., description="AWS account ID.") + bucket: str = Field(..., description="Name of the S3 bucket for state storage.") + control_tower: ControlTowerConfig = Field( + default_factory=ControlTowerConfig, + description="AWS Control Tower configuration.", + ) + iam_users: List[IAMUserConfig] = Field( + default_factory=list, description="IAM user configurations." + ) + landingzones: List[TenantAccountConfig] = Field( + default_factory=list, description="Tenant account configurations." + ) + network: NetworkConfig = Field( + ..., description="Network configuration." + ) + security: SecurityConfig = Field( + default_factory=SecurityConfig, + description="Security configuration." + ) + backup: BackupConfig = Field( + default_factory=BackupConfig, + description="Backup configuration." + ) + monitoring: MonitoringConfig = Field( + default_factory=MonitoringConfig, + description="Monitoring configuration." + ) + global_tags: GlobalTags = Field( + default_factory=GlobalTags, description="Global tags for all resources." + ) + compliance: ComplianceConfig = Field( + default_factory=ComplianceConfig, description="Compliance configuration." + ) + version: str = Field( + default="0.0.1", description="Version of the local AWS module." 
+ ) + + @validator("region") + def validate_region(cls, v): + valid_regions = ["us-east-1", "us-east-2", "us-west-1", "us-west-2"] + if v not in valid_regions: + raise ValueError(f"Invalid AWS region: {v}") + return v + + @root_validator + def validate_network_config(cls, values): + """Validate network configuration.""" + if "network" in values: + network = values["network"] + if len(network.availability_zones) < 2: + raise ValueError("At least 2 availability zones required") + return values + + @classmethod + def merge(cls, user_config: Dict[str, Any]) -> "AWSConfig": + """Merges user configuration with defaults, handling compliance integration.""" + aws_specific_keys = {k for k in user_config.keys() if k != "compliance"} + compliance_config = user_config.get("compliance", {}) + aws_config = {k: user_config[k] for k in aws_specific_keys} + + # Build compliance configuration + compliance = ComplianceConfig.merge(compliance_config) + aws_config["compliance"] = compliance + + return cls(**aws_config) + +class AWSManagers(TypedDict): + security: SecurityManager + networking: NetworkManager + organization: AWSOrganization + resources: ResourceManager + iam: IAMManager + eks: EksManager + +def validate_config(config: AWSConfig) -> None: + """ + Validates the AWS configuration. + + Args: + config: AWS configuration to validate. + + Raises: + ValueError: If configuration is invalid. 
+ """ + if not config.account_id: + raise ValueError("AWS account ID is required") + + if not config.region: + raise ValueError("AWS region is required") + + if config.control_tower.enabled: + if not config.control_tower.execution_role_arn: + raise ValueError("Control Tower execution role ARN is required when enabled") + + # Validate tenant configurations + if config.landingzones: + for tenant in config.landingzones: + if not tenant.email: + raise ValueError(f"Email is required for tenant account {tenant.name}") + + + +def validate_module_exports( + version: str, + resource: pulumi.Resource, + outputs: Dict[str, Any] +) -> bool: + """ + Validates module exports against required outputs. + + Args: + version: Module version string + resource: Main infrastructure resource + outputs: Dictionary of outputs to validate + + Returns: + bool: True if all required outputs are present + """ + required_outputs = { + "ops_data_bucket": "S3 bucket for operational data", + "organization": "AWS Organization ID", + "organization_arn": "AWS Organization ARN", + "vpc_id": "Primary VPC ID", + "subnet_ids": "List of subnet IDs", + "security_groups": "Map of security group IDs", + "kms_keys": "Map of KMS key ARNs", + "iam_roles": "Map of IAM role ARNs" + } + + missing = [key for key in required_outputs if key not in outputs] + if missing: + pulumi.log.warn(f"Missing required outputs: {', '.join(missing)}") + return False + + return True diff --git a/pulumi/mypy.ini b/mypy.ini similarity index 100% rename from pulumi/mypy.ini rename to mypy.ini diff --git a/pulumi/__main__.py b/pulumi/__main__.py deleted file mode 100644 index 6530c53..0000000 --- a/pulumi/__main__.py +++ /dev/null @@ -1,65 +0,0 @@ -# pulumi/__main__.py - -import pulumi - -from core.config import export_results -from core.deployment import initialize_pulumi, deploy_modules - - -def main(): - try: - # Initialize Pulumi - init = initialize_pulumi() - - # Extract the components from the initialization dictionary. 
- # TODO: - # - Refactor this to use dataclasses. - # - Relocate the dataclasses to a shared location. - # - Relocate module specific initialization logic into the pulumi/core/deployment.py module. - config = init["config"] - k8s_provider = init["k8s_provider"] - versions = init["versions"] - configurations = init["configurations"] - default_versions = init["default_versions"] - global_depends_on = init["global_depends_on"] - compliance_config = init.get("compliance_config", {}) - - # Map of modules to deploy with default boolean value. - # TODO: - # - Refactor this as a map of module names and default enabled booleans. - # - Map of module:enabled pairs will depricate the DEFAULT_ENABLED_CONFIG list in config.py. - # - This eliminates the need for checking whether a module is enabled in Pulumi stack configuration. - # - Modules are set to enabled=false by defualt, override this to enable by default in DEFAULT_ENABLED_CONFIG. - modules_to_deploy = [ - "aws", - # "cert_manager", - # "prometheus" - ] - - # Deploy modules - # TODO: - # - Simplify deploy_modules signature after relocating the module:enabled map and init dictionary location. - deploy_modules( - modules_to_deploy, - config, - default_versions, - global_depends_on, - k8s_provider, - versions, - configurations, - compliance_config, - ) - - # Export stack outputs. - export_results(versions, configurations, compliance_config) - - except Exception as e: - pulumi.log.error(f"Deployment failed: {str(e)}") - raise - - -# Entry point for the Pulumi program. -# TODO: -# - Re-evaluate structure and best location for export_results function call. 
-if __name__ == "__main__": - main() diff --git a/pulumi/core/config.py b/pulumi/core/config.py deleted file mode 100644 index 94f7eda..0000000 --- a/pulumi/core/config.py +++ /dev/null @@ -1,216 +0,0 @@ -# pulumi/core/config.py - -""" -Configuration Management Module - -This module handles the retrieval and preparation of configurations for different modules -within the Pulumi IaC program. It centralizes configuration logic to promote reuse -and maintainability. - -Key Functions: -- get_module_config: Retrieves and prepares module configuration. -- load_default_versions: Loads default versions for modules. -- export_results: Exports global deployment stack metadata. - -Includes proper data type handling to ensure configurations are correctly parsed. -""" - -import json -import os -import pulumi -import requests -from typing import Any, Dict, Tuple - -# Default versions URL template -DEFAULT_VERSIONS_URL_TEMPLATE = ( - "https://raw.githubusercontent.com/ContainerCraft/Kargo/rerefactor/pulumi/" -) - -# Module enabled defaults: Setting a module to True enables the module by default -DEFAULT_ENABLED_CONFIG = { - "aws": False, - "cert_manager": True, - "kubevirt": True, - "multus": True, - "hostpath_provisioner": True, - "containerized_data_importer": True, - "prometheus": True, -} - - -def coerce_to_bool(value: Any) -> bool: - """ - Coerces a value to a boolean. - - Args: - value (Any): The value to coerce. - - Returns: - bool: The coerced boolean value. - """ - if isinstance(value, bool): - return value - if isinstance(value, str): - return value.lower() == "true" - return bool(value) - - -def get_module_config( - module_name: str, - config: pulumi.Config, - default_versions: Dict[str, Any], -) -> Tuple[Dict[str, Any], bool]: - """ - Retrieves and prepares the configuration for a module. - - Args: - module_name (str): The name of the module to configure. - config (pulumi.Config): The Pulumi configuration object. 
- default_versions (Dict[str, Any]): A dictionary of default versions for modules. - - Returns: - Tuple[Dict[str, Any], bool]: A tuple containing the module's configuration dictionary - and a boolean indicating if the module is enabled. - """ - - module_config = config.get_object(module_name) or {} - - # Retrieve enabled status from configuration or defaults to defined default setting - enabled_value = module_config.pop( - "enabled", DEFAULT_ENABLED_CONFIG.get(module_name, False) - ) - module_enabled = coerce_to_bool(enabled_value) - - # Include 'compliance' config into 'module_config' for AWS module - if module_name == "aws": - compliance_config_dict = config.get_object("compliance") or {} - module_config["compliance"] = compliance_config_dict - - # Only set the version if it is *not* the aws module - if module_name != "aws": - module_config["version"] = module_config.get( - "version", default_versions.get(module_name) - ) - - return module_config, module_enabled - - -def load_default_versions( - config: pulumi.Config, force_refresh: bool = False -) -> Dict[str, Any]: - """ - Loads the default versions for modules based on the specified configuration settings. - - This function attempts to load version information from multiple sources in order of precedence: - 1. User-specified source via Pulumi config (`default_versions.source`). - 2. Stack-specific versions file (`./versions/$STACK_NAME.json`) if `versions.stack_name` is set to true. - 3. Local default versions file (`./default_versions.json`). - 4. Remote versions based on the specified channel (`versions.channel`). - - Args: - config (pulumi.Config): The Pulumi configuration object. - force_refresh (bool): Whether to force refresh the versions cache. - - Returns: - Dict[str, Any]: A dictionary containing the default versions for modules. - - Raises: - Exception: If default versions cannot be loaded from any source. 
- """ - cache_file = "/tmp/default_versions.json" - if not force_refresh and os.path.exists(cache_file): - try: - with open(cache_file) as f: - return json.load(f) - except Exception as e: - pulumi.log.warn(f"Error reading cache file: {e}") - - stack_name = pulumi.get_stack() - default_versions_source = config.get("default_versions.source") - versions_channel = config.get("versions.channel") or "stable" - versions_stack_name = coerce_to_bool(config.get("versions.stack_name")) or False - default_versions = {} - - # Function to try loading default versions from file - def load_versions_from_file(file_path: str) -> dict: - try: - with open(file_path, "r") as f: - versions = json.load(f) - pulumi.log.info(f"Loaded default versions from file: {file_path}") - return versions - except (FileNotFoundError, json.JSONDecodeError) as e: - pulumi.log.warn(f"Error loading versions from file {file_path}: {e}") - return {} - - def load_versions_from_url(url: str) -> dict: - try: - response = requests.get(url) - response.raise_for_status() - versions = response.json() - pulumi.log.info(f"Loaded default versions from URL: {url}") - return versions - except (requests.RequestException, json.JSONDecodeError) as e: - pulumi.log.warn(f"Error loading versions from URL {url}: {e}") - return {} - - if default_versions_source: - if default_versions_source.startswith(("http://", "https://")): - default_versions = load_versions_from_url(default_versions_source) - else: - default_versions = load_versions_from_file(default_versions_source) - - if not default_versions: - raise Exception( - f"Failed to load default versions from specified source: {default_versions_source}" - ) - - else: - if versions_stack_name: - current_dir = os.path.dirname(os.path.abspath(__file__)) - stack_versions_path = os.path.join( - current_dir, "..", "versions", f"{stack_name}.json" - ) - default_versions = load_versions_from_file(stack_versions_path) - - if not default_versions: - current_dir = 
os.path.dirname(os.path.abspath(__file__)) - default_versions_path = os.path.join( - current_dir, "..", "default_versions.json" - ) - default_versions = load_versions_from_file(default_versions_path) - - if not default_versions: - versions_url = ( - f"{DEFAULT_VERSIONS_URL_TEMPLATE}{versions_channel}_versions.json" - ) - default_versions = load_versions_from_url(versions_url) - - if not default_versions: - raise Exception("Cannot proceed without default versions.") - - with open(cache_file, "w") as f: - json.dump(default_versions, f) - - return default_versions - - -def export_results( - versions: Dict[str, str], configurations: Dict[str, Dict[str, Any]], compliance: Any -): - """ - Exports the results of the deployment processes including versions, configurations, and compliance information. - - Args: - versions (Dict[str, str]): A dictionary containing the versions of the deployed modules. - configurations (Dict[str, Dict[str, Any]]): A dictionary containing the configurations of the deployed modules. - compliance (Any): The compliance configuration, can be ComplianceConfig or a dictionary. - """ - # Convert compliance to a dictionary if it's a Pydantic model - if hasattr(compliance, "dict"): - compliance_dict = compliance.dict() - else: - compliance_dict = compliance - - pulumi.export("versions", versions) - pulumi.export("configuration", configurations) - pulumi.export("compliance", compliance_dict) diff --git a/pulumi/core/deployment.py b/pulumi/core/deployment.py deleted file mode 100644 index 8e00a91..0000000 --- a/pulumi/core/deployment.py +++ /dev/null @@ -1,293 +0,0 @@ -# pulumi/core/deployment.py - -""" -Deployment Management Module - -This module manages the deployment orchestration of modules, -initializes Pulumi and Kubernetes providers, and handles module deployments. 
-""" - -import os -import inspect -import importlib -from pydantic import BaseModel -from typing import Dict, Any, List, Type, Callable - -import pulumi -import pulumi_kubernetes as k8s -from pulumi import log -from pulumi_kubernetes import Provider - -from .config import get_module_config, load_default_versions -from .metadata import ( - collect_git_info, - generate_git_labels, - generate_git_annotations, - set_global_labels, - set_global_annotations, - generate_compliance_labels, - generate_compliance_annotations, -) -from .utils import generate_global_transformations -from .types import ComplianceConfig - - -def initialize_pulumi() -> Dict[str, Any]: - """ - Initializes Pulumi configuration, Kubernetes provider, and global resources. - - Returns: - Dict[str, Any]: A dictionary containing initialized components. - """ - config = pulumi.Config() - stack_name = pulumi.get_stack() - project_name = pulumi.get_project() - - try: - # Load global default versions and initialize variables from configuration. - default_versions = load_default_versions(config) - versions: Dict[str, str] = {} - - # Initialize empty global configuration and dependency list variables. - configurations: Dict[str, Dict[str, Any]] = {} - global_depends_on: List[pulumi.Resource] = [] - - # Initialize the Kubernetes provider. - kubernetes_config = config.get_object("kubernetes") or {} - kubernetes_context = kubernetes_config.get("context") - - kubeconfig = kubernetes_config.get("kubeconfig") or os.getenv("KUBECONFIG") - - # Initialize the Kubernetes provider. - k8s_provider = Provider( - "k8sProvider", - kubeconfig=kubeconfig, - context=kubernetes_context, - ) - - k8s_provider_secret = pulumi.Output.secret(k8s_provider) - pulumi.export("k8s_provider", k8s_provider_secret) - - log.info(f"Kubeconfig: {kubeconfig}") - log.info(f"Kubernetes context: {kubernetes_context}") - - # Collect and store git information in the global configuration. 
- git_info = collect_git_info() - configurations["source_repository"] = { - "remote": git_info["remote"], - "branch": git_info["branch"], - "commit": git_info["commit"], - } - - # Retrieve compliance metadata from pulumi configuration. - compliance_config_dict = config.get_object("compliance") or {} - compliance_config = ComplianceConfig.merge(compliance_config_dict) - pulumi.log.info(f"Compliance Config: {compliance_config}") - - # Generate global tags, labels, and annotations. - compliance_labels = generate_compliance_labels(compliance_config) - compliance_annotations = generate_compliance_annotations(compliance_config) - - git_labels = generate_git_labels(git_info) - git_annotations = generate_git_annotations(git_info) - global_labels = {**compliance_labels, **git_labels} - global_annotations = {**compliance_annotations, **git_annotations} - - set_global_labels(global_labels) - set_global_annotations(global_annotations) - generate_global_transformations(global_labels, global_annotations) - - return { - "config": config, - "stack_name": stack_name, - "project_name": project_name, - "default_versions": default_versions, - "versions": versions, - "configurations": configurations, - "global_depends_on": global_depends_on, - "k8s_provider": k8s_provider, - "git_info": git_info, - "compliance_config": compliance_config, - "global_labels": global_labels, - "global_annotations": global_annotations, - } - except Exception as e: - log.error(f"Initialization error: {str(e)}") - raise - - -def deploy_module( - module_name: str, - config: pulumi.Config, - default_versions: Dict[str, Any], - global_depends_on: List[pulumi.Resource], - k8s_provider: k8s.Provider, - versions: Dict[str, str], - configurations: Dict[str, Dict[str, Any]], - compliance_config: ComplianceConfig, # Include this parameter -) -> None: - """ - Helper function to deploy a module based on configuration. - - Args: - module_name (str): Name of the module. 
- config (pulumi.Config): Pulumi configuration object. - default_versions (Dict[str, Any]): Default versions for modules. - global_depends_on (List[pulumi.Resource]): Global dependencies. - k8s_provider (k8s.Provider): Kubernetes provider. - versions (Dict[str, str]): Dictionary to store versions of deployed modules. - configurations (Dict[str, Dict[str, Any]]): Dictionary to store configurations of deployed modules. - compliance_config (ComplianceConfig): Compliance configuration for the stack. - """ - if not isinstance(module_name, str): - raise TypeError("module_name must be a string") - if not isinstance(config, pulumi.Config): - raise TypeError("config must be an instance of pulumi.Config") - if not isinstance(default_versions, dict): - raise TypeError("default_versions must be a dictionary") - if not isinstance(global_depends_on, list): - raise TypeError("global_depends_on must be a list") - if not isinstance(k8s_provider, k8s.Provider): - raise TypeError( - "k8s_provider must be an instance of pulumi_kubernetes.Provider" - ) - if not isinstance(versions, dict): - raise TypeError("versions must be a dictionary") - if not isinstance(configurations, dict): - raise TypeError("configurations must be a dictionary") - - # Retrieve module configuration and enabled status. 
- module_config_dict, module_enabled = get_module_config( - module_name, config, default_versions - ) - - if module_enabled: - ModuleConfigClass = discover_config_class(module_name) - deploy_func = discover_deploy_function(module_name) - - config_obj = ModuleConfigClass.merge(module_config_dict) - - deploy_func_args = inspect.signature(deploy_func).parameters.keys() - config_arg_name = list(deploy_func_args)[0] - - deploy_kwargs = { - config_arg_name: config_obj, - "global_depends_on": global_depends_on, - } - - if module_name != "aws": - deploy_kwargs["k8s_provider"] = k8s_provider - - try: - result = deploy_func(**deploy_kwargs) - - if isinstance(result, tuple) and len(result) == 3: - version, release, module_aux_meta = result - elif isinstance(result, tuple) and len(result) == 2: - version, release = result - module_aux_meta = None - else: - raise ValueError( - f"Unexpected return value structure from {module_name} deploy function" - ) - - versions[module_name] = version - configurations[module_name] = {"enabled": module_enabled} - - if module_aux_meta: - # Include module outputs into configurations[module_name] - configurations[module_name].update(module_aux_meta) - - global_depends_on.append(release) - - except Exception as e: - log.error(f"Deployment failed for module {module_name}: {str(e)}") - raise - else: - log.info(f"Module {module_name} is not enabled.") - - -def discover_config_class(module_name: str) -> Type: - """ - Discovers and returns the configuration class from the module's types.py. - Supports both Pydantic BaseModel and dataclasses. - - Args: - module_name (str): The name of the module. - - Returns: - Type: The configuration class, either a Pydantic BaseModel subclass or a dataclass. - - Raises: - ValueError: If no suitable configuration class is found. 
- """ - types_module = importlib.import_module(f"modules.{module_name}.types") - config_class: Optional[Type] = None - - for name, obj in inspect.getmembers(types_module): - if inspect.isclass(obj): - if issubclass(obj, BaseModel) and obj is not BaseModel: - return obj - elif hasattr(obj, "__dataclass_fields__"): - config_class = obj - - if config_class: - return config_class - - raise ValueError(f"No configuration class found in modules.{module_name}.types") - - -def discover_deploy_function(module_name: str) -> Callable: - """ - Discovers and returns the deploy function from the module's deploy.py. - - Args: - module_name (str): The name of the module. - - Returns: - Callable: The deploy function. - """ - deploy_module = importlib.import_module(f"modules.{module_name}.deploy") - function_name = f"deploy_{module_name}_module" - deploy_function = getattr(deploy_module, function_name, None) - if not deploy_function: - raise ValueError( - f"No deploy function named '{function_name}' found in modules.{module_name}.deploy" - ) - return deploy_function - - -def deploy_modules( - modules: List[str], - config: pulumi.Config, - default_versions: Dict[str, Any], - global_depends_on: List[pulumi.Resource], - k8s_provider: Provider, - versions: Dict[str, str], - configurations: Dict[str, Dict[str, Any]], - compliance_config: ComplianceConfig, -) -> None: - """ - Iterates over a list of modules and deploys each configured and enabled module. - - Args: - modules (List[str]): List of module names to deploy. - config (pulumi.Config): Pulumi configuration object. - default_versions (Dict[str, Any]): Default versions for modules. - global_depends_on (List[pulumi.Resource]): Global dependencies. - k8s_provider (k8s.Provider): Kubernetes provider. - versions (Dict[str, str]): Dictionary to store versions of deployed modules. - configurations (Dict[str, Dict[str, Any]]): Dictionary to store configurations of deployed modules. 
- """ - for module_name in modules: - log.info(f"Deploying module: {module_name}") - deploy_module( - module_name=module_name, - config=config, - default_versions=default_versions, - global_depends_on=global_depends_on, - k8s_provider=k8s_provider, - versions=versions, - configurations=configurations, - compliance_config=compliance_config, - ) diff --git a/pulumi/core/resource_helpers.py b/pulumi/core/resource_helpers.py deleted file mode 100644 index cc9ceeb..0000000 --- a/pulumi/core/resource_helpers.py +++ /dev/null @@ -1,382 +0,0 @@ -# pulumi/core/resource_helpers.py - -import pulumi -import pulumi_kubernetes as k8s -from typing import Optional, Dict, Any, List, Callable -from .metadata import get_global_labels, get_global_annotations -from .utils import set_resource_metadata - - -def create_namespace( - name: str, - labels: Optional[Dict[str, str]] = None, - annotations: Optional[Dict[str, str]] = None, - finalizers: Optional[List[str]] = None, - custom_timeouts: Optional[Dict[str, str]] = None, - opts: Optional[pulumi.ResourceOptions] = None, - k8s_provider: Optional[k8s.Provider] = None, - parent: Optional[pulumi.Resource] = None, - depends_on: Optional[List[pulumi.Resource]] = None, -) -> k8s.core.v1.Namespace: - """ - Creates a Kubernetes Namespace with global labels and annotations. - - Args: - name (str): The name of the namespace. - labels (Optional[Dict[str, str]]): Additional labels to apply. - annotations (Optional[Dict[str, str]]): Additional annotations to apply. - finalizers (Optional[List[str]]): Finalizers for the namespace. - custom_timeouts (Optional[Dict[str, str]]): Custom timeouts for resource operations. - opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. - k8s_provider (Optional[k8s.Provider]): Kubernetes provider. - depends_on (Optional[List[pulumi.Resource]]): Resources this resource depends on. - - Returns: - k8s.core.v1.Namespace: The created Namespace resource. 
- """ - - # If the optional arguments are not provided, set them to default values. - # TODO: - # - refactor/simplify for better readability and maintainability - if opts is None: - opts = pulumi.ResourceOptions() - if labels is None: - labels = {} - if annotations is None: - annotations = {} - if custom_timeouts is None: - custom_timeouts = {} - if depends_on is None: - depends_on = [] - if parent is None: - parent = [] - - global_labels = get_global_labels() - global_annotations = get_global_annotations() - labels.update(global_labels) - annotations.update(global_annotations) - - metadata = { - "name": name, - "labels": labels, - "annotations": annotations, - } - - spec = {} - if finalizers: - spec["finalizers"] = finalizers - - # Set Global Pulumi Resource Options - # TODO: - # - Enhance core/config.py with a centralized default pulumi `opts` configuration - # - Support merging with custom opts - # - Adopt across project resources to improve consistency and DRYness - opts = pulumi.ResourceOptions.merge( - opts, - pulumi.ResourceOptions( - provider=k8s_provider, - depends_on=depends_on, - parent=parent, - custom_timeouts=pulumi.CustomTimeouts( - create=custom_timeouts.get("create", "5m"), - update=custom_timeouts.get("update", "10m"), - delete=custom_timeouts.get("delete", "10m"), - ), - ), - ) - - return k8s.core.v1.Namespace( - name, - metadata=metadata, - spec=spec, - opts=opts, - ) - - -def create_custom_resource( - name: str, - args: Dict[str, Any], - opts: Optional[pulumi.ResourceOptions] = None, - k8s_provider: Optional[k8s.Provider] = None, - depends_on: Optional[List[pulumi.Resource]] = None, -) -> k8s.apiextensions.CustomResource: - """ - Creates a Kubernetes CustomResource with global labels and annotations. - - Args: - name (str): The name of the custom resource. - args (Dict[str, Any]): Arguments for creating the custom resource. - opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. 
- k8s_provider (Optional[k8s.Provider]): Kubernetes provider. - depends_on (Optional[List[pulumi.Resource]]): Resources this custom resource depends on. - - Returns: - k8s.apiextensions.CustomResource: The created CustomResource. - """ - try: - if "kind" not in args or "apiVersion" not in args: - raise ValueError( - "The 'args' dictionary must include 'kind' and 'apiVersion' keys." - ) - - if opts is None: - opts = pulumi.ResourceOptions() - if depends_on is None: - depends_on = [] - - global_labels = get_global_labels() - global_annotations = get_global_annotations() - - def custom_resource_transform(resource_args: pulumi.ResourceTransformationArgs): - props = resource_args.props - if "metadata" in props: - set_resource_metadata( - props["metadata"], global_labels, global_annotations - ) - return pulumi.ResourceTransformationResult(props, resource_args.opts) - - opts = pulumi.ResourceOptions.merge( - opts, - pulumi.ResourceOptions( - provider=k8s_provider, - depends_on=depends_on, - transformations=[custom_resource_transform], - ), - ) - - # Ensure metadata and spec are included if specified - metadata = args.get("metadata", {}) - spec = args.get("spec", {}) - - return k8s.apiextensions.CustomResource( - resource_name=name, - api_version=args["apiVersion"], - kind=args["kind"], - metadata=metadata, - spec=spec, - opts=opts, - ) - - except Exception as e: - pulumi.log.error(f"Failed to create custom resource '{name}': {e}") - raise - - -def create_helm_release( - name: str, - args: k8s.helm.v3.ReleaseArgs, - opts: Optional[pulumi.ResourceOptions] = None, - transformations: Optional[ - List[ - Callable[ - [pulumi.ResourceTransformationArgs], - Optional[pulumi.ResourceTransformationResult], - ] - ] - ] = None, - k8s_provider: Optional[k8s.Provider] = None, - depends_on: Optional[List[pulumi.Resource]] = None, -) -> k8s.helm.v3.Release: - """ - Creates a Helm Release with global labels and annotations. - - Args: - name (str): The release name. 
- args (k8s.helm.v3.ReleaseArgs): Arguments for the Helm release. - opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. - transformations (Optional[List[Callable]]): Additional transformations. - k8s_provider (Optional[k8s.Provider]): Kubernetes provider. - depends_on (Optional[List[pulumi.Resource]]): Resources this release depends on. - - Returns: - k8s.helm.v3.Release: The created Helm release. - """ - if opts is None: - opts = pulumi.ResourceOptions() - if transformations is None: - transformations = [] - if depends_on is None: - depends_on = [] - - global_labels = get_global_labels() - global_annotations = get_global_annotations() - - def helm_resource_transform(resource_args: pulumi.ResourceTransformationArgs): - props = resource_args.props - if "metadata" in props: - set_resource_metadata(props["metadata"], global_labels, global_annotations) - elif "spec" in props and isinstance(props["spec"], dict): - if "metadata" in props["spec"]: - set_resource_metadata( - props["spec"]["metadata"], global_labels, global_annotations - ) - return pulumi.ResourceTransformationResult(props, resource_args.opts) - - transformations.append(helm_resource_transform) - - opts = pulumi.ResourceOptions.merge( - opts, - pulumi.ResourceOptions( - provider=k8s_provider, - depends_on=depends_on, - transformations=transformations, - ), - ) - - return k8s.helm.v3.Release(name, args, opts=opts) - - -def create_secret( - name: str, - args: Dict[str, Any], - opts: Optional[pulumi.ResourceOptions] = None, - k8s_provider: Optional[k8s.Provider] = None, - depends_on: Optional[List[pulumi.Resource]] = None, -) -> k8s.core.v1.Secret: - """ - Creates a Kubernetes Secret with global labels and annotations. - - Args: - name (str): The name of the secret. - args (Dict[str, Any]): Arguments for creating the secret. - opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. - k8s_provider (Optional[k8s.Provider]): Kubernetes provider. 
- depends_on (Optional[List[pulumi.Resource]]): Resources this secret depends on. - - Returns: - k8s.core.v1.Secret: The created Secret. - """ - if opts is None: - opts = pulumi.ResourceOptions() - if depends_on is None: - depends_on = [] - - # Merge global labels and annotations (if any) - global_labels = get_global_labels() - global_annotations = get_global_annotations() - - def secret_resource_transform(resource_args: pulumi.ResourceTransformationArgs): - props = resource_args.props - if "metadata" in props: - set_resource_metadata(props["metadata"], global_labels, global_annotations) - return pulumi.ResourceTransformationResult(props, resource_args.opts) - - # Merge resource options - opts = pulumi.ResourceOptions.merge( - opts, - pulumi.ResourceOptions( - provider=k8s_provider, - depends_on=depends_on, - transformations=[secret_resource_transform], - ), - ) - - # Constructor call - return k8s.core.v1.Secret(name, opts, **args) - - -def create_config_file( - name: str, - file: str, - opts: Optional[pulumi.ResourceOptions] = None, - transformations: Optional[ - List[ - Callable[ - [pulumi.ResourceTransformationArgs], - Optional[pulumi.ResourceTransformationResult], - ] - ] - ] = None, - k8s_provider: Optional[k8s.Provider] = None, - depends_on: Optional[List[pulumi.Resource]] = None, -) -> k8s.yaml.ConfigFile: - """ - Creates Kubernetes resources from a YAML config file with global labels and annotations. - - Args: - name (str): The resource name. - file (str): The path to the YAML file. - opts (Optional[pulumi.ResourceOptions]): Pulumi resource options. - transformations (Optional[List[Callable]]): Additional transformations. - k8s_provider (Optional[k8s.Provider]): Kubernetes provider. - depends_on (Optional[List[pulumi.Resource]]): Resources these resources depend on. - - Returns: - k8s.yaml.ConfigFile: The created resources. 
- """ - if opts is None: - opts = pulumi.ResourceOptions() - if transformations is None: - transformations = [] - if depends_on is None: - depends_on = [] - - global_labels = get_global_labels() - global_annotations = get_global_annotations() - - def config_file_transform(resource_args: pulumi.ResourceTransformationArgs): - props = resource_args.props - if "metadata" in props: - set_resource_metadata(props["metadata"], global_labels, global_annotations) - elif "spec" in props and isinstance(props["spec"], dict): - if "metadata" in props["spec"]: - set_resource_metadata( - props["spec"]["metadata"], global_labels, global_annotations - ) - return pulumi.ResourceTransformationResult(props, resource_args.opts) - - transformations.append(config_file_transform) - - opts = pulumi.ResourceOptions.merge( - opts, - pulumi.ResourceOptions( - provider=k8s_provider, - depends_on=depends_on, - transformations=transformations, - ), - ) - - return k8s.yaml.ConfigFile(name, file, opts=opts) - - -# ------------------------------------------------------------------------------ -# Metadata -# ------------------------------------------------------------------------------ -# TODO: -# - Evaluate full codebase for wider utilization of create_meta_objectmeta() -def create_meta_objectmeta( - name: str, - labels: Optional[Dict[str, str]] = None, - annotations: Optional[Dict[str, str]] = None, - namespace: Optional[str] = None, - **kwargs, -) -> k8s.meta.v1.ObjectMetaArgs: - """ - Creates a Kubernetes ObjectMetaArgs with global labels and annotations. - - Args: - name (str): The name of the resource. - labels (Optional[Dict[str, str]]): Additional labels to apply. - annotations (Optional[Dict[str, str]]): Additional annotations to apply. - namespace (Optional[str]): The namespace of the resource. - - Returns: - k8s.meta.v1.ObjectMetaArgs: The metadata arguments. 
- """ - if labels is None: - labels = {} - if annotations is None: - annotations = {} - - global_labels = get_global_labels() - global_annotations = get_global_annotations() - labels.update(global_labels) - annotations.update(global_annotations) - - return k8s.meta.v1.ObjectMetaArgs( - name=name, - labels=labels, - annotations=annotations, - namespace=namespace, - **kwargs, - ) diff --git a/pulumi/core/types.py b/pulumi/core/types.py deleted file mode 100644 index 1cfdf95..0000000 --- a/pulumi/core/types.py +++ /dev/null @@ -1,65 +0,0 @@ -# pulumi/core/types.py - -""" -Types and Data Structures Module - -This module defines all shared data classes and types used across all modules. -""" - -from typing import Optional, List, Dict, Any -from pydantic import BaseModel, validator - - -class NamespaceConfig(BaseModel): - name: str - labels: Dict[str, str] = {"ccio.v1/app": "kargo"} - annotations: Dict[str, str] = {} - finalizers: List[str] = ["kubernetes"] - protect: bool = False - retain_on_delete: bool = False - ignore_changes: List[str] = ["metadata", "spec"] - custom_timeouts: Dict[str, str] = {"create": "5m", "update": "10m", "delete": "10m"} - - -class FismaConfig(BaseModel): - enabled: bool = False - level: Optional[str] = None - ato: Dict[str, str] = {} - - @validator("enabled", pre=True) - def parse_enabled(cls, v): - if isinstance(v, str): - return v.lower() == "true" - return bool(v) - - -class NistConfig(BaseModel): - enabled: bool = False - controls: List[str] = [] - auxiliary: List[str] = [] - exceptions: List[str] = [] - - @validator("enabled", pre=True) - def parse_enabled(cls, v): - if isinstance(v, str): - return v.lower() == "true" - return bool(v) - - -class ScipConfig(BaseModel): - environment: Optional[str] = None - ownership: Dict[str, Any] = {} - provider: Dict[str, Any] = {} - - -class ComplianceConfig(BaseModel): - fisma: FismaConfig = FismaConfig() - nist: NistConfig = NistConfig() - scip: ScipConfig = ScipConfig() - - @classmethod - def 
merge(cls, user_config: Dict[str, Any]) -> "ComplianceConfig": - fisma_config = FismaConfig(**user_config.get("fisma", {})) - nist_config = NistConfig(**user_config.get("nist", {})) - scip_config = ScipConfig(**user_config.get("scip", {})) - return cls(fisma=fisma_config, nist=nist_config, scip=scip_config) diff --git a/pulumi/core/utils.py b/pulumi/core/utils.py deleted file mode 100644 index 794025f..0000000 --- a/pulumi/core/utils.py +++ /dev/null @@ -1,266 +0,0 @@ -# pulumi/core/utils.py - -""" -Utility Functions Module - -This module provides generic, reusable utility functions. -It includes resource transformations, Helm interactions, and miscellaneous helpers. -""" - -import re -import os -import tempfile -import pulumi -import pulumi_kubernetes as k8s -from typing import Optional, Dict, Any, List -import requests -import logging -import yaml -from packaging.version import parse as parse_version, InvalidVersion, Version - - -# Set up basic logging -logging.basicConfig( - level=logging.INFO, format="%(asctime)s - %(levelname)s - %(message)s" -) - - -# Function to update global resource tags, labels, and annotations from compliance config spec -def set_resource_metadata( - metadata: Any, global_labels: Dict[str, str], global_annotations: Dict[str, str] -): - """ - Updates resource metadata with global labels and annotations. 
- """ - if isinstance(metadata, dict): - metadata.setdefault("labels", {}).update(global_labels) - metadata.setdefault("annotations", {}).update(global_annotations) - elif isinstance(metadata, k8s.meta.v1.ObjectMetaArgs): - if metadata.labels is None: - metadata.labels = {} - metadata.labels.update(global_labels) - if metadata.annotations is None: - metadata.annotations = {} - metadata.annotations.update(global_annotations) - - -# Function to apply global resource tags, labels, and annotations to all yaml objects -def generate_global_transformations( - global_labels: Dict[str, str], global_annotations: Dict[str, str] -): - """ - Generates global transformations for resources. - """ - - def global_transform( - args: pulumi.ResourceTransformationArgs, - ) -> Optional[pulumi.ResourceTransformationResult]: - props = args.props - - if "metadata" in props: - set_resource_metadata(props["metadata"], global_labels, global_annotations) - elif "spec" in props and isinstance(props["spec"], dict): - if "metadata" in props["spec"]: - set_resource_metadata( - props["spec"]["metadata"], global_labels, global_annotations - ) - - return pulumi.ResourceTransformationResult(props, args.opts) - - pulumi.runtime.register_stack_transformation(global_transform) - - -# Function to fetch the latest stable version of a Helm chart from a helm chart index.yaml url -def get_latest_helm_chart_version(repo_url: str, chart_name: str) -> str: - """ - Fetches the latest stable version of a Helm chart from the given repository URL. - - Args: - repo_url (str): The base URL of the Helm repository. - chart_name (str): The name of the Helm chart. - - Returns: - str: The latest stable version of the chart. 
- """ - try: - index_url = repo_url.rstrip("/") + "/index.yaml" - - logging.info(f"Fetching Helm repository index from URL: {index_url}") - response = requests.get(index_url) - response.raise_for_status() - - index = yaml.safe_load(response.content) - if chart_name in index["entries"]: - chart_versions = index["entries"][chart_name] - stable_versions = [ - v for v in chart_versions if is_stable_version(v["version"]) - ] - if not stable_versions: - logging.info(f"No stable versions found for chart '{chart_name}'.") - return "Chart not found" - latest_chart = max( - stable_versions, key=lambda x: parse_version(x["version"]) - ) - return latest_chart["version"].lstrip("v") - else: - logging.info(f"No chart named '{chart_name}' found in repository.") - return "Chart not found" - - except requests.RequestException as e: - logging.error(f"Error fetching Helm repository index: {e}") - return f"Error fetching data: {e}" - except yaml.YAMLError as e: - logging.error(f"Error parsing Helm repository index YAML: {e}") - return f"Error parsing YAML: {e}" - - -# Sanity check Helm chart versions for stable releases -def is_stable_version(version_str: str) -> bool: - """ - Determines if a version string represents a stable version. - - Args: - version_str (str): The version string to check. - - Returns: - bool: True if the version is stable, False otherwise. - """ - try: - parsed_version = parse_version(version_str) - return ( - isinstance(parsed_version, Version) - and not parsed_version.is_prerelease - and not parsed_version.is_devrelease - ) - except InvalidVersion: - return False - - -# Function to extract the repository name from a Git remote URL -def extract_repo_name(remote_url: str) -> str: - """ - Extracts the repository name from a Git remote URL. - - Args: - remote_url (str): The Git remote URL. - - Returns: - str: The repository name. 
- """ - match = re.search(r"[:/]([^/:]+/[^/\.]+)(\.git)?$", remote_url) - if match: - return match.group(1) - return remote_url - - -# Function to wait for a list of CRDs to be present -def wait_for_crds( - crd_names: List[str], - k8s_provider: k8s.Provider, - depends_on: List[pulumi.Resource], - parent: pulumi.Resource, -) -> List[pulumi.Resource]: - """ - Waits for the specified CRDs to be present and ensures dependencies. - - Args: - crd_names (List[str]): A list of CRD names. - k8s_provider (k8s.Provider): The Kubernetes provider. - depends_on (List[pulumi.Resource]): A list of dependencies. - parent (pulumi.Resource): The parent resource. - - Returns: - List[pulumi.Resource]: The CRD resources or an empty list during preview. - """ - - # Instantiate crds list to store retrieved CRD resources with enforced type safety for k8s.apiextensions.v1.CustomResourceDefinition - crds: List[pulumi.Resource] = [] - - for crd_name in crd_names: - try: - crd = k8s.apiextensions.v1.CustomResourceDefinition.get( - resource_name=f"crd-{crd_name}", - id=crd_name, - opts=pulumi.ResourceOptions( - provider=k8s_provider, - depends_on=depends_on, - parent=parent, - ), - ) - crds.append(crd) - except Exception: - if pulumi.runtime.is_dry_run(): - pulumi.log.info(f"CRD {crd_name} not found, creating dummy CRD.") - dummy_crd = create_dummy_crd(crd_name, k8s_provider, depends_on, parent) - if dummy_crd: - crds.append(dummy_crd) - - return crds - - -# HACK: Create a dummy CRD definition to use during pulumi dry_run / preview runs if CRDs are not found. -# TODO: Solve this in a more elegant way. -def create_dummy_crd( - crd_name: str, - k8s_provider: k8s.Provider, - depends_on: List[pulumi.Resource], - parent: pulumi.Resource, -) -> Optional[k8s.yaml.ConfigFile]: - """ - Create a dummy CRD definition to use during preview runs. - - Args: - crd_name (str): The name of the CRD. - k8s_provider (k8s.Provider): The Kubernetes provider. 
- depends_on (List[pulumi.Resource]): A list of dependencies. - parent (pulumi.Resource): The parent resource. - - Returns: - Optional[k8s.yaml.ConfigFile]: The dummy CRD resource. - """ - parts = crd_name.split(".") - plural = parts[0] - group = ".".join(parts[1:]) - kind = "".join(word.title() for word in plural.split("_")) - - dummy_crd_yaml_template = """ -apiVersion: apiextensions.k8s.io/v1 -kind: CustomResourceDefinition -metadata: - name: {metadata_name} -spec: - group: {group} - names: - plural: {plural} - kind: {kind} - scope: Namespaced - versions: - - name: v1 - served: true - storage: true -""" - - dummy_crd_yaml = dummy_crd_yaml_template.format( - metadata_name=f"{plural}.{group}", - group=group, - plural=plural, - kind=kind, - ) - - try: - with tempfile.NamedTemporaryFile(delete=False, mode="w") as temp_file: - temp_file.write(dummy_crd_yaml) - temp_file_path = temp_file.name - - dummy_crd = k8s.yaml.ConfigFile( - "dummy-crd-{}".format(crd_name), - file=temp_file_path, - opts=pulumi.ResourceOptions( - parent=parent, - depends_on=depends_on, - provider=k8s_provider, - ), - ) - return dummy_crd - finally: - os.unlink(temp_file_path) diff --git a/pulumi/modules/aws/resources.py b/pulumi/modules/aws/resources.py deleted file mode 100644 index ef1251d..0000000 --- a/pulumi/modules/aws/resources.py +++ /dev/null @@ -1,717 +0,0 @@ -# pulumi/modules/aws/resources.py - -""" -This module defines reusable resources for the AWS modules available as type-safe functions. -The module defines the following resources: - - - create_organization: Defines the function to create an organization. - - create_organizational_unit: Defines the function to create an organizational unit. - - create_account: Defines the function to create an account. - - create_control_tower: Defines the function to create an AWS Control Tower. - - create_vpc: Defines the function to create a VPC. - - create_subnet: Defines the function to create a subnet. 
- - create_security_group: Defines the function to create a security group. - - create_internet_gateway: Defines the function to create an internet gateway. - - create_route_table: Defines the function to create a route table. - - create_route: Defines the function to create a route. - - create_subnet_route_table_association: Defines the function to create a subnet route table association. - - create_security_group_rule: Defines the function to create a security group rule. - - create_ec2_instance: Defines the function to create an EC2 instance. - - todo: Add more resources as needed. -""" - -from typing import Dict, List, Any, Tuple, Optional -import pulumi -import pulumi_aws as aws -from pulumi import ResourceOptions, log -from .types import ( - ControlTowerConfig, - IAMUserConfig, - TenantAccountConfig, - AWSConfig, - GlobalTags, -) -from core.metadata import ( - set_global_labels, - set_global_annotations, - generate_compliance_labels, -) -from core.utils import set_resource_metadata - - -def fetch_sts_identity(aws_provider: aws.Provider) -> pulumi.Output[Dict[str, str]]: - try: - identity = aws.get_caller_identity( - opts=pulumi.InvokeOptions(provider=aws_provider) - ) - - # Map the output to a dictionary with explicit keys - return pulumi.Output.from_input( - { - "account_id": identity.account_id, - "arn": identity.arn, - "id": identity.id, - "user_id": identity.user_id, - } - ) - except Exception as e: - log.error(f"Error fetching STS Caller Identity: {str(e)}") - raise - - -def create_s3_bucket( - bucket_name: str, - aws_provider: aws.Provider, -) -> aws.s3.Bucket: - """ - Creates an S3 Bucket with the specified tags. - - Args: - bucket_name (str): The name of the bucket. - tags (dict): The tags to apply to the bucket. - provider (aws.Provider): The AWS Provider. - compliance_config (dict): The compliance configuration. - - Returns: - aws.s3.Bucket: The created S3 bucket. 
- """ - # Compliance labels now part of global transformations; ensure bucket tags are included. - bucket = aws.s3.Bucket(bucket_name, opts=ResourceOptions(provider=aws_provider)) - - return bucket - - -def get_organization_details(provider: aws.Provider): - try: - return aws.organizations.get_organization( - opts=pulumi.InvokeOptions(provider=aws_provider) - ) - except Exception as e: - log.warn(f"Failed to get existing organization: {str(e)}") - return None - - -def setup_organization_units( - org_details, config: AWSConfig, tags: dict, aws_provider: aws.Provider -): - if org_details.roots: - root_id = org_details.roots[0].id - if config.control_tower.enabled: - ou = aws.organizations.OrganizationalUnit( - "example-ou", - name="example-ou", - parent_id=root_id, - opts=pulumi.ResourceOptions(provider=aws_provider), - ) - else: - log.warn("No roots found in the organization.") - - -def create_organization(aws_provider: aws.Provider) -> aws.organizations.Organization: - """ - Creates an AWS Organization with all features enabled. - - Args: - aws_provider (aws.Provider): The AWS provider. - - Returns: - aws.organizations.Organization: The AWS Organization resource. - """ - try: - organization = aws.organizations.Organization( - resource_name="konductor-scip-dev", - feature_set="ALL", - opts=ResourceOptions(provider=aws_provider), - ) - - # Use .apply to log the organization ID - organization.id.apply( - lambda org_id: log.info(f"Organization created with ID: {org_id}") - ) - - return organization - - except Exception as e: - log.error(f"Failed to create organization: {str(e)}") - raise - - -def get_organization_root_id( - organization_data: aws.organizations.GetOrganizationResult, -) -> str: - """ - Retrieves the root ID of the AWS Organization from the organization data. - - Args: - organization_data: The organization data obtained from get_organization. - - Returns: - str: The root ID. 
- """ - try: - # Get the roots from the organization data - if organization_data.roots: - root = organization_data.roots[0] - root_id = root.id - log.info(f"Organization Root ID: {root_id}") - return root_id - else: - raise Exception("No roots found in the organization") - except Exception as e: - log.error(f"Error fetching organization roots: {str(e)}") - raise - - -def get_or_create_organization( - aws_provider: aws.Provider, -) -> Tuple[aws.organizations.Organization, aws.organizations.GetOrganizationResult]: - """ - Retrieves the existing AWS Organization or creates a new one if it doesn't exist. - - Returns: - Tuple[aws.organizations.Organization, aws.organizations.GetOrganizationResult]: The AWS Organization resource and the organization data. - """ - try: - # Get existing organization data - organization_data = aws.organizations.get_organization( - opts=pulumi.InvokeOptions(provider=aws_provider) - ) - log.info(f"Found existing Organization with ID: {organization_data.id}") - - # Create an Organization resource referencing the existing organization - organization = aws.organizations.Organization.get( - resource_name="existing_organization", - id=organization_data.id, - opts=pulumi.ResourceOptions(provider=aws_provider), - ) - return organization, organization_data - - except Exception as e: - log.warn(f"No existing organization found, creating a new one: {str(e)}") - # If you have permissions to create an organization then we can uncomment the following to create one - # organization = create_organization(aws_provider) - # return organization - raise Exception("Unable to retrieve or create the AWS Organization") - - -def create_organizational_units( - organization: aws.organizations.Organization, - root_id: str, - ou_names: List[str], - aws_provider: aws.Provider, -) -> Dict[str, aws.organizations.OrganizationalUnit]: - """ - Creates Organizational Units (OUs) under the specified AWS Organization. - - Args: - organization: The AWS Organization resource. 
- root_id: The root ID of the organization. - ou_names: List of OU names to create. - aws_provider: The AWS provider. - - Returns: - Dict[str, aws.organizations.OrganizationalUnit]: Created OUs. - """ - ou_map = {} - - if root_id: - for ou_name in ou_names: - ou = aws.organizations.OrganizationalUnit( - resource_name=f"ou_{ou_name.lower()}", - name=ou_name, - parent_id=root_id, - opts=ResourceOptions(provider=aws_provider, parent=organization), - ) - ou_map[ou_name] = ou - else: - log.warn("Root ID is not available; cannot create Organizational Units.") - - return ou_map - - -def setup_control_tower(control_tower_config: ControlTowerConfig) -> None: - """ - Sets up AWS Control Tower based on the provided configuration. - - Args: - control_tower_config (ControlTowerConfig): The Control Tower configuration. - """ - if control_tower_config.enabled: - # Placeholder for Control Tower setup logic - # AWS Control Tower does not currently support full automation via API/IaC - log.info( - "AWS Control Tower setup is enabled. Manual configuration may be required." - ) - else: - log.info("AWS Control Tower setup is disabled.") - - -def create_iam_users( - iam_users: List[IAMUserConfig], tags: Dict[str, str], aws_provider: aws.Provider -) -> None: - """ - Creates IAM users and associates them with groups and policies. - - Args: - iam_users (List[IAMUserConfig]): A list of IAMUserConfig objects. - tags (Dict[str, str]): The tags to apply to the IAM resources. 
- """ - for user_config in iam_users: - iam_user = aws.iam.User( - resource_name=user_config.name, - name=user_config.name, - opts=pulumi.ResourceOptions( - provider=aws_provider, - ), - ) - - for group_name in user_config.groups: - iam_group = aws.iam.Group( - resource_name=f"group_{group_name}", - name=group_name, - opts=pulumi.ResourceOptions( - provider=aws_provider, - ), - ) - aws.iam.UserGroupMembership( - resource_name=f"{user_config.name}_{group_name}_membership", - user=iam_user.name, - groups=[iam_group.name], - opts=pulumi.ResourceOptions( - provider=aws_provider, - ), - ) - - for policy_arn in user_config.policies: - aws.iam.UserPolicyAttachment( - resource_name=f"{user_config.name}_{policy_arn.split('/')[-1]}", - user=iam_user.name, - policy_arn=policy_arn, - opts=pulumi.ResourceOptions( - provider=aws_provider, - ), - ) - - -def create_tenant_accounts( - organization: aws.organizations.Organization, - ou: aws.organizations.OrganizationalUnit, - tenant_configs: Dict[str, TenantAccountConfig], - aws_provider: aws.Provider, -) -> List[aws.organizations.Account]: - """ - Creates tenant accounts under the specified Organizational Unit. - - Args: - organization: The AWS Organization resource. - ou: The Organizational Unit resource. - tenant_configs: Tenant account configurations. - aws_provider: The AWS provider. - - Returns: - List[aws.organizations.Account]: Tenant account resources. 
- """ - tenant_accounts = [] - ou_id = ou.id if ou else None - - if ou_id: - for tenant_name, tenant_config in tenant_configs.items(): - tenant_account = aws.organizations.Account( - resource_name=f"{tenant_name}_account", - email=tenant_config.email, - name=tenant_config.name, - parent_id=ou_id, - opts=ResourceOptions(provider=aws_provider, parent=organization), - ) - tenant_accounts.append(tenant_account) - else: - log.warn("Organizational Unit ID not found.") - - return tenant_accounts - - -def assume_role_in_tenant_account( - tenant_account: aws.organizations.Account, - role_name: str, - region: str, - aws_provider: aws.Provider, -) -> aws.Provider: - """ - Assumes a role in the tenant account to perform operations. - - Args: - tenant_account: The tenant AWS account. - role_name: The name of the role to assume. - region: The AWS region. - aws_provider: The AWS provider. - - Returns: - aws.Provider: AWS provider configured for the tenant account. - """ - return tenant_account.id.apply( - lambda account_id: aws.Provider( - f"tenant_provider_{account_id}", - assume_role=aws.ProviderAssumeRoleArgs( - role_arn=f"arn:aws:iam::{account_id}:role/{role_name}", - session_name="PulumiSession", - ), - region=region, - ) - ) - - -def deploy_tenant_resources( - tenant_provider: aws.Provider, - tenant_account: aws.organizations.Account, - tenant_config: TenantAccountConfig, -) -> None: - """ - Deploys resources in the tenant account based on the configuration. - - Args: - tenant_provider: The AWS provider for the tenant account. - tenant_account: The tenant AWS account. - tenant_config: Configuration for the tenant account. - """ - if not tenant_config: - log.warn( - f"Configuration for tenant account '{tenant_account.name}' is missing." 
- ) - return - - if "bucket" in tenant_config.features: - bucket = aws.s3.Bucket( - resource_name=f"{tenant_account.name}_bucket", - bucket=tenant_account.name.apply(lambda x: f"{x}-bucket"), - acl="private", - opts=ResourceOptions(provider=tenant_provider, parent=tenant_account), - ) - pulumi.export(f"{tenant_account.name}_bucket_name", bucket.bucket) - - if "ec2" in tenant_config.features: - ec2_instance = aws.ec2.Instance( - resource_name=f"{tenant_account.name}_instance", - ami="ami-0c94855ba95c71c99", - instance_type="t2.micro", - opts=ResourceOptions(provider=tenant_provider, parent=tenant_account), - ) - pulumi.export(f"{tenant_account.name}_instance_id", ec2_instance.id) - - -def create_vpc( - vpc_name: str, - cidr_block: str, - aws_provider: aws.Provider, - opts: ResourceOptions = None, -) -> aws.ec2.Vpc: - """ - Creates a VPC with the specified configuration. - - Args: - vpc_name (str): The name of the VPC. - cidr_block (str): The CIDR block for the VPC. - provider (aws.Provider): The AWS provider to use. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.Vpc: The created VPC resource. - """ - if opts is None: - opts = ResourceOptions() - vpc = aws.ec2.Vpc( - resource_name=vpc_name, - cidr_block=cidr_block, - enable_dns_hostnames=True, - enable_dns_support=True, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return vpc - - -def create_subnet( - subnet_name: str, - cidr_block: str, - vpc_id: str, - aws_provider: aws.Provider, - opts: ResourceOptions = None, -) -> aws.ec2.Subnet: - """ - Creates a subnet within the specified VPC. - - Args: - subnet_name (str): The name of the subnet. - cidr_block (str): The CIDR block for the subnet. - vpc_id (str): The ID of the VPC. - provider (aws.Provider): The AWS provider to use. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.Subnet: The created subnet resource. 
- """ - if opts is None: - opts = ResourceOptions() - subnet = aws.ec2.Subnet( - resource_name=subnet_name, - cidr_block=cidr_block, - vpc_id=vpc_id, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return subnet - - -def create_security_group( - sg_name: str, - vpc_id: str, - aws_provider: aws.Provider, - description: str = "Default security group", - opts: ResourceOptions = None, -) -> aws.ec2.SecurityGroup: - """ - Creates a security group within the specified VPC. - - Args: - sg_name (str): The name of the security group. - vpc_id (str): The ID of the VPC. - provider (aws.Provider): The AWS provider to use. - description (str, optional): Description of the security group. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.SecurityGroup: The created security group resource. - """ - if opts is None: - opts = ResourceOptions() - sg = aws.ec2.SecurityGroup( - resource_name=sg_name, - name=sg_name, - description=description, - vpc_id=vpc_id, - ingress=[ - aws.ec2.SecurityGroupIngressArgs( - protocol="tcp", - from_port=22, - to_port=22, - cidr_blocks=["0.0.0.0/0"], - ), - aws.ec2.SecurityGroupIngressArgs( - protocol="tcp", - from_port=80, - to_port=80, - cidr_blocks=["0.0.0.0/0"], - ), - ], - egress=[ - aws.ec2.SecurityGroupEgressArgs( - protocol="-1", - from_port=0, - to_port=0, - cidr_blocks=["0.0.0.0/0"], - ), - ], - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return sg - - -def create_internet_gateway( - igw_name: str, - vpc_id: str, - aws_provider: aws.Provider, - opts: ResourceOptions = None, -) -> aws.ec2.InternetGateway: - """ - Creates an Internet Gateway and attaches it to the specified VPC. - - Args: - igw_name (str): The name of the Internet Gateway. - vpc_id (str): The ID of the VPC. - provider (aws.Provider): The AWS provider to use. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.InternetGateway: The created Internet Gateway resource. 
- """ - if opts is None: - opts = ResourceOptions() - igw = aws.ec2.InternetGateway( - resource_name=igw_name, - vpc_id=vpc_id, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return igw - - -def create_route_table( - rt_name: str, - vpc_id: str, - aws_provider: aws.Provider, - opts: ResourceOptions = None, -) -> aws.ec2.RouteTable: - """ - Creates a route table within the specified VPC. - - Args: - rt_name (str): The name of the route table. - vpc_id (str): The ID of the VPC. - provider (aws.Provider): The AWS provider to use. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.RouteTable: The created route table resource. - """ - if opts is None: - opts = ResourceOptions() - rt = aws.ec2.RouteTable( - resource_name=rt_name, - vpc_id=vpc_id, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return rt - - -def create_route( - route_name: str, - route_table_id: str, - destination_cidr_block: str, - gateway_id: str, - aws_provider: aws.Provider, - opts: ResourceOptions = None, -) -> aws.ec2.Route: - """ - Creates a route in the specified route table. - - Args: - route_name (str): The name of the route. - route_table_id (str): The ID of the route table. - destination_cidr_block (str): The destination CIDR block. - gateway_id (str): The gateway ID (e.g., Internet Gateway ID). - provider (aws.Provider): The AWS provider to use. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.Route: The created route resource. 
- """ - if opts is None: - opts = ResourceOptions() - route = aws.ec2.Route( - resource_name=route_name, - route_table_id=route_table_id, - destination_cidr_block=destination_cidr_block, - gateway_id=gateway_id, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return route - - -def create_subnet_route_table_association( - association_name: str, - subnet_id: str, - route_table_id: str, - aws_provider: aws.Provider, - opts: ResourceOptions = None, -) -> aws.ec2.RouteTableAssociation: - """ - Associates a subnet with a route table. - - Args: - association_name (str): The name of the association. - subnet_id (str): The ID of the subnet. - route_table_id (str): The ID of the route table. - provider (aws.Provider): The AWS provider to use. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.RouteTableAssociation: The created association resource. - """ - if opts is None: - opts = ResourceOptions() - association = aws.ec2.RouteTableAssociation( - resource_name=association_name, - subnet_id=subnet_id, - route_table_id=route_table_id, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return association - - -def create_security_group_rule( - rule_name: str, - security_group_id: str, - type: str, # 'ingress' or 'egress' - protocol: str, - from_port: int, - to_port: int, - cidr_blocks: List[str], - aws_provider: aws.Provider, - opts: ResourceOptions = None, -) -> aws.ec2.SecurityGroupRule: - """ - Creates a security group rule. - - Args: - rule_name (str): The name of the rule. - security_group_id (str): The ID of the security group. - type (str): The type of rule ('ingress' or 'egress'). - protocol (str): The protocol (e.g., 'tcp', 'udp', '-1' for all). - from_port (int): The starting port. - to_port (int): The ending port. - cidr_blocks (List[str]): List of CIDR blocks. - provider (aws.Provider): The AWS provider to use. - opts (ResourceOptions, optional): Pulumi resource options. 
- - Returns: - aws.ec2.SecurityGroupRule: The created security group rule resource. - """ - if opts is None: - opts = ResourceOptions() - rule = aws.ec2.SecurityGroupRule( - resource_name=rule_name, - security_group_id=security_group_id, - type=type, - protocol=protocol, - from_port=from_port, - to_port=to_port, - cidr_blocks=cidr_blocks, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return rule - - -def create_ec2_instance( - instance_name: str, - ami: str, - instance_type: str, - subnet_id: str, - security_group_ids: List[str], - aws_provider: aws.Provider, - key_name: Optional[str] = None, - opts: ResourceOptions = None, -) -> aws.ec2.Instance: - """ - Creates an EC2 instance with the specified configuration. - - Args: - instance_name (str): The name of the EC2 instance. - ami (str): The AMI ID to use for the instance. - instance_type (str): The instance type (e.g., 't2.micro'). - subnet_id (str): The ID of the subnet to launch the instance in. - security_group_ids (List[str]): List of security group IDs. - provider (aws.Provider): The AWS provider to use. - key_name (Optional[str]): The name of the SSH key pair. - opts (ResourceOptions, optional): Pulumi resource options. - - Returns: - aws.ec2.Instance: The created EC2 instance resource. - """ - if opts is None: - opts = ResourceOptions() - instance = aws.ec2.Instance( - resource_name=instance_name, - ami=ami, - instance_type=instance_type, - subnet_id=subnet_id, - vpc_security_group_ids=security_group_ids, - key_name=key_name, - opts=opts.merge(ResourceOptions(provider=aws_provider)), - ) - return instance diff --git a/pulumi/modules/aws/types.py b/pulumi/modules/aws/types.py deleted file mode 100644 index c20ea27..0000000 --- a/pulumi/modules/aws/types.py +++ /dev/null @@ -1,152 +0,0 @@ -# pulumi/modules/aws/types.py - -""" -AWS Module Configuration Types - -Defines data classes for AWS module configurations using Pydantic for type safety and validation. 
"""
AWS module configuration schemas.

Ensures integration of compliance configurations.

Classes:
- IAMUserConfig: IAM user configuration.
- ControlTowerConfig: AWS Control Tower configuration.
- TenantAccountConfig: Tenant account configuration.
- GlobalTags: Global tags for resources.
- AWSConfig: Aggregated AWS configurations, including compliance settings.
"""

from typing import List, Dict, Optional, Any
from pydantic import BaseModel, Field, root_validator, validator
from core.types import ComplianceConfig


class IAMUserConfig(BaseModel):
    """Configuration for an IAM User in AWS."""

    name: str = Field(..., description="Name of the IAM user.")
    email: str = Field(..., description="Email address of the IAM user.")
    groups: List[str] = Field(
        default_factory=list, description="IAM groups the user belongs to."
    )
    policies: List[str] = Field(
        default_factory=list, description="IAM policy ARNs attached to the user."
    )


class ControlTowerConfig(BaseModel):
    """Configuration for AWS Control Tower."""

    enabled: bool = Field(default=False, description="Enable AWS Control Tower.")
    organizational_unit_name: str = Field(
        default="LandingZone", description="Name of the Organizational Unit."
    )
    execution_role_name: str = Field(
        default="AWSControlTowerExecution", description="Name of the execution role."
    )
    execution_role_arn: Optional[str] = Field(
        None, description="ARN of the execution role."
    )
    admin_role_name: str = Field(
        default="AWSControlTowerAdmin", description="Name of the admin role."
    )
    admin_role_arn: Optional[str] = Field(None, description="ARN of the admin role.")
    audit_role_name: str = Field(
        default="AWSControlTowerAudit", description="Name of the audit role."
    )
    audit_role_arn: Optional[str] = Field(None, description="ARN of the audit role.")
    log_archive_bucket: Optional[str] = Field(
        None, description="Name of the log archive bucket."
    )

    # BUG FIX: the original used `@validator("enabled", always=True)` and read
    # execution_role_arn / admin_role_arn out of `values`.  In pydantic v1,
    # `values` only contains fields declared *before* the field being
    # validated, and both ARNs are declared after `enabled` — so the lookups
    # always returned None and enabling Control Tower unconditionally raised.
    # A root_validator runs after all field validation and sees every field.
    @root_validator
    def validate_control_tower_fields(cls, values):
        """Require the execution and admin role ARNs when Control Tower is enabled."""
        if values.get("enabled"):
            required_fields = ["execution_role_arn", "admin_role_arn"]
            missing = [field for field in required_fields if not values.get(field)]
            if missing:
                raise ValueError(
                    f"Missing fields for Control Tower: {', '.join(missing)}"
                )
        return values


class TenantAccountConfig(BaseModel):
    """Configuration for a Tenant Account within AWS."""

    name: str = Field(..., description="Name of the tenant account.")
    email: str = Field(
        ..., description="Email address associated with the tenant account."
    )
    administrators: List[str] = Field(
        default_factory=list, description="Administrators of the tenant account."
    )
    users: List[str] = Field(
        default_factory=list, description="Users of the tenant account."
    )
    features: List[str] = Field(
        default_factory=list, description="Enabled features for the tenant account."
    )
    aws: Dict[str, Any] = Field(
        default_factory=dict,
        description="AWS-specific configuration for the tenant account.",
    )
    tags: Dict[str, str] = Field(
        default_factory=dict, description="Tags for resources in the tenant account."
    )


class GlobalTags(BaseModel):
    """Global tags to apply to all AWS resources."""

    project: str = Field(default="konductor", description="Project name.")
    managed_by: str = Field(
        default="NASA_SCIP_OPERATIONS", description="Managed by identifier."
    )


class AWSConfig(BaseModel):
    """Aggregated configuration class for AWS module settings."""

    enabled: bool = Field(default=True, description="Enable the AWS module.")
    profile: str = Field(default="main", description="AWS CLI profile to use.")
    region: str = Field(default="us-west-2", description="AWS region for deployment.")
    account_id: str = Field(..., description="AWS account ID.")
    bucket: str = Field(..., description="Name of the S3 bucket for state storage.")
    control_tower: ControlTowerConfig = Field(
        default_factory=ControlTowerConfig,
        description="AWS Control Tower configuration.",
    )
    iam_users: List[IAMUserConfig] = Field(
        default_factory=list, description="IAM user configurations."
    )
    landingzones: List[TenantAccountConfig] = Field(
        default_factory=list, description="Tenant account configurations."
    )
    global_tags: GlobalTags = Field(
        default_factory=GlobalTags, description="Global tags for all resources."
    )
    compliance: ComplianceConfig = Field(
        default_factory=ComplianceConfig, description="Compliance configuration."
    )
    version: str = Field(
        default="0.0.1", description="Version of the local AWS module."
    )

    @validator("region")
    def validate_region(cls, v):
        """Restrict deployment to the supported US regions."""
        # NOTE(review): deliberately US-only; extend this list if the module
        # is ever cleared for other regions.
        valid_regions = ["us-east-1", "us-east-2", "us-west-1", "us-west-2"]
        if v not in valid_regions:
            raise ValueError(f"Invalid AWS region: {v}")
        return v

    @classmethod
    def merge(cls, user_config: Dict[str, Any]) -> "AWSConfig":
        """Merge user configuration with defaults, handling compliance integration.

        Args:
            user_config: Raw config mapping; the "compliance" key is split out
                and merged through ComplianceConfig, everything else is passed
                to the model constructor (pydantic fills in defaults).

        Returns:
            AWSConfig: Validated, merged configuration.
        """
        aws_specific_keys = {k for k in user_config.keys() if k != "compliance"}
        compliance_config = user_config.get("compliance", {})
        aws_config = {k: user_config[k] for k in aws_specific_keys}

        # Build compliance configuration separately so compliance defaults are
        # applied even when the user supplies no "compliance" section.
        compliance = ComplianceConfig.merge(compliance_config)
        aws_config["compliance"] = compliance

        return cls(**aws_config)
Smith" - metadata: - globalLabels: - project: "SCIP Ops" - managedBy: "NASA_SCIP_OPERATIONS" - globalAnnotations: - purpose: "production" - owner: "scip-ops-team" - aws: - enabled: true - account_id: "111122223333" - profile: smdc-cba - region: us-east-2 - bucket: scip-ops-prod-state-bucket - controlTower: - enabled: true - organizationalUnitName: "OperationsOU" - executionRoleName: "AWSControlTowerExecution" - executionRoleArn: "arn:aws:iam::111122223333:role/AWSControlTowerExecution" - adminRoleName: "AWSControlTowerAdmin" - adminRoleArn: "arn:aws:iam::111122223333:role/AWSControlTowerAdmin" - auditRoleName: "AWSControlTowerAudit" - auditRoleArn: "arn:aws:iam::111122223333:role/AWSControlTowerAudit" - logArchiveBucket: "aws-control-tower-logs-111122223333" - iamUsers: - - name: "alice" - email: "alice@example.com" - groups: ["Developers"] - policies: ["arn:aws:iam::aws:policy/AdministratorAccess"] - - name: "bob" - email: "bob@example.com" - groups: ["Operations"] - policies: ["arn:aws:iam::aws:policy/ReadOnlyAccess"] - landingzones: - - name: "Tenant1" - email: "tenant1@example.com" - administrators: ["admin1", "admin2"] - users: ["user1", "user2"] - features: ["bucket", "ec2"] - aws: - vpcCidr: "10.0.0.0/16" - tags: - Environment: "Production" - kubernetes: - context: scip-prod-cluster - kubeconfig: "${KUBECONFIG}" diff --git a/pulumi/requirements.txt b/requirements.txt similarity index 100% rename from pulumi/requirements.txt rename to requirements.txt diff --git a/pulumi/stacks/.gitkeep b/stacks/.gitkeep similarity index 100% rename from pulumi/stacks/.gitkeep rename to stacks/.gitkeep diff --git a/staging/aux/documentation.py b/staging/aux/documentation.py new file mode 100644 index 0000000..ade512c --- /dev/null +++ b/staging/aux/documentation.py @@ -0,0 +1,249 @@ +# pulumi/core/documentation.py + +import inspect +import ast +import re +from typing import Dict, Any, List, Optional, Type, get_type_hints +from pathlib import Path +from dataclasses import 
# pulumi/core/documentation.py

import inspect
import re
from dataclasses import dataclass
from pathlib import Path
from typing import Any, List, Optional, Type, get_type_hints

from pydantic import BaseModel


@dataclass
class DocSection:
    """Represents a section of documentation."""

    title: str
    content: str
    # FIX: the original annotated this List[...] but defaulted it to None;
    # Optional matches the actual contract (None == "no subsections").
    subsections: Optional[List["DocSection"]] = None

    def to_markdown(self, level: int = 1) -> str:
        """Convert this section (and its subsections, one level deeper) to markdown."""
        markdown = f"{'#' * level} {self.title}\n\n{self.content}\n\n"
        if self.subsections:
            for subsection in self.subsections:
                markdown += subsection.to_markdown(level + 1)
        return markdown


class DocGenerator:
    """Generates comprehensive documentation for modules."""

    def __init__(self, output_dir: Path):
        """
        Initialize DocGenerator.

        Args:
            output_dir: Directory where documentation will be written
                (created, with parents, if it does not exist).
        """
        self.output_dir = output_dir
        self.output_dir.mkdir(parents=True, exist_ok=True)

    def generate_module_docs(self, module: Any) -> str:
        """
        Generate documentation for a module.

        Writes "<module name>.md" into ``output_dir`` and returns the text.

        Args:
            module: The module to document

        Returns:
            str: Generated markdown documentation
        """
        module_name = module.__name__
        module_doc = inspect.getdoc(module) or "No module documentation available."

        sections = [
            DocSection("Overview", module_doc),
            self._generate_configuration_section(module),
            self._generate_interface_section(module),
            self._generate_examples_section(module),
            self._generate_schema_section(module),
        ]

        # Title, table of contents, then every section in order.
        markdown = f"# {module_name}\n\n"
        markdown += self._generate_toc(sections)
        markdown += "\n---\n\n"
        for section in sections:
            markdown += section.to_markdown()

        doc_file = self.output_dir / f"{module_name}.md"
        doc_file.write_text(markdown)
        return markdown

    def _generate_toc(self, sections: List[DocSection]) -> str:
        """Generate a numbered table of contents with GitHub-style anchors."""
        toc = "## Table of Contents\n\n"
        for i, section in enumerate(sections, 1):
            toc += f"{i}. [{section.title}](#{section.title.lower().replace(' ', '-')})\n"
            if section.subsections:
                for j, subsection in enumerate(section.subsections, 1):
                    toc += f"   {i}.{j}. [{subsection.title}](#{subsection.title.lower().replace(' ', '-')})\n"
        return toc + "\n"

    def _generate_configuration_section(self, module: Any) -> DocSection:
        """Document every configuration class found in the module as field tables."""
        config_classes = self._find_config_classes(module)
        content = "## Configuration\n\n"

        for config_class in config_classes:
            content += f"### {config_class.__name__}\n\n"

            class_doc = inspect.getdoc(config_class)
            if class_doc:
                content += f"{class_doc}\n\n"

            # One markdown table row per annotated field.
            content += "| Field | Type | Required | Description |\n"
            content += "|-------|------|----------|-------------|\n"

            hints = get_type_hints(config_class)
            for field_name, field_type in hints.items():
                required = "Yes" if self._is_field_required(config_class, field_name) else "No"
                desc = self._get_field_description(config_class, field_name)
                content += f"| {field_name} | `{field_type}` | {required} | {desc} |\n"

            content += "\n"

        return DocSection("Configuration", content)

    def _generate_interface_section(self, module: Any) -> DocSection:
        """Document the module's public (non-underscore) functions."""
        content = ""

        for name, obj in inspect.getmembers(module):
            if inspect.isfunction(obj) and not name.startswith('_'):
                content += self._document_function(obj)

        return DocSection("Interface", content)

    def _document_function(self, func: Any) -> str:
        """Generate markdown for one function: signature, docstring, inline example."""
        doc = inspect.getdoc(func) or "No documentation available."
        signature = inspect.signature(func)

        content = f"### `{func.__name__}`\n\n"
        content += f"```python\n{func.__name__}{signature}\n```\n\n"
        content += f"{doc}\n\n"

        # Surface an "Example:" block from the docstring, if present.
        # FIX: the original indexed split(...)[1] unconditionally, which raised
        # IndexError when "Example:" was the docstring's final line (no newline
        # after it); guard before indexing.
        if "Example:" in doc:
            parts = doc[doc.index("Example:"):].split("\n", 1)
            if len(parts) > 1:
                content += f"**Example:**\n```python\n{parts[1]}\n```\n\n"

        return content

    def _generate_examples_section(self, module: Any) -> DocSection:
        """Collect usage examples from docstrings and example_* functions."""
        examples = self._extract_examples(module)
        content = "Usage examples for this module:\n\n"

        for i, example in enumerate(examples, 1):
            content += f"### Example {i}\n\n"
            content += f"```python\n{example}\n```\n\n"

        return DocSection("Examples", content)

    def _generate_schema_section(self, module: Any) -> DocSection:
        """Document the module's pydantic models and TypedDict schemas."""
        schemas = self._find_schemas(module)
        content = "Configuration and data schemas used by this module:\n\n"

        for schema in schemas:
            content += f"### {schema.__name__}\n\n"
            if issubclass(schema, BaseModel):
                content += self._document_pydantic_model(schema)
            else:
                content += self._document_typed_dict(schema)

        return DocSection("Schemas", content)

    def _document_pydantic_model(self, model: Type[BaseModel]) -> str:
        """Generate source listing plus a field table for a pydantic (v1) model."""
        content = f"```python\n{inspect.getsource(model)}\n```\n\n"

        content += "| Field | Type | Description |\n"
        content += "|-------|------|-------------|\n"

        # NOTE: pydantic v1 API (__fields__ / field_info / type_).
        for field_name, field in model.__fields__.items():
            desc = field.field_info.description or "No description"
            content += f"| {field_name} | `{field.type_}` | {desc} |\n"

        return content + "\n"

    def _find_config_classes(self, module: Any) -> List[Type]:
        """Find all configuration classes in a module.

        NOTE(review): the `hasattr(obj, "__annotations__")` fallback matches
        nearly every class, so this is intentionally (or accidentally) broad.
        """
        return [
            obj for _, obj in inspect.getmembers(module)
            if inspect.isclass(obj) and (
                issubclass(obj, BaseModel) or
                hasattr(obj, "__annotations__")
            )
        ]

    def _is_field_required(self, cls: Type, field_name: str) -> bool:
        """Determine if a field is required (pydantic knows; TypedDict defaults to True)."""
        if issubclass(cls, BaseModel):
            return cls.__fields__[field_name].required
        return True  # Default to True for TypedDict fields

    def _get_field_description(self, cls: Type, field_name: str) -> str:
        """Get a field description from pydantic metadata or the class docstring."""
        if issubclass(cls, BaseModel):
            return cls.__fields__[field_name].field_info.description or "No description"

        # Fall back to a "name: description" line in the class docstring.
        doc = inspect.getdoc(cls) or ""
        field_doc_match = re.search(
            rf"{field_name}\s*:\s*([^\n]+)",
            doc,
            re.MULTILINE,
        )
        return field_doc_match.group(1) if field_doc_match else "No description"

    def _extract_examples(self, module: Any) -> List[str]:
        """Extract example code from the module docstring and example_* functions."""
        examples = []

        # ```python ...``` fenced blocks in the module docstring.
        doc = inspect.getdoc(module) or ""
        example_blocks = re.finditer(
            r'```python\s*(.*?)\s*```',
            doc,
            re.DOTALL,
        )
        examples.extend(match.group(1) for match in example_blocks)

        # Functions named example_* or *_example contribute their source.
        for name, obj in inspect.getmembers(module):
            if (inspect.isfunction(obj) and
                    (name.startswith('example_') or name.endswith('_example'))):
                examples.append(inspect.getsource(obj))

        return examples

    def _find_schemas(self, module: Any) -> List[Type]:
        """Find all schema classes: pydantic models plus TypedDicts (__total__)."""
        return [
            obj for _, obj in inspect.getmembers(module)
            if inspect.isclass(obj) and (
                issubclass(obj, BaseModel) or
                hasattr(obj, "__total__")  # TypedDict check
            )
        ]


def generate_module_documentation():
    """Generate documentation for all modules."""
    doc_generator = DocGenerator(Path("./docs/modules"))

    # NOTE(review): "my_module" is a placeholder import — this entry point
    # will fail until it is pointed at the real module list.
    import my_module
    doc_generator.generate_module_docs(my_module)


# Run documentation generation
if __name__ == "__main__":
    generate_module_documentation()
pulumi/modules/cert_manager/__init__.py rename to staging/modules/cluster_network_addons/__init__.py diff --git a/pulumi/modules/cluster_network_addons/deploy.py b/staging/modules/cluster_network_addons/deploy.py similarity index 100% rename from pulumi/modules/cluster_network_addons/deploy.py rename to staging/modules/cluster_network_addons/deploy.py diff --git a/pulumi/modules/cilium/__init__.py b/staging/modules/containerized_data_importer/__init__.py similarity index 100% rename from pulumi/modules/cilium/__init__.py rename to staging/modules/containerized_data_importer/__init__.py diff --git a/pulumi/modules/containerized_data_importer/deploy.py b/staging/modules/containerized_data_importer/deploy.py similarity index 100% rename from pulumi/modules/containerized_data_importer/deploy.py rename to staging/modules/containerized_data_importer/deploy.py diff --git a/pulumi/modules/containerized_data_importer/types.py b/staging/modules/containerized_data_importer/types.py similarity index 100% rename from pulumi/modules/containerized_data_importer/types.py rename to staging/modules/containerized_data_importer/types.py diff --git a/pulumi/modules/cluster_network_addons/__init__.py b/staging/modules/hostpath_provisioner/__init__.py similarity index 100% rename from pulumi/modules/cluster_network_addons/__init__.py rename to staging/modules/hostpath_provisioner/__init__.py diff --git a/pulumi/modules/hostpath_provisioner/deploy.py b/staging/modules/hostpath_provisioner/deploy.py similarity index 100% rename from pulumi/modules/hostpath_provisioner/deploy.py rename to staging/modules/hostpath_provisioner/deploy.py diff --git a/pulumi/modules/hostpath_provisioner/types.py b/staging/modules/hostpath_provisioner/types.py similarity index 100% rename from pulumi/modules/hostpath_provisioner/types.py rename to staging/modules/hostpath_provisioner/types.py diff --git a/pulumi/modules/containerized_data_importer/__init__.py b/staging/modules/kubernetes/__init__.py similarity 
index 100% rename from pulumi/modules/containerized_data_importer/__init__.py rename to staging/modules/kubernetes/__init__.py diff --git a/pulumi/modules/hostpath_provisioner/__init__.py b/staging/modules/kubernetes_dashboard/__init__.py similarity index 100% rename from pulumi/modules/hostpath_provisioner/__init__.py rename to staging/modules/kubernetes_dashboard/__init__.py diff --git a/pulumi/modules/kubernetes_dashboard/deploy.py b/staging/modules/kubernetes_dashboard/deploy.py similarity index 100% rename from pulumi/modules/kubernetes_dashboard/deploy.py rename to staging/modules/kubernetes_dashboard/deploy.py diff --git a/pulumi/modules/kubevirt/README.md b/staging/modules/kubevirt/README.md similarity index 100% rename from pulumi/modules/kubevirt/README.md rename to staging/modules/kubevirt/README.md diff --git a/pulumi/modules/kubernetes/__init__.py b/staging/modules/kubevirt/__init__.py similarity index 100% rename from pulumi/modules/kubernetes/__init__.py rename to staging/modules/kubevirt/__init__.py diff --git a/pulumi/modules/kubevirt/deploy.py b/staging/modules/kubevirt/deploy.py similarity index 100% rename from pulumi/modules/kubevirt/deploy.py rename to staging/modules/kubevirt/deploy.py diff --git a/pulumi/modules/kubevirt/types.py b/staging/modules/kubevirt/types.py similarity index 100% rename from pulumi/modules/kubevirt/types.py rename to staging/modules/kubevirt/types.py diff --git a/pulumi/modules/kubernetes_dashboard/__init__.py b/staging/modules/kv_manager/__init__.py similarity index 100% rename from pulumi/modules/kubernetes_dashboard/__init__.py rename to staging/modules/kv_manager/__init__.py diff --git a/pulumi/modules/kv_manager/deploy.py b/staging/modules/kv_manager/deploy.py similarity index 100% rename from pulumi/modules/kv_manager/deploy.py rename to staging/modules/kv_manager/deploy.py diff --git a/pulumi/modules/kubevirt/__init__.py b/staging/modules/local_path_storage/__init__.py similarity index 100% rename from 
pulumi/modules/kubevirt/__init__.py rename to staging/modules/local_path_storage/__init__.py diff --git a/pulumi/modules/local_path_storage/deploy.py b/staging/modules/local_path_storage/deploy.py similarity index 100% rename from pulumi/modules/local_path_storage/deploy.py rename to staging/modules/local_path_storage/deploy.py diff --git a/pulumi/modules/kv_manager/__init__.py b/staging/modules/multus/__init__.py similarity index 100% rename from pulumi/modules/kv_manager/__init__.py rename to staging/modules/multus/__init__.py diff --git a/pulumi/modules/multus/deploy.py b/staging/modules/multus/deploy.py similarity index 100% rename from pulumi/modules/multus/deploy.py rename to staging/modules/multus/deploy.py diff --git a/pulumi/modules/multus/types.py b/staging/modules/multus/types.py similarity index 100% rename from pulumi/modules/multus/types.py rename to staging/modules/multus/types.py diff --git a/pulumi/modules/local_path_storage/__init__.py b/staging/modules/openunison/__init__.py similarity index 100% rename from pulumi/modules/local_path_storage/__init__.py rename to staging/modules/openunison/__init__.py diff --git a/pulumi/modules/openunison/assets/alertmanager.png b/staging/modules/openunison/assets/alertmanager.png similarity index 100% rename from pulumi/modules/openunison/assets/alertmanager.png rename to staging/modules/openunison/assets/alertmanager.png diff --git a/pulumi/modules/openunison/assets/grafana.png b/staging/modules/openunison/assets/grafana.png similarity index 100% rename from pulumi/modules/openunison/assets/grafana.png rename to staging/modules/openunison/assets/grafana.png diff --git a/pulumi/modules/openunison/assets/kubevirt.png b/staging/modules/openunison/assets/kubevirt.png similarity index 100% rename from pulumi/modules/openunison/assets/kubevirt.png rename to staging/modules/openunison/assets/kubevirt.png diff --git a/pulumi/modules/openunison/assets/prometheus.png b/staging/modules/openunison/assets/prometheus.png 
similarity index 100% rename from pulumi/modules/openunison/assets/prometheus.png rename to staging/modules/openunison/assets/prometheus.png diff --git a/pulumi/modules/openunison/deploy.py b/staging/modules/openunison/deploy.py similarity index 100% rename from pulumi/modules/openunison/deploy.py rename to staging/modules/openunison/deploy.py diff --git a/pulumi/modules/openunison/encoded_assets.py b/staging/modules/openunison/encoded_assets.py similarity index 100% rename from pulumi/modules/openunison/encoded_assets.py rename to staging/modules/openunison/encoded_assets.py diff --git a/pulumi/modules/multus/__init__.py b/staging/modules/prometheus/__init__.py similarity index 100% rename from pulumi/modules/multus/__init__.py rename to staging/modules/prometheus/__init__.py diff --git a/pulumi/modules/prometheus/deploy.py b/staging/modules/prometheus/deploy.py similarity index 100% rename from pulumi/modules/prometheus/deploy.py rename to staging/modules/prometheus/deploy.py diff --git a/pulumi/modules/prometheus/types.py b/staging/modules/prometheus/types.py similarity index 100% rename from pulumi/modules/prometheus/types.py rename to staging/modules/prometheus/types.py diff --git a/pulumi/modules/openunison/__init__.py b/staging/modules/vm/__init__.py similarity index 100% rename from pulumi/modules/openunison/__init__.py rename to staging/modules/vm/__init__.py diff --git a/pulumi/modules/vm/talos.py b/staging/modules/vm/talos.py similarity index 100% rename from pulumi/modules/vm/talos.py rename to staging/modules/vm/talos.py diff --git a/pulumi/modules/vm/ubuntu.py b/staging/modules/vm/ubuntu.py similarity index 100% rename from pulumi/modules/vm/ubuntu.py rename to staging/modules/vm/ubuntu.py diff --git a/pulumi/modules/prometheus/__init__.py b/staging/providers/__init__.py similarity index 100% rename from pulumi/modules/prometheus/__init__.py rename to staging/providers/__init__.py diff --git a/pulumi/modules/vm/__init__.py 
b/staging/providers/aws/__init__.py similarity index 100% rename from pulumi/modules/vm/__init__.py rename to staging/providers/aws/__init__.py diff --git a/pulumi/providers/aws/config.py b/staging/providers/aws/config.py similarity index 100% rename from pulumi/providers/aws/config.py rename to staging/providers/aws/config.py diff --git a/pulumi/providers/aws/deployment.py b/staging/providers/aws/deployment.py similarity index 100% rename from pulumi/providers/aws/deployment.py rename to staging/providers/aws/deployment.py diff --git a/pulumi/providers/aws/resources.py b/staging/providers/aws/resources.py similarity index 100% rename from pulumi/providers/aws/resources.py rename to staging/providers/aws/resources.py diff --git a/pulumi/providers/aws/types.py b/staging/providers/aws/types.py similarity index 100% rename from pulumi/providers/aws/types.py rename to staging/providers/aws/types.py diff --git a/pulumi/providers/config.py b/staging/providers/config.py similarity index 100% rename from pulumi/providers/config.py rename to staging/providers/config.py diff --git a/pulumi/providers/__init__.py b/staging/providers/k8s/__init__.py similarity index 100% rename from pulumi/providers/__init__.py rename to staging/providers/k8s/__init__.py diff --git a/pulumi/providers/k8s/config.py b/staging/providers/k8s/config.py similarity index 100% rename from pulumi/providers/k8s/config.py rename to staging/providers/k8s/config.py diff --git a/pulumi/providers/k8s/deployment.py b/staging/providers/k8s/deployment.py similarity index 100% rename from pulumi/providers/k8s/deployment.py rename to staging/providers/k8s/deployment.py diff --git a/pulumi/providers/k8s/resources.py b/staging/providers/k8s/resources.py similarity index 100% rename from pulumi/providers/k8s/resources.py rename to staging/providers/k8s/resources.py diff --git a/pulumi/providers/k8s/types.py b/staging/providers/k8s/types.py similarity index 100% rename from pulumi/providers/k8s/types.py rename to 
staging/providers/k8s/types.py