diff --git a/.mise.toml b/.mise.toml
index e3d4fdc16..d38ffb55b 100644
--- a/.mise.toml
+++ b/.mise.toml
@@ -40,41 +40,30 @@ docker compose --file tests.yaml run --rm --no-TTY tests
 docker compose --file tests.yaml down --volumes --remove-orphans
 """
 
-[tasks."application:service:run:production"]
-description = "Run the application service"
-run = """
-docker run \
---env-file .env \
---publish 8080:8080 \
-pocketsizefund/{{arg(name="application_name")}}:latest \
-"""
-
-[tasks."application:service:run:development"]
+[tasks."application:service:run"]
 description = "Run the application service locally with hot reloading"
 run = """
-cd application/{{arg(name="service_name")}}
-uv run uvicorn src.{{arg(name="service_name")}}.main:application --reload
+cd application/{{option(name="service")}}
+uv run uvicorn src.{{option(name="service")}}.main:application --reload
 """
 
 [tasks."application:service:test:integration"]
 description = "Run integration tests"
 run = """
-cd application/{{arg(name="service_name")}}
+cd application/{{option(name="service")}}
 docker compose up --build --abort-on-container-exit --remove-orphans
 """
 
 [tasks."application:service:test:behavioral"]
 description = "Run behavioral tests"
 run = """
-cd application/{{arg(name="service_name")}}
-docker compose up --build --abort-on-container-exit
-"""
-
-[tasks."application:service:cleanup:behavioral"]
-description = "Clean up behavioral tests"
-run = """
-cd application/{{arg(name="service_name")}}
-docker compose down -v
+cd application/{{option(name="service")}}
+if [ "{{flag(name="cleanup")}}" = "true" ]
+then
+  docker compose down -v
+else
+  docker compose up --build --abort-on-container-exit
+fi
 """
 
 [tasks."lint"]
@@ -108,5 +97,12 @@ uv run pulumi down --yes --stack pocketsizefund/pocketsizefund/production
 [tasks."cli:datamanager:authorize"]
 description = "Authorize the CLI with AWS credentials"
 run = """
-aws iam attach-user-policy --user-name {{arg(user-name="user-name")}} --policy-arn ${{pulumi stack output DATAMANAGER_API_ACCESS_POLICY_ARN}}
+aws iam attach-user-policy --user-name {{option(name="user-name")}} --policy-arn $(pulumi stack output DATAMANAGER_API_ACCESS_POLICY_ARN)
+"""
+
+[tasks."dashboard:upload"]
+description = "Upload Grafana dashboard"
+run = """
+cd infrastructure
+nu upload_grafana_dashboard.nu
 """
diff --git a/application/datamanager/compose.yaml b/application/datamanager/compose.yaml
index bc5fecc11..1be7ecb99 100644
--- a/application/datamanager/compose.yaml
+++ b/application/datamanager/compose.yaml
@@ -9,13 +9,9 @@ services:
       - 8080:8080
     environment:
       - POLYGON_API_KEY=${POLYGON_API_KEY}
-      - DATA_BUCKET_NAME=${DATA_BUCKET_NAME}
-      - GOOGLE_APPLICATION_CREDENTIALS=/root/.config/gcloud/application_default_credentials.json
-      - DUCKDB_ACCESS_KEY=${DUCKDB_ACCESS_KEY}
-      - DUCKDB_SECRET=${DUCKDB_SECRET}
+      - AWS_S3_DATA_BUCKET_NAME=${AWS_S3_DATA_BUCKET_NAME}
     volumes:
       - ./:/app/datamanager
-      - ~/.config/gcloud/application_default_credentials.json:/root/.config/gcloud/application_default_credentials.json:ro
     healthcheck:
       test: ["CMD", "curl", "-f", "http://0.0.0.0:8080/health"]
       interval: 10s
diff --git a/application/datamanager/src/datamanager/main.py b/application/datamanager/src/datamanager/main.py
index 0b94244e4..63ca97015 100644
--- a/application/datamanager/src/datamanager/main.py
+++ b/application/datamanager/src/datamanager/main.py
@@ -54,10 +54,12 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]:
         polygon_api_key=os.getenv("POLYGON_API_KEY", ""),
     )
 
-    app.state.s3_client = S3Client(data_bucket_name=os.getenv("DATA_BUCKET_NAME", ""))
+    app.state.s3_client = S3Client(
+        data_bucket_name=os.getenv("AWS_S3_DATA_BUCKET_NAME", "")
+    )
 
-    duckdb_user_access_key_id = os.getenv("DUCKDB_USER_ACCESS_KEY_ID")
-    duckdb_user_access_key_secret = os.getenv("DUCKDB_USER_ACCESS_KEY_SECRET")
+    duckdb_user_access_key_id = os.getenv("AWS_IAM_DUCKDB_USER_ACCESS_KEY_ID")
+    duckdb_user_access_key_secret = os.getenv("AWS_IAM_DUCKDB_USER_ACCESS_KEY_SECRET")
 
     aws_region = os.getenv("AWS_REGION", "us-east-1")
 
     app.state.connection = duckdb.connect()
diff --git a/infrastructure/__main__.py b/infrastructure/__main__.py
index 900514fa3..39b28f053 100644
--- a/infrastructure/__main__.py
+++ b/infrastructure/__main__.py
@@ -1,72 +1,193 @@
+import json
 import tomllib
 from pathlib import Path
 
 import pulumi
+import pulumi_aws as aws
+from api import (
+    create_api_access_iam_role,
+    create_api_gateway,
+    create_knative_service_api_gateway_integrations,
+    create_virtual_private_cloud_link,
+)
 from cluster import (
     create_kubernetes_cluster,
+    create_kubernetes_cluster_role,
+    create_kubernetes_node_role,
     create_kubernetes_provider,
     update_kubernetes_cluster_access,
 )
-from environment_variables import create_environment_variables
 from images import build_image
 from ingress import (
-    create_alb_controller,
-    create_alb_controller_role,
-    create_api_access_policy,
-    create_api_gateway_with_auth,
-    create_service_ingress,
+    create_application_load_balancer,
+    create_application_load_balancer_listener,
+    create_application_load_balancer_security_group,
+    create_application_load_balancer_target_group,
 )
 from keys import create_duckdb_user_access_key
-from monitors import create_prometheus_scraper
-from publishers_subscribers import (
+from pulumi.config import Config
+from services import (
     create_knative_broker,
     create_knative_eventing_core,
     create_knative_schedule,
     create_knative_service,
     create_knative_serving_core,
     create_knative_trigger,
+    create_service_environment_variables,
 )
-from pulumi.config import Config
-from roles import (
-    create_cluster_role,
-    create_node_role,
+from vpc import (
+    create_elastic_ip,
+    create_internet_gateway,
+    create_nat_gateway,
+    create_route_table,
+    create_subnet,
+    create_virtual_private_cloud,
 )
 
 configuration = Config()
 
-cluster_role = create_cluster_role()
+virtual_private_cloud = create_virtual_private_cloud()
 
-node_role = create_node_role()
+internet_gateway = create_internet_gateway(
+    virtual_private_cloud=virtual_private_cloud,
+)
 
-kubernetes_cluster = create_kubernetes_cluster(cluster_role, node_role)
+public_route_table = create_route_table(
+    virtual_private_cloud=virtual_private_cloud,
+    internet_gateway=internet_gateway,
+)
 
-kubernetes_provider = create_kubernetes_provider(kubernetes_cluster)
+aws_region = Config("aws").get("region") or "us-east-1"
+
+availability_zones = aws.get_availability_zones(
+    state="available",
+    filters=[
+        {
+            "name": "region-name",
+            "values": [aws_region],
+        }
+    ],
+).names[:3]
+
+public_subnets = [
+    create_subnet(
+        virtual_private_cloud=virtual_private_cloud,
+        route_table=public_route_table,
+        availability_zone=availability_zones[i],
+        subnet_number=i + 1,  # 1-3
+        visibility="public",
+    )
+    for i in range(len(availability_zones))
+]
+
+elastic_ip = create_elastic_ip(virtual_private_cloud=virtual_private_cloud)
+
+nat_gateway = create_nat_gateway(
+    elastic_ip=elastic_ip,
+    public_subnet=public_subnets[0],  # one NAT gateway for cost efficiency
+)
 
-cluster_access_config = update_kubernetes_cluster_access(
-
cluster_role=cluster_role, - node_role=node_role, - kubernetes_provider=kubernetes_provider, - pulumi_user_arn=configuration.require_secret("AWS_EKS_IAM_PULUMI_USER_ARN"), - root_user_arn=configuration.require_secret("AWS_EKS_IAM_ROOT_USER_ARN"), +private_route_table = create_route_table( + virtual_private_cloud=virtual_private_cloud, + nat_gateway=nat_gateway, +) + + +private_subnets = [ + create_subnet( + virtual_private_cloud=virtual_private_cloud, + route_table=private_route_table, + availability_zone=availability_zones[i], + subnet_number=i + 4, # 4-6 + visibility="private", + ) + for i in range(len(availability_zones)) +] + + +kubernetes_cluster_role = create_kubernetes_cluster_role() + +kubernetes_node_role = create_kubernetes_node_role() + +kubernetes_cluster = create_kubernetes_cluster( + virtual_private_cloud=virtual_private_cloud, + private_subnets=private_subnets, + kubernetes_cluster_role=kubernetes_cluster_role, + kubernetes_node_role=kubernetes_node_role, ) -alb_controller_role = create_alb_controller_role(kubernetes_cluster) +kubernetes_provider = create_kubernetes_provider(kubernetes_cluster=kubernetes_cluster) -alb_controller = create_alb_controller( +cluster_access_configuration = update_kubernetes_cluster_access( kubernetes_provider=kubernetes_provider, - cluster=kubernetes_cluster, - alb_controller_role=alb_controller_role, + kubernetes_cluster_role=kubernetes_cluster_role, + kubernetes_node_role=kubernetes_node_role, + pulumi_user_arn=configuration.require_secret("AWS_IAM_PULUMI_USER_ARN"), + root_user_arn=configuration.require_secret("AWS_IAM_ROOT_USER_ARN"), ) -knative_serving_core = create_knative_serving_core(kubernetes_provider) +knative_serving_core = create_knative_serving_core( + kubernetes_provider=kubernetes_provider, +) -knative_eventing_core = create_knative_eventing_core(kubernetes_provider) +knative_eventing_core = create_knative_eventing_core( + kubernetes_provider=kubernetes_provider, +) knative_broker = create_knative_broker( kubernetes_provider=kubernetes_provider, knative_eventing_core=knative_eventing_core, ) +duckdb_user_access_key = create_duckdb_user_access_key( + data_bucket_name=configuration.require_secret("AWS_S3_DATA_BUCKET_NAME"), +) + +service_environment_variables = create_service_environment_variables( + inputs=[ + ("ALPACA_API_KEY", configuration.require_secret("ALPACA_API_KEY")), + ("ALPACA_API_SECRET", configuration.require_secret("ALPACA_API_SECRET")), + ( + "AWS_S3_DATA_BUCKET_NAME", + configuration.require_secret("AWS_S3_DATA_BUCKET_NAME"), + ), + ("POLYGON_API_KEY", configuration.require_secret("POLYGON_API_KEY")), + ("AWS_IAM_DUCKDB_USER_ACCESS_KEY_ID", duckdb_user_access_key.id), + ("AWS_IAM_DUCKDB_USER_ACCESS_KEY_SECRET", duckdb_user_access_key.secret), + ("AWS_REGION", aws_region), + ], +) + +application_load_balancer_security_group = ( + create_application_load_balancer_security_group( + virtual_private_cloud=virtual_private_cloud, + ) +) + +application_load_balancer = create_application_load_balancer( + application_load_balancer_security_group=application_load_balancer_security_group, + public_subnets=public_subnets, +) + +virtual_private_cloud_link = create_virtual_private_cloud_link( + application_load_balancer_security_group=application_load_balancer_security_group, + public_subnets=public_subnets, +) + +api_gateway = create_api_gateway( + application_load_balancer_security_group=application_load_balancer_security_group, +) + +target_group = create_application_load_balancer_target_group( + 
virtual_private_cloud=virtual_private_cloud, + application_load_balancer=application_load_balancer, +) + +listener = create_application_load_balancer_listener( + application_load_balancer=application_load_balancer, + application_load_balancer_target_group=target_group, +) + try: with Path("pyproject.toml").open("rb") as f: project_data = tomllib.load(f) @@ -76,48 +197,76 @@ message = f"Failed to read version from infrastructure pyproject.toml: {e}" raise RuntimeError(message) from e +username = configuration.require_secret("DOCKERHUB_USERNAME") +password = configuration.require_secret("DOCKERHUB_PASSWORD") + datamanager_image = build_image( service_name="datamanager", service_version=version, + dockerhub_username=username, + dockerhub_password=password, ) +datamanager_knative_service = create_knative_service( + kubernetes_provider=kubernetes_provider, + service_name="datamanager", + image=datamanager_image, + application_load_balancer_service_target_group=target_group, + knative_serving_core=knative_serving_core, + environment_variables=service_environment_variables, +) -duckdb_user_access_key = create_duckdb_user_access_key() - -environment_variables = create_environment_variables(duckdb_user_access_key) +endpoint_information = [ + {"path": "/health", "method": "GET"}, + {"path": "/equity-bars", "method": "GET"}, + {"path": "/equity-bars/fetch", "method": "POST"}, + {"path": "/equity-bars", "method": "DELETE"}, +] -datamanager_service = create_knative_service( - kubernetes_provider=kubernetes_provider, +create_knative_service_api_gateway_integrations( service_name="datamanager", - image_reference=datamanager_image.ref, - environment_variables=environment_variables, - depends_on=[knative_serving_core], + endpoint_information=endpoint_information, + api_gateway=api_gateway, + application_load_balancer_listener=listener, + vpc_link=virtual_private_cloud_link, +) + +api_access_iam_role = create_api_access_iam_role( + api_gateway=api_gateway, + pulumi_user_arn=configuration.require_secret("AWS_IAM_PULUMI_USER_ARN"), + endpoint_information=endpoint_information, ) predictionengine_image = build_image( service_name="predictionengine", service_version=version, + dockerhub_username=username, + dockerhub_password=password, ) -predictionengine_service = create_knative_service( +predictionengine_knative_service = create_knative_service( kubernetes_provider=kubernetes_provider, service_name="predictionengine", - image_reference=predictionengine_image.ref, - environment_variables=environment_variables, - depends_on=[knative_serving_core], + image=predictionengine_image, + application_load_balancer_service_target_group=target_group, + knative_serving_core=knative_serving_core, + environment_variables=service_environment_variables, ) positionmanager_image = build_image( service_name="positionmanager", service_version=version, + dockerhub_username=username, + dockerhub_password=password, ) -positionmanager_service = create_knative_service( +positionmanager_knative_service = create_knative_service( kubernetes_provider=kubernetes_provider, service_name="positionmanager", - image_reference=positionmanager_image.ref, - environment_variables=environment_variables, - depends_on=[knative_serving_core], + image=positionmanager_image, + application_load_balancer_service_target_group=target_group, + knative_serving_core=knative_serving_core, + environment_variables=service_environment_variables, ) open_positions_from_predictions_trigger = create_knative_trigger( @@ -125,7 +274,7 @@ 
source_service_name="predictionengine", source_attribute_type="application.predictionengine.predictions.created", target_service_name="positionmanager", - depends_on=[predictionengine_service, positionmanager_service, knative_broker], + knative_eventing_core=knative_eventing_core, ) midnight_data_fetch_schedule = create_knative_schedule( @@ -133,7 +282,7 @@ target_service_name="datamanager", target_path="/equity-bars/fetch", cron_schedule="0 0 * * *", - depends_on=[datamanager_service, knative_eventing_core], + knative_eventing_core=knative_eventing_core, ) monday_morning_open_positions_schedule = create_knative_schedule( @@ -141,7 +290,7 @@ target_service_name="predictionengine", target_path="/predictions/create", cron_schedule="0 10 * * 1", - depends_on=[predictionengine_service, knative_eventing_core], + knative_eventing_core=knative_eventing_core, ) friday_evening_close_positions_schedule = create_knative_schedule( @@ -149,35 +298,43 @@ target_service_name="positionmanager", target_path="/positions/close", cron_schedule="0 13 * * 5", - depends_on=[positionmanager_service, knative_eventing_core], + knative_eventing_core=knative_eventing_core, ) -cluster_monitoring_scraper = create_prometheus_scraper( - workspace_arn=configuration.require_secret("AWS_PROMETHEUS_WORKSPACE_ARN"), - cluster=kubernetes_cluster, + +pulumi.export("DATAMANAGER_SERVICE_IMAGE", datamanager_image.ref) + +pulumi.export( + "PREDICTIONENGINE_SERVICE_IMAGE", + predictionengine_image.ref, ) -datamanager_ingress = create_service_ingress( - kubernetes_provider=kubernetes_provider, - service_name="datamanager", - cluster=kubernetes_cluster, - depends_on=[alb_controller, datamanager_service], +pulumi.export( + "POSITIONMANAGER_SERVICE_IMAGE", + positionmanager_image.ref, ) -datamanager_alb_url = datamanager_ingress.status.load_balancer.ingress[ - 0 -].hostname.apply(lambda hostname: f"http://{hostname}") +pulumi.export( + "AWS_EKS_CLUSTER_NAME", + kubernetes_cluster.eks_cluster.name.apply(lambda cluster_name: f"{cluster_name}"), +) -datamanager_api = create_api_gateway_with_auth( - service_name="datamanager", - target_url=datamanager_alb_url, +pulumi.export( + "AWS_EKS_KUBECONFIG", + kubernetes_cluster.kubeconfig.apply(json.dumps), ) -datamanager_api_access_policy = create_api_access_policy( - api_gateway=datamanager_api, - service_name="datamanager", +pulumi.export( + "AWS_VIRTUAL_PRIVATE_CLOUD_ID", + virtual_private_cloud.id.apply(lambda vpc_id: f"{vpc_id}"), ) -pulumi.export("DATAMANAGER_ALB_URL", datamanager_alb_url) -pulumi.export("DATAMANAGER_API_GATEWAY_URL", datamanager_api.api_endpoint) -pulumi.export("DATAMANAGER_API_ACCESS_POLICY_ARN", datamanager_api_access_policy.arn) +pulumi.export( + "AWS_API_GATEWAY_ACCESS_IAM_ROLE_ARN", + api_access_iam_role.arn.apply(lambda arn: f"{arn}"), +) + +pulumi.export( + "AWS_API_GATEWAY_ENDPOINT_URL", + api_gateway.api_endpoint.apply(lambda endpoint: f"{endpoint}/production"), +) diff --git a/infrastructure/api.py b/infrastructure/api.py new file mode 100644 index 000000000..7e1b62d20 --- /dev/null +++ b/infrastructure/api.py @@ -0,0 +1,162 @@ +import json + +import pulumi +import pulumi_aws as aws +from tags import pulumi_tags + + +def create_virtual_private_cloud_link( + application_load_balancer_security_group: aws.ec2.SecurityGroup, + public_subnets: list[aws.ec2.Subnet], +) -> aws.apigatewayv2.VpcLink: + return aws.apigatewayv2.VpcLink( + resource_name="pocketsizefund-api-gateway-vpc-link", + name="pocketsizefund-vpc-link", + 
security_group_ids=[application_load_balancer_security_group.id], + subnet_ids=[subnet.id for subnet in public_subnets], + opts=pulumi.ResourceOptions( + depends_on=[ + application_load_balancer_security_group, + *public_subnets, + ] + ), + tags=pulumi_tags, + ) + + +def create_api_gateway( + application_load_balancer_security_group: aws.ec2.SecurityGroup, +) -> aws.apigatewayv2.Api: + api_gateway = aws.apigatewayv2.Api( + resource_name="pocketsizefund-api-gateway", + protocol_type="HTTP", + route_selection_expression="$request.method $request.path", + opts=pulumi.ResourceOptions( + depends_on=[application_load_balancer_security_group], + ), + tags=pulumi_tags, + ) + + aws.apigatewayv2.Stage( + resource_name="pocketsizefund-api-gateway-stage", + api_id=api_gateway.id, + name="production", + auto_deploy=True, + opts=pulumi.ResourceOptions( + depends_on=[ + api_gateway, + application_load_balancer_security_group, + ], + ), + tags=pulumi_tags, + ) + + return api_gateway + + +def create_knative_service_api_gateway_integrations( + service_name: str, + endpoint_information: list[dict[str, str]], + api_gateway: aws.apigatewayv2.Api, + application_load_balancer_listener: aws.lb.Listener, + vpc_link: aws.apigatewayv2.VpcLink, +) -> None: + for endpoint in endpoint_information: + endpoint_path = endpoint["path"].strip("/").replace("/", "-") + endpoint_path = "-".join(filter(None, endpoint_path.split("-"))) + endpoint_method_lower = endpoint["method"].lower() + endpoint_method_upper = endpoint["method"].upper() + + integration = aws.apigatewayv2.Integration( + resource_name=f"pocketsizefund-{service_name}-{endpoint_path}-{endpoint_method_lower}-api-gateway-integration", + api_id=api_gateway.id, + integration_type="HTTP_PROXY", + integration_uri=application_load_balancer_listener.arn, + integration_method=endpoint_method_upper, + connection_type="VPC_LINK", + connection_id=vpc_link.id, + opts=pulumi.ResourceOptions( + depends_on=[ + api_gateway, + application_load_balancer_listener, + vpc_link, + ], + ), + ) + + aws.apigatewayv2.Route( + resource_name=f"pocketsizefund-{service_name}-{endpoint_path}-{endpoint_method_lower}-api-gateway-route", + api_id=api_gateway.id, + route_key=f"{endpoint_method_upper} /{endpoint_path}", + target=integration.id.apply( + lambda integration_id: f"integrations/{integration_id}" + ), + opts=pulumi.ResourceOptions( + depends_on=[ + api_gateway, + application_load_balancer_listener, + vpc_link, + integration, + ], + ), + ) + + +def create_api_access_iam_role( + api_gateway: aws.apigatewayv2.Api, + pulumi_user_arn: pulumi.Output[str], + endpoint_information: list[dict[str, str]], +) -> aws.iam.Role: + api_access_iam_role = aws.iam.Role( + resource_name="pocketsizefund-api-access-role", + name="pocketsizefund-api-access-role", + description="Pocket Size Fund API access role", + assume_role_policy=pulumi_user_arn.apply( + lambda arn: json.dumps( + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"AWS": f"{arn}"}, + "Action": "sts:AssumeRole", + } + ], + } + ) + ), + opts=pulumi.ResourceOptions( + depends_on=[api_gateway], + ), + tags=pulumi_tags, + ) + + aws.iam.RolePolicy( + resource_name="pocketsizefund-api-access-role-policy", + role=api_access_iam_role.id, + policy=api_gateway.arn.apply( + lambda arn: json.dumps( + { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Action": "execute-api:Invoke", + "Resource": [ + f"{arn}/*/{t['method'].upper()}/{t['path'].lstrip('/')}" + for t in endpoint_information + ], + } + 
], + } + ) + ), + opts=pulumi.ResourceOptions( + depends_on=[ + api_gateway, + api_access_iam_role, + ], + ), + ) + + return api_access_iam_role diff --git a/infrastructure/cluster.py b/infrastructure/cluster.py index 48cd5d3fc..1aaad965f 100644 --- a/infrastructure/cluster.py +++ b/infrastructure/cluster.py @@ -4,154 +4,95 @@ import pulumi_aws as aws import pulumi_eks as eks import pulumi_kubernetes as k8s -from tags import common_tags +from tags import pulumi_tags def create_kubernetes_cluster( - cluster_role: aws.iam.Role, - node_role: aws.iam.Role, + virtual_private_cloud: aws.ec2.Vpc, + private_subnets: list[aws.ec2.Subnet], + kubernetes_cluster_role: aws.iam.Role, + kubernetes_node_role: aws.iam.Role, ) -> eks.Cluster: - virtual_private_cloud = aws.ec2.Vpc( - resource_name="pocketsizefund-virtual-private-cloud", - cidr_block="10.0.0.0/16", - enable_dns_hostnames=True, - enable_dns_support=True, - tags=common_tags, - ) - - internet_gateway = aws.ec2.InternetGateway( - resource_name="pocketsizefund-internet-gateway", - vpc_id=virtual_private_cloud.id, - tags=common_tags, - ) - - public_route_table = aws.ec2.RouteTable( - resource_name="pocketsizefund-public-route-table", - vpc_id=virtual_private_cloud.id, - routes=[ - { - "cidr_block": "0.0.0.0/0", - "gateway_id": internet_gateway.id, - } - ], - tags=common_tags, - ) - - availability_zones = aws.get_availability_zones( - state="available", - filters=[ - { - "name": "region-name", - "values": ["us-east-1"], - } - ], - ).names[:2] - - public_subnets: list[aws.ec2.Subnet] = [] - for i, availability_zone in enumerate(availability_zones): - subnet = aws.ec2.Subnet( - resource_name=f"pocketsizefund-public-subnet-{i}", - vpc_id=virtual_private_cloud.id, - cidr_block=f"10.0.{i}.0/24", - availability_zone=availability_zone, - map_public_ip_on_launch=True, - tags=common_tags, - ) - - aws.ec2.RouteTableAssociation( - resource_name=f"pocketsizefund-public-route-table-association-{i}", - subnet_id=subnet.id, - route_table_id=public_route_table.id, - ) - - public_subnets.append(subnet) - - nat_elastic_ip = aws.ec2.Eip( - resource_name="pocketsizefund-nat-elastic-ip", - domain="vpc", - tags=common_tags, - ) - - nat_gateway = aws.ec2.NatGateway( - resource_name="pocketsizefund-nat-gateway", - allocation_id=nat_elastic_ip.id, - subnet_id=public_subnets[0].id, - opts=pulumi.ResourceOptions(depends_on=[internet_gateway]), - tags=common_tags, - ) - - private_route_table = aws.ec2.RouteTable( - resource_name="pocketsizefund-private-route-table", - vpc_id=virtual_private_cloud.id, - routes=[ - { - "cidr_block": "0.0.0.0/0", - "nat_gateway_id": nat_gateway.id, - } - ], - tags=common_tags, - ) - - private_subnets: list[aws.ec2.Subnet] = [] - for i, availability_zone in enumerate(availability_zones): - subnet = aws.ec2.Subnet( - resource_name=f"pocketsizefund-private-subnet-{i}", - vpc_id=virtual_private_cloud.id, - cidr_block=f"10.0.{i + 10}.0/24", - availability_zone=availability_zone, - tags=common_tags, - ) - - aws.ec2.RouteTableAssociation( - resource_name=f"pocketsizefund-private-route-table-association-{i}", - subnet_id=subnet.id, - route_table_id=private_route_table.id, - ) - private_subnets.append(subnet) - - cluster = eks.Cluster( - resource_name="pocketsizefund-cluster", + return eks.Cluster( + resource_name="pocketsizefund-kubernetes-cluster", desired_capacity=2, min_size=1, max_size=3, instance_type="t3.small", - instance_role=node_role, - service_role=cluster_role, + service_role=kubernetes_cluster_role, + instance_role=kubernetes_node_role, 
vpc_id=virtual_private_cloud.id, - private_subnet_ids=[subnet.id for subnet in private_subnets], - public_subnet_ids=[subnet.id for subnet in public_subnets], - tags=common_tags, + subnet_ids=[subnet.id for subnet in private_subnets], + opts=pulumi.ResourceOptions( + depends_on=[ + virtual_private_cloud, + *private_subnets, + kubernetes_cluster_role, + kubernetes_node_role, + ], + ), + tags=pulumi_tags, ) - pulumi.export("KUBECONFIG", cluster.kubeconfig) - pulumi.export("CLUSTER_NAME", cluster.pulumi_resource_type) - pulumi.export("VPC_ID", virtual_private_cloud.id) - - return cluster - -def create_kubernetes_provider(cluster: eks.Cluster) -> k8s.Provider: +def create_kubernetes_provider(kubernetes_cluster: eks.Cluster) -> k8s.Provider: return k8s.Provider( resource_name="pocketsizefund-kubernetes-provider", - kubeconfig=cluster.kubeconfig.apply(json.dumps), + kubeconfig=kubernetes_cluster.kubeconfig.apply(json.dumps), opts=pulumi.ResourceOptions( - depends_on=[cluster], + replace_on_changes=["kubeconfig"], custom_timeouts=pulumi.CustomTimeouts( create="10m", update="10m", delete="10m", ), + depends_on=[kubernetes_cluster], ), ) def update_kubernetes_cluster_access( - cluster_role: aws.iam.Role, - node_role: aws.iam.Role, kubernetes_provider: k8s.Provider, + kubernetes_cluster_role: aws.iam.Role, + kubernetes_node_role: aws.iam.Role, pulumi_user_arn: pulumi.Output[str], root_user_arn: pulumi.Output[str], ) -> k8s.core.v1.ConfigMap: + map_roles = pulumi.Output.json_dumps( + [ + { + "rolearn": kubernetes_node_role.arn, + "username": "system:node:{{EC2PrivateDNSName}}", + "groups": [ + "system:bootstrappers", + "system:nodes", + ], + }, + { + "rolearn": kubernetes_cluster_role.arn, + "username": "system:master", + "groups": [ + "system:masters", + ], + }, + ] + ) + + map_users = pulumi.Output.json_dumps( + [ + { + "userarn": pulumi_user_arn, + "username": "pulumi-user", + "groups": ["system:masters"], + }, + { + "userarn": root_user_arn, + "username": "root-user", + "groups": ["system:masters"], + }, + ] + ) + return k8s.core.v1.ConfigMap( resource_name="pocketsizefund-aws-auth", metadata=k8s.meta.v1.ObjectMetaArgs( @@ -162,39 +103,86 @@ def update_kubernetes_cluster_access( }, ), data={ - "mapRoles": pulumi.Output.concat( - "- rolearn: ", - node_role.arn, - "\n", - " username: system:node:{{EC2PrivateDNSName}}\n", - " groups:\n", - " - system:bootstrappers\n", - " - system:nodes\n", - "- rolearn: ", - cluster_role.arn, - "\n", - " username: system:master\n", - " groups:\n", - " - system:masters\n", - ), - "mapUsers": pulumi.Output.concat( - "- userarn: ", - pulumi_user_arn, - "\n", - " username: pulumi-user\n", - " groups:\n", - " - system:masters\n", - "- userarn: ", - root_user_arn, - "\n", - " username: root-user\n", - " groups:\n", - " - system:masters\n", - ), + "mapRoles": map_roles, + "mapUsers": map_users, }, opts=pulumi.ResourceOptions( - provider=kubernetes_provider, replace_on_changes=["*"], - depends_on=[cluster_role, node_role], + provider=kubernetes_provider, + depends_on=[ + kubernetes_provider, + kubernetes_cluster_role, + kubernetes_node_role, + ], ), ) + + +def create_kubernetes_cluster_role() -> aws.iam.Role: + assume_role_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": ["eks.amazonaws.com"]}, + "Action": "sts:AssumeRole", + } + ], + } + + cluster_role = aws.iam.Role( + resource_name="pocketsizefund-cluster-role", + description="Role for EKS cluster to manage resources", + name="pocketsizefund-cluster-role", + 
assume_role_policy=json.dumps(assume_role_policy), + tags=pulumi_tags, + ) + + aws.iam.RolePolicyAttachment( + resource_name="pocketsizefund-cluster-policy", + role=cluster_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy", + ) + + return cluster_role + + +def create_kubernetes_node_role() -> aws.iam.Role: + assume_role_policy = { + "Version": "2012-10-17", + "Statement": [ + { + "Effect": "Allow", + "Principal": {"Service": ["ec2.amazonaws.com"]}, + "Action": "sts:AssumeRole", + } + ], + } + + node_role = aws.iam.Role( + resource_name="pocketsizefund-node-role", + description="Role for EKS worker nodes to manage resources", + name="pocketsizefund-node-role", + assume_role_policy=json.dumps(assume_role_policy), + tags=pulumi_tags, + ) + + aws.iam.RolePolicyAttachment( + resource_name="pocketsizefund-node-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy", + ) + + aws.iam.RolePolicyAttachment( + resource_name="pocketsizefund-node-role-ecr-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly", + ) + + aws.iam.RolePolicyAttachment( + resource_name="pocketsizefund-node-role-cni-policy", + role=node_role.name, + policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy", + ) + + return node_role diff --git a/infrastructure/environment_variables.py b/infrastructure/environment_variables.py deleted file mode 100644 index 14624a9ce..000000000 --- a/infrastructure/environment_variables.py +++ /dev/null @@ -1,32 +0,0 @@ -import pulumi -import pulumi_aws as aws -from pulumi.config import Config - -configuration = Config() - - -alpaca_api_key = configuration.require_secret("ALPACA_API_KEY") -alpaca_api_secret = configuration.require_secret("ALPACA_API_SECRET") -data_bucket_name = configuration.require_secret("DATA_BUCKET_NAME") -polygon_api_key = configuration.require_secret("POLYGON_API_KEY") -duckdb_access_key = configuration.require_secret("DUCKDB_ACCESS_KEY") -duckdb_secret = configuration.require_secret("DUCKDB_SECRET") -aws_region = configuration.get("aws:region") or "us-east-1" - - -def create_environment_variables( - duckdb_user_access_key: aws.iam.AccessKey, -) -> pulumi.Output[dict[str, str]]: - return pulumi.Output.all( - [ - ("ALPACA_API_KEY", alpaca_api_key), - ("ALPACA_API_SECRET", alpaca_api_secret), - ("DATA_BUCKET_NAME", data_bucket_name), - ("POLYGON_API_KEY", polygon_api_key), - ("DUCKDB_ACCESS_KEY", duckdb_access_key), - ("DUCKDB_SECRET", duckdb_secret), - ("AWS_REGION", aws_region), - ("DUCKDB_USER_ACCESS_KEY_ID", duckdb_user_access_key.id), - ("DUCKDB_USER_ACCESS_KEY_SECRET", duckdb_user_access_key.secret), - ] - ).apply(lambda secrets: dict(secrets)) diff --git a/infrastructure/grafana-dashboard.json b/infrastructure/grafana-dashboard.json new file mode 100644 index 000000000..e4dde270a --- /dev/null +++ b/infrastructure/grafana-dashboard.json @@ -0,0 +1,505 @@ +{ + "dashboard": { + "id": null, + "title": "Pocket Size Fund", + "description": "Comprehensive monitoring dashboard for Pocket Size Fund trading services, infrastructure, and performance metrics", + "tags": [ + "open-source", + "quantitative", + "hedge-fund" + ], + "style": "dark", + "timezone": "America/New_York", + "editable": true, + "graphTooltip": 1, + "time": { + "from": "now-1h", + "to": "now" + }, + "refresh": "30s", + "panels": [ + { + "id": 1, + "title": "Infrastructure Overview", + "type": "stat", + "gridPos": { + "h": 4, + "w": 24, + "x": 0, + "y": 0 + }, + "targets": [ + { + "expr": 
"up{job=\"kubernetes-nodes\"}", + "legendFormat": "Cluster Nodes", + "refId": "A" + }, + { + "expr": "sum(kube_pod_status_ready{condition=\"true\", namespace=\"default\"})", + "legendFormat": "Ready Pods", + "refId": "B" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "unit": "short", + "min": 0 + } + } + }, + { + "id": 2, + "title": "Service Health Status", + "type": "table", + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 4 + }, + "targets": [ + { + "expr": "up{job=\"kubernetes-pods\", pod=~\".*datamanager.*|.*predictionengine.*|.*positionmanager.*\"}", + "legendFormat": "{{pod}}", + "refId": "A", + "format": "table", + "instant": true + } + ], + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "__name__": true, + "job": true, + "instance": true + }, + "renameByName": { + "pod": "Service", + "Value": "Status" + } + } + } + ], + "fieldConfig": { + "defaults": { + "custom": { + "displayMode": "color-background" + }, + "mappings": [ + { + "options": { + "0": { + "text": "Down", + "color": "red" + } + }, + "type": "value" + }, + { + "options": { + "1": { + "text": "Up", + "color": "green" + } + }, + "type": "value" + } + ] + } + } + }, + { + "id": 3, + "title": "API Request Rate", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 4 + }, + "targets": [ + { + "expr": "sum(rate(http_requests_total{job=\"kubernetes-pods\", pod=~\".*datamanager.*\"}[5m])) by (method, path)", + "legendFormat": "{{method}} {{path}}", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "unit": "reqps", + "min": 0 + } + } + }, + { + "id": 4, + "title": "Data Manager Performance", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 8, + "x": 0, + "y": 12 + }, + "targets": [ + { + "expr": "histogram_quantile(0.95, sum(rate(http_request_duration_seconds_bucket{job=\"kubernetes-pods\", pod=~\".*datamanager.*\"}[5m])) by (le))", + "legendFormat": "95th percentile", + "refId": "A" + }, + { + "expr": "histogram_quantile(0.50, sum(rate(http_request_duration_seconds_bucket{job=\"kubernetes-pods\", pod=~\".*datamanager.*\"}[5m])) by (le))", + "legendFormat": "50th percentile", + "refId": "B" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "unit": "s", + "min": 0 + } + } + }, + { + "id": 5, + "title": "Prediction Engine Machine Learning Metrics", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 8, + "x": 8, + "y": 12 + }, + "targets": [ + { + "expr": "predictions_generated_total{job=\"kubernetes-pods\", pod=~\".*predictionengine.*\"}", + "legendFormat": "Total Predictions", + "refId": "A" + }, + { + "expr": "rate(prediction_processing_duration_seconds_sum{job=\"kubernetes-pods\", pod=~\".*predictionengine.*\"}[5m])", + "legendFormat": "Processing Time", + "refId": "B" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "unit": "short", + "min": 0 + } + } + }, + { + "id": 6, + "title": "Position Manager Trading Activity", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 8, + "x": 16, + "y": 12 + }, + "targets": [ + { + "expr": "positions_opened_total{job=\"kubernetes-pods\", pod=~\".*positionmanager.*\"}", + "legendFormat": "Positions Opened", + "refId": "A" + }, + { + "expr": "positions_closed_total{job=\"kubernetes-pods\", pod=~\".*positionmanager.*\"}", + "legendFormat": "Positions Closed", + "refId": "B" + } + ], + "fieldConfig": { + "defaults": 
{ + "color": { + "mode": "palette-classic" + }, + "unit": "short", + "min": 0 + } + } + }, + { + "id": 7, + "title": "Resource Utilization", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 20 + }, + "targets": [ + { + "expr": "sum(rate(container_cpu_usage_seconds_total{namespace=\"default\", pod=~\".*datamanager.*|.*predictionengine.*|.*positionmanager.*\"}[5m])) by (pod)", + "legendFormat": "{{pod}} CPU", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "unit": "percentunit", + "min": 0, + "max": 1 + } + } + }, + { + "id": 8, + "title": "Memory Usage", + "type": "timeseries", + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 20 + }, + "targets": [ + { + "expr": "sum(container_memory_working_set_bytes{namespace=\"default\", pod=~\".*datamanager.*|.*predictionengine.*|.*positionmanager.*\"}) by (pod)", + "legendFormat": "{{pod}} Memory", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "unit": "bytes", + "min": 0 + } + } + }, + { + "id": 9, + "title": "Trading Schedule Events", + "type": "logs", + "gridPos": { + "h": 6, + "w": 24, + "x": 0, + "y": 28 + }, + "targets": [ + { + "expr": "{namespace=\"default\", pod=~\".*datamanager.*|.*predictionengine.*|.*positionmanager.*\"} |~ \"schedule|trigger|cron\"", + "refId": "A" + } + ], + "options": { + "showTime": true, + "showLabels": false, + "showCommonLabels": true, + "wrapLogMessage": true, + "enableLogDetails": true + } + }, + { + "id": 10, + "title": "Error Rate", + "type": "stat", + "gridPos": { + "h": 4, + "w": 8, + "x": 0, + "y": 34 + }, + "targets": [ + { + "expr": "sum(rate(http_requests_total{job=\"kubernetes-pods\", status=~\"5..\", pod=~\".*datamanager.*|.*predictionengine.*|.*positionmanager.*\"}[5m]))", + "legendFormat": "5xx Errors/sec", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "unit": "reqps", + "min": 0, + "thresholds": { + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 0.1 + }, + { + "color": "red", + "value": 1 + } + ] + } + } + } + }, + { + "id": 11, + "title": "Knative Scale Events", + "type": "stat", + "gridPos": { + "h": 4, + "w": 8, + "x": 8, + "y": 34 + }, + "targets": [ + { + "expr": "sum(increase(knative_serving_revision_ready{namespace=\"default\"}[1h]))", + "legendFormat": "Scale Events", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "unit": "short", + "min": 0 + } + } + }, + { + "id": 12, + "title": "AWS Load Balancer Health", + "type": "stat", + "gridPos": { + "h": 4, + "w": 8, + "x": 16, + "y": 34 + }, + "targets": [ + { + "expr": "aws_applicationelb_target_response_time_average{load_balancer=~\".*pocketsizefund.*\"}", + "legendFormat": "ALB Response Time", + "refId": "A" + } + ], + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "unit": "ms", + "min": 0, + "thresholds": { + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 100 + }, + { + "color": "red", + "value": 500 + } + ] + } + } + } + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": "prometheus", + "enable": true, + "expr": "changes(up{job=\"kubernetes-pods\", 
pod=~\".*datamanager.*|.*predictionengine.*|.*positionmanager.*\"}[5m]) > 0", + "iconColor": "red", + "name": "Service Restarts", + "titleFormat": "{{pod}} restarted", + "tagKeys": "pod,namespace" + } + ] + }, + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "All", + "value": "$__all" + }, + "datasource": "prometheus", + "definition": "label_values(up{job=\"kubernetes-pods\", namespace=\"default\"}, pod)", + "hide": 0, + "includeAll": true, + "multi": true, + "name": "pod", + "options": [], + "query": { + "query": "label_values(up{job=\"kubernetes-pods\", namespace=\"default\"}, pod)", + "refId": "prometheus-pod-Variable-Query" + }, + "refresh": 1, + "regex": ".*(datamanager|predictionengine|positionmanager).*", + "skipUrlSync": false, + "sort": 1, + "type": "query" + } + ] + } + }, + "meta": { + "type": "db", + "canSave": true, + "canEdit": true, + "canAdmin": true, + "canStar": true, + "slug": "pocketsizefund-infrastructure", + "url": "/d/pocketsizefund/pocketsizefund-infrastructure", + "expires": "0001-01-01T00:00:00Z", + "created": "2025-01-19T10:00:00Z", + "updated": "2025-01-19T10:00:00Z", + "updatedBy": "admin", + "createdBy": "admin", + "version": 1, + "hasAcl": false, + "isFolder": false, + "folderId": 0, + "folderTitle": "General", + "folderUrl": "", + "provisioned": false, + "provisionedExternalId": "" + } +} \ No newline at end of file diff --git a/infrastructure/grafana_dashboard.json b/infrastructure/grafana_dashboard.json deleted file mode 100644 index a49092a48..000000000 --- a/infrastructure/grafana_dashboard.json +++ /dev/null @@ -1,270 +0,0 @@ -{ - "dashboard": { - "id": null, - "title": "Pocket Size Fund Metrics", - "tags": ["pocketsizefund", "open-source", "quantitative", "hedge-fund"], - "timezone": "browser", - "panels": [ - { - "id": 1, - "title": "Equity Bars Data Volume", - "type": "stat", - "targets": [ - { - "expr": "equity_bars_total_rows", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "unit": "short", - "decimals": 0 - } - }, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto" - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 0, - "y": 0 - } - }, - { - "id": 2, - "title": "Portfolio Total Value", - "type": "stat", - "targets": [ - { - "expr": "portfolio_total_value", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "unit": "currencyUSD", - "decimals": 2 - } - }, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto" - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 6, - "y": 0 - } - }, - { - "id": 3, - "title": "Cash Balance", - "type": "stat", - "targets": [ - { - "expr": "portfolio_cash_balance", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "unit": "currencyUSD", - "decimals": 2 - } - }, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto" - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 12, - "y": 0 - } - }, - { - "id": 4, - "title": "Number of Positions", - "type": "stat", - "targets": [ - { - "expr": "portfolio_positions_count", - "refId": "A" - } - ], - "fieldConfig": { - "defaults": { - "unit": "short", - "decimals": 0 - } - }, - "options": { - "colorMode": "background", - "graphMode": "area", - "justifyMode": "auto", - "orientation": "auto" - }, - "gridPos": { - "h": 8, - "w": 6, - "x": 18, - "y": 0 - } - }, - { - "id": 5, - "title": "Portfolio Value Over Time", - "type": "timeseries", - "targets": [ - 
{ - "expr": "portfolio_total_value", - "refId": "A", - "legendFormat": "Total Portfolio Value" - }, - { - "expr": "portfolio_cash_balance", - "refId": "B", - "legendFormat": "Cash Balance" - } - ], - "fieldConfig": { - "defaults": { - "unit": "currencyUSD", - "decimals": 2 - } - }, - "options": { - "legend": { - "displayMode": "visible", - "placement": "bottom" - } - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 8 - } - }, - { - "id": 6, - "title": "Position Values by Symbol", - "type": "barchart", - "targets": [ - { - "expr": "portfolio_position_value", - "refId": "A", - "legendFormat": "{{symbol}}" - } - ], - "fieldConfig": { - "defaults": { - "unit": "currencyUSD", - "decimals": 2 - } - }, - "options": { - "legend": { - "displayMode": "visible", - "placement": "right" - } - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 8 - } - }, - { - "id": 7, - "title": "Position Profit and Loss by Symbol", - "type": "barchart", - "targets": [ - { - "expr": "portfolio_position_profit_and_loss", - "refId": "A", - "legendFormat": "{{symbol}}" - } - ], - "fieldConfig": { - "defaults": { - "unit": "currencyUSD", - "decimals": 2, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "red", - "value": null - }, - { - "color": "green", - "value": 0 - } - ] - } - } - }, - "options": { - "legend": { - "displayMode": "visible", - "placement": "right" - } - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 0, - "y": 17 - } - }, - { - "id": 8, - "title": "Data Volume Trend", - "type": "timeseries", - "targets": [ - { - "expr": "equity_bars_total_rows", - "refId": "A", - "legendFormat": "Total Rows" - } - ], - "fieldConfig": { - "defaults": { - "unit": "short", - "decimals": 0 - } - }, - "options": { - "legend": { - "displayMode": "visible", - "placement": "bottom" - } - }, - "gridPos": { - "h": 9, - "w": 12, - "x": 12, - "y": 17 - } - } - ], - "time": { - "from": "now-6h", - "to": "now" - }, - "refresh": "5m" - }, - "overwrite": true -} \ No newline at end of file diff --git a/infrastructure/images.py b/infrastructure/images.py index 51d9bb0e4..87d9a4bea 100644 --- a/infrastructure/images.py +++ b/infrastructure/images.py @@ -10,13 +10,15 @@ def build_image( service_name: str, service_version: str, + dockerhub_username: pulumi.Output[str], + dockerhub_password: pulumi.Output[str], ) -> docker_build.Image: service_directory = Path("../application") / service_name if not service_directory.exists(): message = f"Service directory not found: {service_directory}" raise FileNotFoundError(message) - image = docker_build.Image( + return docker_build.Image( resource_name=f"pocketsizefund-{service_name}-image", tags=[f"pocketsizefund/{service_name}:{service_version}"], context=docker_build.BuildContextArgs(location=str(service_directory)), @@ -28,12 +30,8 @@ def build_image( registries=[ docker_build.RegistryArgs( address="docker.io", - username=configuration.require_secret("DOCKERHUB_USERNAME"), - password=configuration.require_secret("DOCKERHUB_PASSWORD"), + username=dockerhub_username, + password=dockerhub_password, ) ], ) - - pulumi.export(f"{service_name.upper()}_IMAGE", image.ref) - - return image diff --git a/infrastructure/ingress.py b/infrastructure/ingress.py index 9ea08ab39..199d17e8b 100644 --- a/infrastructure/ingress.py +++ b/infrastructure/ingress.py @@ -1,247 +1,114 @@ -import json - import pulumi import pulumi_aws as aws -import pulumi_eks as eks -import pulumi_kubernetes as k8s -from pulumi.config import Config -from tags import common_tags - -configuration = Config() - - 
-def create_alb_controller_role(cluster: eks.Cluster) -> aws.iam.Role: - policy_document = pulumi.Output.all( - oidc_provider_arn=cluster.core.oidc_provider.arn, - oidc_provider_url=cluster.core.oidc_provider.url, - ).apply( - lambda args: json.dumps( - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Principal": {"Federated": args["oidc_provider_arn"]}, - "Action": "sts:AssumeRoleWithWebIdentity", - "Condition": { - "StringEquals": { - f"{args['oidc_provider_url'].replace('https://', '')}:sub": "system:serviceaccount:kube-system:aws-load-balancer-controller", # noqa: E501 - f"{args['oidc_provider_url'].replace('https://', '')}:aud": "sts.amazonaws.com", # noqa: E501 - } - }, - } - ], - } - ) - ) - - alb_controller_role = aws.iam.Role( - resource_name="pocketsizefund-alb-controller-role", - name="pocketsizefund-alb-controller-role", - assume_role_policy=policy_document, - tags=common_tags, - ) - - aws.iam.RolePolicyAttachment( - resource_name="pocketsizefund-alb-controller-policy", - role=alb_controller_role.name, - policy_arn="arn:aws:iam::aws:policy/ElasticLoadBalancingFullAccess", - ) - - return alb_controller_role - - -def create_alb_controller( - kubernetes_provider: k8s.Provider, - cluster: eks.Cluster, - alb_controller_role: aws.iam.Role, -) -> k8s.helm.v3.Release: - alb_controller_service_account = k8s.core.v1.ServiceAccount( - resource_name="pocketsizefund-alb-controller-service-account", - metadata=k8s.meta.v1.ObjectMetaArgs( - name="aws-load-balancer-controller", - namespace="kube-system", - annotations={ - "eks.amazonaws.com/role-arn": alb_controller_role.arn, - }, - ), - opts=pulumi.ResourceOptions(provider=kubernetes_provider), - ) - - return k8s.helm.v3.Release( - resource_name="pocketsizefund-alb-controller", - name="aws-load-balancer-controller", - chart="aws-load-balancer-controller", - namespace="kube-system", - repository_opts=k8s.helm.v3.RepositoryOptsArgs( - repo="https://aws.github.io/eks-charts" - ), - values={ - "clusterName": cluster.name, # type: ignore - "serviceAccount": { - "create": False, - "name": "aws-load-balancer-controller", - }, - "region": configuration.get("aws:region") or "us-east-1", - "vpcId": cluster.eks_cluster.vpc_config.vpc_id, - }, +from tags import pulumi_tags + + +def create_application_load_balancer_security_group( + virtual_private_cloud: aws.ec2.Vpc, +) -> aws.ec2.SecurityGroup: + return aws.ec2.SecurityGroup( + resource_name="pocketsizefund-alb-security-group", + vpc_id=virtual_private_cloud.id, + ingress=[ + aws.ec2.SecurityGroupIngressArgs( + protocol="tcp", + from_port=80, + to_port=80, + cidr_blocks=["0.0.0.0/0"], + ), + aws.ec2.SecurityGroupIngressArgs( + protocol="tcp", + from_port=443, + to_port=443, + cidr_blocks=["0.0.0.0/0"], + ), + ], + egress=[ + aws.ec2.SecurityGroupEgressArgs( + protocol="-1", + from_port=0, + to_port=0, + cidr_blocks=["0.0.0.0/0"], + ) + ], opts=pulumi.ResourceOptions( - provider=kubernetes_provider, - depends_on=[alb_controller_service_account], + depends_on=[virtual_private_cloud], ), + tags=pulumi_tags, ) -def create_service_ingress( - kubernetes_provider: k8s.Provider, - service_name: str, - cluster: eks.Cluster, - certificate_arn: pulumi.Output[str] | None = None, - depends_on: list[pulumi.Resource] | None = None, -) -> k8s.networking.v1.Ingress: - annotations = { - "kubernetes.io/ingress.class": "alb", - "alb.ingress.kubernetes.io/scheme": "internet-facing", - "alb.ingress.kubernetes.io/target-type": "pod", - "alb.ingress.kubernetes.io/load-balancer-name": 
f"pocketsizefund-{service_name}", # noqa: E501 - "alb.ingress.kubernetes.io/subnets": cluster.public_subnet_ids.apply( # type: ignore - lambda subnets: ",".join(subnets) - ), - } - - if certificate_arn: - annotations.update( - { - "alb.ingress.kubernetes.io/listen-ports": '[{"HTTP": 80}, {"HTTPS": 443}]', # noqa: E501 - "alb.ingress.kubernetes.io/certificate-arn": certificate_arn, - "alb.ingress.kubernetes.io/ssl-redirect": "443", - } - ) - else: - annotations["alb.ingress.kubernetes.io/listen-ports"] = '[{"HTTP": 80}]' - - return k8s.networking.v1.Ingress( - resource_name=f"pocketsizefund-{service_name}-ingress", - metadata=k8s.meta.v1.ObjectMetaArgs( - name=f"{service_name}-ingress", - namespace="default", - annotations=annotations, - ), - spec=k8s.networking.v1.IngressSpecArgs( - rules=[ - k8s.networking.v1.IngressRuleArgs( - http=k8s.networking.v1.HTTPIngressRuleValueArgs( - paths=[ - k8s.networking.v1.HTTPIngressPathArgs( - path="/", - path_type="Prefix", - backend=k8s.networking.v1.IngressBackendArgs( - service=k8s.networking.v1.IngressServiceBackendArgs( - name=service_name, - port=k8s.networking.v1.ServiceBackendPortArgs( - number=80 - ), - ) - ), - ) - ] - ) - ) - ] - ), +def create_application_load_balancer( + application_load_balancer_security_group: aws.ec2.SecurityGroup, + public_subnets: list[aws.ec2.Subnet], +) -> aws.lb.LoadBalancer: + return aws.lb.LoadBalancer( + resource_name="pocketsizefund-alb", + internal=False, + load_balancer_type="application", + security_groups=[application_load_balancer_security_group.id], + subnets=[subnet.id for subnet in public_subnets], opts=pulumi.ResourceOptions( - provider=kubernetes_provider, - depends_on=depends_on or [], - ), - ) - - -def create_self_signed_certificate() -> aws.acm.Certificate: - return aws.acm.Certificate( - resource_name="pocketsizefund-self-signed-cert", - domain_name="*.amazonaws.com", - validation_method="DNS", - subject_alternative_names=["*.elb.amazonaws.com"], - tags=common_tags, - ) - - -def create_api_gateway_with_auth( - service_name: str, - target_url: pulumi.Output[str], -) -> aws.apigatewayv2.Api: - api = aws.apigatewayv2.Api( - resource_name=f"pocketsizefund-{service_name}-api", - name=f"pocketsizefund-{service_name}", - protocol_type="HTTP", - cors_configuration=aws.apigatewayv2.ApiCorsConfigurationArgs( - allow_origins=["*"], # reduce allowed origins in production - allow_methods=["GET", "POST", "DELETE"], - allow_headers=["Content-Type", "Authorization", "Host"], - max_age=86400, + depends_on=[ + application_load_balancer_security_group, + *public_subnets, + ], ), - tags=common_tags, + tags=pulumi_tags, ) - integration = aws.apigatewayv2.Integration( - resource_name=f"pocketsizefund-{service_name}-integration", - api_id=api.id, - integration_type="HTTP_PROXY", - integration_method="ANY", - integration_uri=target_url, - connection_type="INTERNET", - ) - aws.apigatewayv2.Route( - resource_name=f"pocketsizefund-{service_name}-route", - api_id=api.id, - route_key="ANY /{proxy+}", - target=integration.id.apply( - lambda integration_id: f"integrations/{integration_id}" +def create_application_load_balancer_target_group( + virtual_private_cloud: aws.ec2.Vpc, + application_load_balancer: aws.lb.LoadBalancer, +) -> aws.lb.TargetGroup: + return aws.lb.TargetGroup( + resource_name="pocketsizefund-tg", + port=8080, # match service port + protocol="HTTP", + target_type="ip", + vpc_id=virtual_private_cloud.id, + health_check=aws.lb.TargetGroupHealthCheckArgs( + enabled=True, + healthy_threshold=3, + 
unhealthy_threshold=3, + interval=60, + path="/health", + port="8080", + protocol="HTTP", + timeout=10, ), - authorization_type="AWS_IAM", - ) - - aws.apigatewayv2.Stage( - resource_name=f"pocketsizefund-{service_name}-stage", - api_id=api.id, - name="$default", - auto_deploy=True, - default_route_settings=aws.apigatewayv2.StageDefaultRouteSettingsArgs( - throttling_burst_limit=100, - throttling_rate_limit=50, + opts=pulumi.ResourceOptions( + replace_on_changes=["*"], + depends_on=[ + virtual_private_cloud, + application_load_balancer, + ], ), - tags=common_tags, + tags=pulumi_tags, ) - pulumi.export(f"{service_name.upper()}_API_GATEWAY_URL", api.api_endpoint) - - return api - -def create_api_access_policy( - api_gateway: aws.apigatewayv2.Api, - service_name: str, -) -> aws.iam.Policy: - policy_document = api_gateway.arn.apply( - lambda arn: json.dumps( - { - "Version": "2012-10-17", - "Statement": [ - { - "Effect": "Allow", - "Action": ["execute-api:Invoke"], - "Resource": f"{arn}/$default/*", - } - ], - } - ) - ) - - return aws.iam.Policy( - resource_name=f"pocketsizefund-{service_name}-api-access", - name=f"pocketsizefund-{service_name}-api-access", - description=f"Policy for accessing {service_name} API Gateway", - policy=policy_document, - tags=common_tags, +def create_application_load_balancer_listener( + application_load_balancer: aws.lb.LoadBalancer, + application_load_balancer_target_group: aws.lb.TargetGroup, +) -> aws.lb.Listener: + return aws.lb.Listener( + resource_name="pocketsizefund-listener", + load_balancer_arn=application_load_balancer.arn, + port=80, # publicly exposed port + protocol="HTTP", + default_actions=[ + aws.lb.ListenerDefaultActionArgs( + type="forward", + target_group_arn=application_load_balancer_target_group.arn, + ) + ], + opts=pulumi.ResourceOptions( + depends_on=[ + application_load_balancer, + application_load_balancer_target_group, + ], + ), + tags=pulumi_tags, ) diff --git a/infrastructure/keys.py b/infrastructure/keys.py index 11c7c74a1..029b3202a 100644 --- a/infrastructure/keys.py +++ b/infrastructure/keys.py @@ -1,21 +1,23 @@ import json +import pulumi import pulumi_aws as aws -from environment_variables import data_bucket_name -from tags import common_tags +from tags import pulumi_tags -def create_duckdb_user_access_key() -> aws.iam.AccessKey: +def create_duckdb_user_access_key( + data_bucket_name: pulumi.Output[str], +) -> aws.iam.AccessKey: duckdb_user = aws.iam.User( resource_name="pocketsizefund-duckdb-user", name="pocketsizefund-duckdb-user", - tags=common_tags, + tags=pulumi_tags, ) duckdb_policy = aws.iam.Policy( resource_name="pocketsizefund-duckdb-policy", name="pocketsizefund-duckdb-policy", - description="Policy for DuckDB access", + description="Policy for application service DuckDB access", policy=json.dumps( { "Version": "2012-10-17", @@ -36,7 +38,7 @@ def create_duckdb_user_access_key() -> aws.iam.AccessKey: ], } ), - tags=common_tags, + tags=pulumi_tags, ) aws.iam.UserPolicyAttachment( diff --git a/infrastructure/monitors.py b/infrastructure/monitors.py index eb23a6198..5f41ea50e 100644 --- a/infrastructure/monitors.py +++ b/infrastructure/monitors.py @@ -1,88 +1,151 @@ import pulumi import pulumi_aws as aws import pulumi_eks as eks -from tags import common_tags +from tags import pulumi_tags def create_prometheus_scraper( - workspace_arn: pulumi.Output[str], - cluster: eks.Cluster, + prometheus_workspace_arn: pulumi.Output[str], + kubernetes_cluster: eks.Cluster, + security_group: aws.ec2.SecurityGroup, ) -> aws.amp.Scraper: + 
    scrape_configuration = pulumi.Output.json_dumps(
+        {
+            "global": {
+                "scrape_interval": "15m",
+                "evaluation_interval": "15m",
+            },
+            "scrape_configs": [
+                {
+                    "job_name": "kubernetes-apiservers",
+                    "kubernetes_sd_configs": [
+                        {
+                            "role": "endpoints",
+                            "api_server": kubernetes_cluster.core.endpoint,
+                        }
+                    ],
+                    "scheme": "https",
+                    "tls_config": {
+                        "ca_file": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",  # noqa: E501
+                    },
+                    "bearer_token_file": "/var/run/secrets/kubernetes.io/serviceaccount/token",  # noqa: E501
+                    "relabel_configs": [
+                        {
+                            "source_labels": [
+                                "__meta_kubernetes_namespace",
+                                "__meta_kubernetes_service_name",
+                                "__meta_kubernetes_endpoint_port_name",
+                            ],
+                            "action": "keep",
+                            "regex": "default;kubernetes;https",
+                        }
+                    ],
+                },
+                {
+                    "job_name": "kubernetes-nodes",
+                    "kubernetes_sd_configs": [
+                        {
+                            "role": "node",
+                            "api_server": kubernetes_cluster.core.endpoint,
+                        }
+                    ],
+                    "scheme": "https",
+                    "tls_config": {
+                        "ca_file": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",  # noqa: E501
+                    },
+                    "bearer_token_file": "/var/run/secrets/kubernetes.io/serviceaccount/token",  # noqa: E501
+                    "relabel_configs": [
+                        {
+                            "action": "labelmap",
+                            "regex": "__meta_kubernetes_node_label_(.+)",
+                        },
+                        {
+                            "target_label": "__address__",
+                            "replacement": "kubernetes.default.svc:443",
+                        },
+                        {
+                            "source_labels": ["__meta_kubernetes_node_name"],
+                            "regex": "(.+)",
+                            "target_label": "__metrics_path__",
+                            "replacement": "/api/v1/nodes/$1/proxy/metrics",
+                        },
+                    ],
+                },
+                {
+                    "job_name": "kubernetes-pods",
+                    "kubernetes_sd_configs": [
+                        {
+                            "role": "pod",
+                            "api_server": kubernetes_cluster.core.endpoint,
+                        }
+                    ],
+                    "relabel_configs": [
+                        {
+                            "source_labels": [
+                                "__meta_kubernetes_pod_annotation_prometheus_io_scrape"
+                            ],
+                            "action": "keep",
+                            "regex": "true",
+                        },
+                        {
+                            "source_labels": [
+                                "__meta_kubernetes_pod_annotation_prometheus_io_path"
+                            ],
+                            "action": "replace",
+                            "target_label": "__metrics_path__",
+                            "regex": "(.+)",
+                        },
+                        {
+                            "source_labels": [
+                                "__address__",
+                                "__meta_kubernetes_pod_annotation_prometheus_io_port",
+                            ],
+                            "action": "replace",
+                            "regex": r"([^:]+)(?::\d+)?;(\d+)",
+                            "replacement": "$1:$2",
+                            "target_label": "__address__",
+                        },
+                        {
+                            "action": "labelmap",
+                            "regex": "__meta_kubernetes_pod_label_(.+)",
+                        },
+                        {
+                            "source_labels": ["__meta_kubernetes_namespace"],
+                            "action": "replace",
+                            "target_label": "kubernetes_namespace",
+                        },
+                        {
+                            "source_labels": ["__meta_kubernetes_pod_name"],
+                            "action": "replace",
+                            "target_label": "kubernetes_pod_name",
+                        },
+                    ],
+                },
+            ],
+        }
+    )
+
     return aws.amp.Scraper(
         resource_name="pocketsizefund-prometheus-scraper",
         alias="pocketsizefund-cluster-scraper",
-        scrape_configuration=cluster.eks_cluster.endpoint.apply(
-            lambda endpoint: f"""
-global:
-  scrape_interval: 15m
-  evaluation_interval: 15m
-scrape_configs:
-- job_name: 'kubernetes-apiservers'
-  kubernetes_sd_configs:
-  - role: endpoints
-    api_server: {endpoint}
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token
-  relabel_configs:
-  - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name]
-    action: keep
-    regex: default;kubernetes;https
-- job_name: 'kubernetes-nodes'
-  kubernetes_sd_configs:
-  - role: node
-    api_server: {endpoint}
-  scheme: https
-  tls_config:
-    ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
-  bearer_token_file:
/var/run/secrets/kubernetes.io/serviceaccount/token - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics -- job_name: 'kubernetes-pods' - kubernetes_sd_configs: - - role: pod - api_server: {endpoint} - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\\d+)?;(\\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name -""" # noqa: E501 - ), + scrape_configuration=scrape_configuration, destination=aws.amp.ScraperDestinationArgs( amp=aws.amp.ScraperDestinationAmpArgs( - workspace_arn=workspace_arn, + workspace_arn=prometheus_workspace_arn, ), ), source=aws.amp.ScraperSourceArgs( eks=aws.amp.ScraperSourceEksArgs( - cluster_arn=cluster.eks_cluster.arn, - subnet_ids=cluster.private_subnet_ids, # type: ignore + cluster_arn=kubernetes_cluster.eks_cluster.arn, + subnet_ids=kubernetes_cluster.eks_cluster.vpc_config.subnet_ids, + security_group_ids=[security_group.id], ), ), - tags=common_tags, + opts=pulumi.ResourceOptions( + depends_on=[ + kubernetes_cluster, + security_group, + ], + ), + tags=pulumi_tags, ) diff --git a/infrastructure/ping.nu b/infrastructure/ping.nu deleted file mode 100644 index eee608f57..000000000 --- a/infrastructure/ping.nu +++ /dev/null @@ -1,47 +0,0 @@ -use std/assert - -let cluster_endpoint = kubectl config view --minify --output jsonpath='{.clusters[0].cluster.server}' - -let token = aws eks get-token --cluster-name pocketsizefund-cluster | from json | get status.token - -let headers = [Authorization $"Bearer ($token)"] - -let services = [ - { - name: "datamanager" - url: $"($cluster_endpoint)/api/v1/namespaces/default/services/datamanager:8080/proxy" - } - { - name: "positionmanager" - url: $"($cluster_endpoint)/api/v1/namespaces/default/services/positionmanager:8080/proxy" - } - { - name: "predictionengine" - url: $"($cluster_endpoint)/api/v1/namespaces/default/services/predictionengine:8080/proxy" - } -] - -$services -| each {|service| - http get --full --headers $headers $"($service.url)/health" - print $"($service.name) healthy" -} - -let datamanager_url: string = ($services | where name == "datamanager" | get url | first) - -let datamanager_get = http get --headers $headers $"($datamanager_url)/equity-bars?date=2025-01-07" | from json - -assert (($datamanager_get | get count) >= 100) - -let datamanager_query = { - scheme: https - host: ($cluster_endpoint | str replace "https://" "") - path: "/api/v1/namespaces/default/services/datamanager:8080/proxy/equity-bars" - params: {start_date: "2025-01-07" end_date: "2025-01-09"} -} -| url join -| http get --full --headers $headers $in -| get status - -assert equal $datamanager_query 200 - diff --git a/infrastructure/pyproject.toml b/infrastructure/pyproject.toml index 579afc09e..4bc30b170 100644 
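The monitors.py rewrite above trades the hand-built YAML f-string (resolved through .apply) for pulumi.Output.json_dumps, which serializes a structure containing still-unresolved Outputs once they settle. A minimal sketch of that pattern, with a hypothetical endpoint standing in for kubernetes_cluster.core.endpoint:

    import pulumi

    # hypothetical stand-in for an Output such as kubernetes_cluster.core.endpoint
    endpoint = pulumi.Output.from_input("https://cluster.example")

    # json_dumps waits for nested Outputs to resolve before serializing,
    # so no manual .apply() around a template string is needed
    scrape_configuration = pulumi.Output.json_dumps(
        {
            "global": {"scrape_interval": "15m"},
            "scrape_configs": [{"kubernetes_sd_configs": [{"api_server": endpoint}]}],
        }
    )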
diff --git a/infrastructure/pyproject.toml b/infrastructure/pyproject.toml
index 579afc09e..4bc30b170 100644
--- a/infrastructure/pyproject.toml
+++ b/infrastructure/pyproject.toml
@@ -1,18 +1,12 @@
 [project]
 name = "infrastructure"
-version = "20250709.1"
+version = "20250716.1"
 requires-python = "==3.12.10"
 dependencies = [
     "pulumi>=3.169.0",
     "pulumi-aws>=6.0.0",
-    "pulumi-awsx>=2.0.0",
     "pulumi-eks>=3.9.1",
-    "pulumi-std>=2.2.0",
     "pulumi-docker>=3.0.0",
     "pulumi-docker-build>=0.0.12",
     "pulumi-kubernetes>=4.23.0",
-    "loguru>=0.7.3",
-    "boto3>=1.38.23",
-    "botocore>=1.38.23",
-    "requests>=2.32.0",  # fixes CVE-2024-35195
 ]
diff --git a/infrastructure/roles.py b/infrastructure/roles.py
deleted file mode 100644
index 91bfaed17..000000000
--- a/infrastructure/roles.py
+++ /dev/null
@@ -1,74 +0,0 @@
-import pulumi_aws as aws
-from tags import common_tags
-
-
-def create_cluster_role() -> aws.iam.Role:
-    cluster_role = aws.iam.Role(
-        resource_name="pocketsizefund-cluster-role",
-        description="Role for EKS cluster to manage resources",
-        name="pocketsizefund-cluster-role",
-        assume_role_policy="""{
-            "Version": "2012-10-17",
-            "Statement": [
-                {
-                    "Effect": "Allow",
-                    "Principal": {
-                        "Service": [
-                            "eks.amazonaws.com"
-                        ]
-                    },
-                    "Action": "sts:AssumeRole"
-                }
-            ]
-        }""",
-        tags=common_tags,
-    )
-
-    aws.iam.RolePolicyAttachment(
-        resource_name="pocketsizefund-cluster-role-eks-cluster-policy",
-        role=cluster_role.name,
-        policy_arn="arn:aws:iam::aws:policy/AmazonEKSClusterPolicy",
-    )
-
-    return cluster_role
-
-
-def create_node_role() -> aws.iam.Role:
-    node_role = aws.iam.Role(
-        resource_name="pocketsizefund-node-role",
-        description="Role for EKS worker nodes to manage resources",
-        name="pocketsizefund-node-role",
-        assume_role_policy="""{
-            "Version": "2012-10-17",
-            "Statement": [
-                {
-                    "Effect": "Allow",
-                    "Principal": {
-                        "Service": "ec2.amazonaws.com"
-                    },
-                    "Action": "sts:AssumeRole"
-                }
-            ]
-        }""",
-        tags=common_tags,
-    )
-
-    aws.iam.RolePolicyAttachment(
-        resource_name="pocketsizefund-node-policy",
-        role=node_role.name,
-        policy_arn="arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy",
-    )
-
-    aws.iam.RolePolicyAttachment(
-        resource_name="pocketsizefund-node-role-ecr-policy",
-        role=node_role.name,
-        policy_arn="arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly",
-    )
-
-    aws.iam.RolePolicyAttachment(
-        resource_name="pocketsizefund-node-role-cni-policy",
-        role=node_role.name,
-        policy_arn="arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy",
-    )
-
-    return node_role
diff --git a/infrastructure/publishers_subscribers.py b/infrastructure/services.py
similarity index 63%
rename from infrastructure/publishers_subscribers.py
rename to infrastructure/services.py
index 5372bf737..8b54bb553 100644
--- a/infrastructure/publishers_subscribers.py
+++ b/infrastructure/services.py
@@ -1,14 +1,27 @@
+from typing import Any
+
 import pulumi
+import pulumi_aws as aws
+import pulumi_docker_build as docker_build
 import pulumi_kubernetes as k8s
 
 
+def create_service_environment_variables(
+    inputs: list[tuple[str, Any]],
+) -> pulumi.Output[dict[str, str]]:
+    return pulumi.Output.all(*inputs).apply(lambda secrets: dict(secrets))
+
+
 def create_knative_serving_core(
     kubernetes_provider: k8s.Provider,
 ) -> k8s.yaml.v2.ConfigGroup:
     knative_serving_namespace = k8s.core.v1.Namespace(
         resource_name="pocketsizefund-knative-serving-namespace",
         metadata={"name": "knative-serving"},
-        opts=pulumi.ResourceOptions(provider=kubernetes_provider),
+        opts=pulumi.ResourceOptions(
+            provider=kubernetes_provider,
+            depends_on=[kubernetes_provider],
+        ),
     )
 
     knative_serving_crds = k8s.yaml.v2.ConfigGroup(  # custom resource definition
@@ -18,7 +31,15 @@ def create_knative_serving_core(
         ],
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=[knative_serving_namespace],
+            depends_on=[
+                kubernetes_provider,
+                knative_serving_namespace,
+            ],
+            custom_timeouts=pulumi.CustomTimeouts(
+                create="2m",
+                update="2m",
+                delete="2m",
+            ),
         ),
     )
 
@@ -29,16 +50,20 @@ def create_knative_serving_core(
         ],
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=[knative_serving_crds],
+            depends_on=[
+                kubernetes_provider,
+                knative_serving_namespace,
+                knative_serving_crds,
+            ],
             custom_timeouts=pulumi.CustomTimeouts(
-                create="15m",
-                update="15m",
-                delete="15m",
+                create="2m",
+                update="2m",
+                delete="2m",
             ),
         ),
     )
 
-    # NEW ADDITION
+    # NOTE: check if this or its specific configurations are necessary
     k8s.core.v1.ConfigMap(
         resource_name="pocketsizefund-knative-configuration-network",
         metadata=k8s.meta.v1.ObjectMetaArgs(
@@ -52,7 +77,17 @@ def create_knative_serving_core(
         },
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=[knative_serving_core],
+            depends_on=[
+                kubernetes_provider,
+                knative_serving_namespace,
+                knative_serving_core,
+                knative_serving_crds,
+            ],
+            custom_timeouts=pulumi.CustomTimeouts(
+                create="2m",
+                update="2m",
+                delete="2m",
+            ),
         ),
     )
 
@@ -65,7 +100,10 @@ def create_knative_eventing_core(
     knative_eventing_namespace = k8s.core.v1.Namespace(
         resource_name="pocketsizefund-eventing-namespace",
         metadata={"name": "knative-eventing"},
-        opts=pulumi.ResourceOptions(provider=kubernetes_provider),
+        opts=pulumi.ResourceOptions(
+            provider=kubernetes_provider,
+            depends_on=[kubernetes_provider],
+        ),
     )
 
     knative_eventing_crds = k8s.yaml.v2.ConfigGroup(
@@ -75,7 +113,15 @@ def create_knative_eventing_core(
         ],
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=[knative_eventing_namespace],
+            depends_on=[
+                kubernetes_provider,
+                knative_eventing_namespace,
+            ],
+            custom_timeouts=pulumi.CustomTimeouts(
+                create="2m",
+                update="2m",
+                delete="2m",
+            ),
         ),
     )
 
@@ -86,22 +132,55 @@ def create_knative_eventing_core(
         ],
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=[knative_eventing_crds],
+            depends_on=[
+                kubernetes_provider,
+                knative_eventing_namespace,
+                knative_eventing_crds,
+            ],
             custom_timeouts=pulumi.CustomTimeouts(
-                create="15m",
-                update="15m",
-                delete="15m",
+                create="2m",
+                update="2m",
+                delete="2m",
             ),
         ),
     )
 
 
-def create_knative_service(
+def create_knative_broker(
+    kubernetes_provider: k8s.Provider,
+    knative_eventing_core: k8s.yaml.v2.ConfigGroup,
+) -> k8s.yaml.v2.ConfigGroup:
+    content = {
+        "apiVersion": "eventing.knative.dev/v1",
+        "kind": "Broker",
+        "metadata": {
+            "name": "default",
+            "namespace": "default",
+        },
+    }
+
+    return k8s.yaml.v2.ConfigGroup(
+        resource_name="pocketsizefund-default-broker",
+        objs=[content],
+        opts=pulumi.ResourceOptions(
+            provider=kubernetes_provider,
+            depends_on=[kubernetes_provider, knative_eventing_core],
+            custom_timeouts=pulumi.CustomTimeouts(
+                create="2m",
+                update="2m",
+                delete="2m",
+            ),
+        ),
+    )
+
+
+def create_knative_service(  # noqa: PLR0913
     kubernetes_provider: k8s.Provider,
     service_name: str,
-    image_reference: pulumi.Output[str],
+    image: docker_build.Image,
+    application_load_balancer_service_target_group: aws.lb.TargetGroup,
+    knative_serving_core: k8s.yaml.v2.ConfigGroup,
     environment_variables: pulumi.Output[dict[str, str]] | None = None,
-    depends_on: list[pulumi.Resource] | None = None,
 ) -> k8s.yaml.v2.ConfigGroup:
     formatted_environment_variables = (
         environment_variables.apply(
@@ -116,11 +195,17 @@ def create_knative_service(
     content = {
         "apiVersion": "serving.knative.dev/v1",
         "kind": "Service",
-        "metadata": {"name": service_name, "namespace": "default"},
+        "metadata": {
+            "name": service_name,
+            "namespace": "default",
+        },
         "spec": {
             "template": {
                 "metadata": {
                     "annotations": {
+                        "alb.ingress.kubernetes.io/scheme": "internet-facing",
+                        "alb.ingress.kubernetes.io/target-type": "ip",
+                        "alb.ingress.kubernetes.io/target-group-arn": application_load_balancer_service_target_group.arn,  # noqa: E501
                         "prometheus.io/scrape": "true",
                         "prometheus.io/path": "/metrics",
                         "prometheus.io/port": "8080",
@@ -129,13 +214,16 @@ def create_knative_service(
                 "spec": {
                     "containers": [
                         {
-                            "image": image_reference,
+                            "image": image.ref,
                             "name": service_name,
                             "env": formatted_environment_variables,
                             "resources": {
                                 "requests": {"cpu": "100m", "memory": "128Mi"},
-                                "limits": {"cpu": "500m", "memory": "512Mi"},
+                                "limits": {"cpu": "1000m", "memory": "512Mi"},
                             },
+                            "ports": [
+                                {"containerPort": 8080},
+                            ],
                         }
                     ]
                 },
@@ -148,29 +236,17 @@ def create_knative_service(
         objs=[content],
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=depends_on,
-        ),
-    )
-
-
-def create_knative_broker(
-    kubernetes_provider: k8s.Provider,
-    knative_eventing_core: k8s.yaml.v2.ConfigGroup,
-) -> k8s.yaml.v2.ConfigGroup:
-    content = {
-        "apiVersion": "eventing.knative.dev/v1",
-        "kind": "Broker",
-        "metadata": {
-            "name": "default",
-            "namespace": "default",
-        },
-    }
-
-    return k8s.yaml.v2.ConfigGroup(
-        resource_name="pocketsizefund-default-broker",
-        objs=[content],
-        opts=pulumi.ResourceOptions(
-            provider=kubernetes_provider, depends_on=[knative_eventing_core]
+            depends_on=[
+                kubernetes_provider,
+                image,
+                application_load_balancer_service_target_group,
+                knative_serving_core,
+            ],
+            custom_timeouts=pulumi.CustomTimeouts(
+                create="2m",
+                update="2m",
+                delete="2m",
+            ),
         ),
     )
 
@@ -180,7 +256,7 @@ def create_knative_trigger(
     source_service_name: str,
     source_attribute_type: str,
     target_service_name: str,
-    depends_on: list[pulumi.Resource] | None = None,
+    knative_eventing_core: k8s.yaml.v2.ConfigGroup,
 ) -> k8s.yaml.v2.ConfigGroup:
     resource_name = (
         f"pocketsizefund-{source_service_name}-to-{target_service_name}-trigger"
@@ -215,7 +291,12 @@ def create_knative_trigger(
         objs=[content],
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=depends_on,
+            depends_on=[kubernetes_provider, knative_eventing_core],
+            custom_timeouts=pulumi.CustomTimeouts(
+                create="2m",
+                update="2m",
+                delete="2m",
+            ),
         ),
     )
 
@@ -225,7 +306,7 @@ def create_knative_schedule(
     target_service_name: str,
     target_path: str,
     cron_schedule: str,
-    depends_on: list[pulumi.Resource] | None = None,
+    knative_eventing_core: k8s.yaml.v2.ConfigGroup,
 ) -> k8s.yaml.v2.ConfigGroup:
     content = {
         "apiVersion": "sources.knative.dev/v1",
@@ -254,6 +335,11 @@ def create_knative_schedule(
         objs=[content],
         opts=pulumi.ResourceOptions(
             provider=kubernetes_provider,
-            depends_on=depends_on,
+            depends_on=[kubernetes_provider, knative_eventing_core],
+            custom_timeouts=pulumi.CustomTimeouts(
+                create="2m",
+                update="2m",
+                delete="2m",
+            ),
         ),
     )
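For reference, create_service_environment_variables above collapses (name, value) pairs into a single Output[dict[str, str]]; the helper relies on Output.all unwrapping Outputs nested inside the tuples. A hedged usage sketch with hypothetical variable names:

    import pulumi

    # hypothetical pairs; plain strings and resource Outputs can be mixed,
    # assuming nested Outputs inside the tuples are unwrapped by Output.all
    environment_variables = create_service_environment_variables(
        inputs=[
            ("EXAMPLE_REGION", "us-east-1"),
            ("EXAMPLE_BUCKET_NAME", pulumi.Output.from_input("example-bucket")),
        ]
    )
    # the result fits the environment_variables parameter of create_knative_service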
"manager": "manual", + "stack": "none", +} diff --git a/infrastructure/upload_grafana_dashboard.nu b/infrastructure/upload_grafana_dashboard.nu index 19e878244..c7362d284 100644 --- a/infrastructure/upload_grafana_dashboard.nu +++ b/infrastructure/upload_grafana_dashboard.nu @@ -1,48 +1,186 @@ #!/usr/bin/env nu -# upload Grafana dashboard to Grafana Cloud -# Usage: nu upload_grafana_dashboard.nu +def main [ + --dashboard-file: string = "grafana-dashboard.json" + --workspace-id: string = "" + --region: string = "us-east-1" + --profile: string = "pulumi" + --dry-run = false +] { + check_prerequisites + + print "Pocket Size Fund Grafana Dashboard Upload" + print "================================================" -let grafana_url = $env.GRAFANA_CLOUD_URL? | default "" -let grafana_api_key = $env.GRAFANA_API_KEY? | default "" + if not ($dashboard_file | path exists) { + print $"Dashboard file not found: ($dashboard_file)" + exit 1 + } -if ($grafana_api_key | is-empty) { - print "GRAFANA_API_KEY environment variable is required" - exit 1 -} + print $"Using dashboard file: ($dashboard_file)" -if ($grafana_url == "") { - print "GRAFANA_CLOUD_URL environment variable is required" - exit 1 -} + let workspace_id = if ($workspace_id | is-empty) { + print "Getting Grafana workspace ID from AWS..." + try { + let result = (aws grafana list-workspaces + --region $region + --profile $profile + --query 'workspaces[?name==`pocketsizefund`]' + --output json + | from json | get 0.id ) + if ($result | is-empty) { + print "No workspace found with name 'pocketsizefund'" + exit 1 + } + $result + } catch { + print "Failed to get workspace ID. Provide --workspace-id or ensure workspace exists" + exit 1 + } + } else { + $workspace_id + } -let dashboard_file = "grafana_dashboard.json" + if ($workspace_id | is-empty) { + print "No Grafana workspace found named 'pocketsizefund'" + print "Create workspace first or provide specific --workspace-id" + exit 1 + } -if not ($dashboard_file | path exists) { - print $"dashboard file '($dashboard_file)' not found" - exit 1 -} + print $"Target workspace ID: ($workspace_id)" -let dashboard_content = open $dashboard_file | from json + print "Getting Grafana workspace endpoint..." + let workspace_info = try { + (aws grafana describe-workspace + --workspace-id $workspace_id + --region $region + --profile $profile + --output json + | from json) + } catch { + print $"Failed to describe workspace: ($workspace_id)" + exit 1 + } -let upload_payload = { - dashboard: $dashboard_content.dashboard - overwrite: true - message: "uploaded via Nu script" -} + let grafana_endpoint = $workspace_info.workspace.endpoint + print $"Grafana endpoint: ($grafana_endpoint)" -let headers = [ - "Authorization" $"Bearer ($grafana_api_key)" - "Content-Type" "application/json" -] + print "Loading dashboard configuration..." + let dashboard_content = try { + open $dashboard_file | from json + } catch { + print $"Invalid JSON in dashboard file: ($dashboard_file)" + exit 1 + } -try { - let response = $upload_payload - | to json - | http post --headers $headers $"($grafana_url)/api/dashboards/db" - - print "dashboard uploaded successfully!" - -} catch { |error| - print $"failed to upload dashboard: ($error)" + let upload_payload = { + dashboard: $dashboard_content.dashboard + overwrite: true + message: $"Uploaded via Nu script at (date now | format date '%Y-%m-%d %H:%M:%S')" + } + + if $dry_run { + print "DRY RUN MODE - Dashboard payload preview:" + print ($upload_payload | to json) + print "\nDry run completed. 
Use without --dry-run to upload." + exit 0 + } + + print "Creating Grafana API key..." + let api_key_response = try { + (aws grafana create-workspace-api-key + --workspace-id $workspace_id + --key-name $"pocketsizefund-upload-($workspace_id)-(date now | format date '%Y%m%d-%H%M%S')" + --key-role ADMIN + --seconds-to-live 3600 + --region $region + --profile $profile + --output json + | from json) + } catch { + print "Failed to create API key. Check permissions." + exit 1 + } + + let api_key = $api_key_response.key + print "API key created successfully" + + print "Uploading dashboard to Grafana..." + let upload_result = try { + (http post $"($grafana_endpoint)/api/dashboards/db" + -H [ + "Authorization" $"Bearer ($api_key)" + "Content-Type" "application/json" + ] + ($upload_payload | to json)) + } catch { + print "Failed to upload dashboard" + + try { + (aws grafana delete-workspace-api-key + --workspace-id $workspace_id + --key-name $api_key_response.keyName + --region $region + --profile $profile + | ignore) + } catch { + # Ignore cleanup errors + } + + exit 1 + } + + let result = try { + $upload_result | from json + } catch { + print "Upload may have succeeded but response parsing failed" + print $"Response: ($upload_result)" + } + + if ($result | get status? | default "unknown") == "success" { + print "Dashboard uploaded successfully!" + print $"Dashboard URL: ($grafana_endpoint)/d/($result.slug)" + print $"Dashboard ID: ($result.id)" + print $"Version: ($result.version)" + } else { + print "Upload completed with unknown status" + print $"Response: ($result)" + } + + print "Cleaning up temporary API key..." + try { + (aws grafana delete-workspace-api-key + --workspace-id $workspace_id + --key-name $api_key_response.keyName + --region $region + --profile $profile + | ignore) + print "API key cleaned up" + } catch { + print "Failed to clean up API key (manual deletion may be needed)" + } + + print "\nDashboard upload completed!" + print $"Access your dashboard at: ($grafana_endpoint)" } + +def check_prerequisites [] { + print "Checking prerequisites..." + + try { + aws --version | ignore + print "AWS CLI available" + } catch { + print "AWS CLI not found. Please install AWS CLI." 
+ exit 1 + } + + try { + which jq | ignore + print "jq available" + } catch { + print "jq not found (optional but recommended for JSON debugging)" + } + + print "Prerequisites check completed\n" +} \ No newline at end of file diff --git a/infrastructure/vpc.py b/infrastructure/vpc.py new file mode 100644 index 000000000..d61e186c4 --- /dev/null +++ b/infrastructure/vpc.py @@ -0,0 +1,134 @@ +import pulumi +import pulumi_aws as aws +from tags import pulumi_tags + + +def create_virtual_private_cloud() -> aws.ec2.Vpc: + return aws.ec2.Vpc( + resource_name="pocketsizefund-vpc", + cidr_block="10.0.0.0/16", + enable_dns_support=True, + enable_dns_hostnames=True, + tags=pulumi_tags, + ) + + +def create_internet_gateway( + virtual_private_cloud: aws.ec2.Vpc, +) -> aws.ec2.InternetGateway: + return aws.ec2.InternetGateway( + resource_name="pocketsizefund-internet-gateway", + vpc_id=virtual_private_cloud.id, + opts=pulumi.ResourceOptions(depends_on=[virtual_private_cloud]), + tags=pulumi_tags, + ) + + +def create_elastic_ip(virtual_private_cloud: aws.ec2.Vpc) -> aws.ec2.Eip: + return aws.ec2.Eip( + resource_name="pocketsizefund-elastic-ip", + opts=pulumi.ResourceOptions( + depends_on=[virtual_private_cloud], + ), + tags=pulumi_tags, + ) + + +def create_nat_gateway( + elastic_ip: aws.ec2.Eip, + public_subnet: aws.ec2.Subnet, +) -> aws.ec2.NatGateway: + return aws.ec2.NatGateway( + resource_name="pocketsizefund-nat-gateway", + allocation_id=elastic_ip.id, + subnet_id=public_subnet.id, + tags=pulumi_tags, + opts=pulumi.ResourceOptions( + depends_on=[ + elastic_ip, + public_subnet, + ], + ), + ) + + +def create_route_table( + virtual_private_cloud: aws.ec2.Vpc, + internet_gateway: aws.ec2.InternetGateway | None = None, + nat_gateway: aws.ec2.NatGateway | None = None, +) -> aws.ec2.RouteTable: + depends_on: list[pulumi.Resource] = [virtual_private_cloud] + if internet_gateway: + depends_on.append(internet_gateway) + if nat_gateway: + depends_on.append(nat_gateway) + + if internet_gateway and nat_gateway: + message = "Cannot specify both internet_gateway and nat_gateway" + raise ValueError(message) + if not internet_gateway and not nat_gateway: + message = "Must specify either internet_gateway or nat_gateway" + raise ValueError(message) + + visibility = "public" if internet_gateway else "private" + + return aws.ec2.RouteTable( + resource_name=f"pocketsizefund-{visibility}-route-table", + vpc_id=virtual_private_cloud.id, + routes=[ + aws.ec2.RouteTableRouteArgs( + cidr_block="0.0.0.0/0", + gateway_id=internet_gateway.id if internet_gateway else None, + nat_gateway_id=nat_gateway.id if nat_gateway else None, + ) + ], + opts=pulumi.ResourceOptions(depends_on=depends_on), + tags=pulumi_tags, + ) + + +def create_subnet( + virtual_private_cloud: aws.ec2.Vpc, + route_table: aws.ec2.RouteTable, + availability_zone: str, + subnet_number: int, + visibility: str = "public", +) -> aws.ec2.Subnet: + minimum_subnet_number = 0 + maximum_subnet_number = 255 + + if not minimum_subnet_number <= subnet_number <= maximum_subnet_number: + message = f"subnet_number must be between 0 and 255, got {subnet_number}" + raise ValueError(message) + + visibility = visibility.lower() + + subnet = aws.ec2.Subnet( + resource_name=f"pocketsizefund-{visibility}-subnet-{subnet_number}", + vpc_id=virtual_private_cloud.id, + cidr_block=f"10.0.{subnet_number}.0/24", + availability_zone=availability_zone, + map_public_ip_on_launch=visibility == "public", + tags=pulumi_tags, + opts=pulumi.ResourceOptions( + depends_on=[ + virtual_private_cloud, + 
diff --git a/infrastructure/vpc.py b/infrastructure/vpc.py
new file mode 100644
index 000000000..d61e186c4
--- /dev/null
+++ b/infrastructure/vpc.py
@@ -0,0 +1,134 @@
+import pulumi
+import pulumi_aws as aws
+from tags import pulumi_tags
+
+
+def create_virtual_private_cloud() -> aws.ec2.Vpc:
+    return aws.ec2.Vpc(
+        resource_name="pocketsizefund-vpc",
+        cidr_block="10.0.0.0/16",
+        enable_dns_support=True,
+        enable_dns_hostnames=True,
+        tags=pulumi_tags,
+    )
+
+
+def create_internet_gateway(
+    virtual_private_cloud: aws.ec2.Vpc,
+) -> aws.ec2.InternetGateway:
+    return aws.ec2.InternetGateway(
+        resource_name="pocketsizefund-internet-gateway",
+        vpc_id=virtual_private_cloud.id,
+        opts=pulumi.ResourceOptions(depends_on=[virtual_private_cloud]),
+        tags=pulumi_tags,
+    )
+
+
+def create_elastic_ip(virtual_private_cloud: aws.ec2.Vpc) -> aws.ec2.Eip:
+    return aws.ec2.Eip(
+        resource_name="pocketsizefund-elastic-ip",
+        opts=pulumi.ResourceOptions(
+            depends_on=[virtual_private_cloud],
+        ),
+        tags=pulumi_tags,
+    )
+
+
+def create_nat_gateway(
+    elastic_ip: aws.ec2.Eip,
+    public_subnet: aws.ec2.Subnet,
+) -> aws.ec2.NatGateway:
+    return aws.ec2.NatGateway(
+        resource_name="pocketsizefund-nat-gateway",
+        allocation_id=elastic_ip.id,
+        subnet_id=public_subnet.id,
+        tags=pulumi_tags,
+        opts=pulumi.ResourceOptions(
+            depends_on=[
+                elastic_ip,
+                public_subnet,
+            ],
+        ),
+    )
+
+
+def create_route_table(
+    virtual_private_cloud: aws.ec2.Vpc,
+    internet_gateway: aws.ec2.InternetGateway | None = None,
+    nat_gateway: aws.ec2.NatGateway | None = None,
+) -> aws.ec2.RouteTable:
+    if internet_gateway and nat_gateway:
+        message = "Cannot specify both internet_gateway and nat_gateway"
+        raise ValueError(message)
+    if not internet_gateway and not nat_gateway:
+        message = "Must specify either internet_gateway or nat_gateway"
+        raise ValueError(message)
+
+    depends_on: list[pulumi.Resource] = [virtual_private_cloud]
+    if internet_gateway:
+        depends_on.append(internet_gateway)
+    if nat_gateway:
+        depends_on.append(nat_gateway)
+
+    visibility = "public" if internet_gateway else "private"
+
+    return aws.ec2.RouteTable(
+        resource_name=f"pocketsizefund-{visibility}-route-table",
+        vpc_id=virtual_private_cloud.id,
+        routes=[
+            aws.ec2.RouteTableRouteArgs(
+                cidr_block="0.0.0.0/0",
+                gateway_id=internet_gateway.id if internet_gateway else None,
+                nat_gateway_id=nat_gateway.id if nat_gateway else None,
+            )
+        ],
+        opts=pulumi.ResourceOptions(depends_on=depends_on),
+        tags=pulumi_tags,
+    )
+
+
+def create_subnet(
+    virtual_private_cloud: aws.ec2.Vpc,
+    route_table: aws.ec2.RouteTable,
+    availability_zone: str,
+    subnet_number: int,
+    visibility: str = "public",
+) -> aws.ec2.Subnet:
+    minimum_subnet_number = 0
+    maximum_subnet_number = 255
+
+    if not minimum_subnet_number <= subnet_number <= maximum_subnet_number:
+        message = f"subnet_number must be between 0 and 255, got {subnet_number}"
+        raise ValueError(message)
+
+    visibility = visibility.lower()
+
+    subnet = aws.ec2.Subnet(
+        resource_name=f"pocketsizefund-{visibility}-subnet-{subnet_number}",
+        vpc_id=virtual_private_cloud.id,
+        cidr_block=f"10.0.{subnet_number}.0/24",
+        availability_zone=availability_zone,
+        map_public_ip_on_launch=visibility == "public",
+        tags=pulumi_tags,
+        opts=pulumi.ResourceOptions(
+            depends_on=[
+                virtual_private_cloud,
+                route_table,
+            ],
+        ),
+    )
+
+    aws.ec2.RouteTableAssociation(
+        resource_name=f"pocketsizefund-{visibility}-route-table-subnet-association-{subnet_number}",
+        subnet_id=subnet.id,
+        route_table_id=route_table.id,
+        opts=pulumi.ResourceOptions(
+            depends_on=[
+                virtual_private_cloud,
+                subnet,
+                route_table,
+            ],
+        ),
+    )
+
+    return subnet
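create_subnet above uses subnet_number as the third octet of a /24 carved out of the 10.0.0.0/16 block declared by create_virtual_private_cloud, which is why the value is bounded to 0-255. A small self-contained check of that scheme (plain Python, no Pulumi engine required):

    import ipaddress

    vpc_block = ipaddress.ip_network("10.0.0.0/16")

    # every valid subnet_number yields a /24 contained in the VPC block
    for subnet_number in (0, 1, 255):
        subnet_block = ipaddress.ip_network(f"10.0.{subnet_number}.0/24")
        assert subnet_block.subnet_of(vpc_block)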
"https://files.pythonhosted.org/packages/99/8d/7f6ef1097e7fecf26b4ef72338d08e41644a41b7ee958a19f494ffcffc29/debugpy-1.8.15-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:047a493ca93c85ccede1dbbaf4e66816794bdc214213dde41a9a61e42d27f8fc", size = 4229517, upload-time = "2025-07-15T16:43:44.14Z" }, + { url = "https://files.pythonhosted.org/packages/3f/e8/e8c6a9aa33a9c9c6dacbf31747384f6ed2adde4de2e9693c766bdf323aa3/debugpy-1.8.15-cp312-cp312-win32.whl", hash = "sha256:b08e9b0bc260cf324c890626961dad4ffd973f7568fbf57feb3c3a65ab6b6327", size = 5276132, upload-time = "2025-07-15T16:43:45.529Z" }, + { url = "https://files.pythonhosted.org/packages/e9/ad/231050c6177b3476b85fcea01e565dac83607b5233d003ff067e2ee44d8f/debugpy-1.8.15-cp312-cp312-win_amd64.whl", hash = "sha256:e2a4fe357c92334272eb2845fcfcdbec3ef9f22c16cf613c388ac0887aed15fa", size = 5317645, upload-time = "2025-07-15T16:43:46.968Z" }, + { url = "https://files.pythonhosted.org/packages/07/d5/98748d9860e767a1248b5e31ffa7ce8cb7006e97bf8abbf3d891d0a8ba4e/debugpy-1.8.15-py2.py3-none-any.whl", hash = "sha256:bce2e6c5ff4f2e00b98d45e7e01a49c7b489ff6df5f12d881c67d2f1ac635f3d", size = 5282697, upload-time = "2025-07-15T16:44:07.996Z" }, ] [[package]] @@ -1038,37 +1038,25 @@ wheels = [ [[package]] name = "infrastructure" -version = "20250709.1" +version = "20250716.1" source = { virtual = "infrastructure" } dependencies = [ - { name = "boto3" }, - { name = "botocore" }, - { name = "loguru" }, { name = "pulumi" }, { name = "pulumi-aws" }, - { name = "pulumi-awsx" }, { name = "pulumi-docker" }, { name = "pulumi-docker-build" }, { name = "pulumi-eks" }, { name = "pulumi-kubernetes" }, - { name = "pulumi-std" }, - { name = "requests" }, ] [package.metadata] requires-dist = [ - { name = "boto3", specifier = ">=1.34.0" }, - { name = "botocore", specifier = ">=1.34.0" }, - { name = "loguru", specifier = ">=0.7.3" }, { name = "pulumi", specifier = ">=3.169.0" }, { name = "pulumi-aws", specifier = ">=6.0.0" }, - { name = "pulumi-awsx", specifier = ">=2.0.0" }, { name = "pulumi-docker", specifier = ">=3.0.0" }, { name = "pulumi-docker-build", specifier = ">=0.0.12" }, { name = "pulumi-eks", specifier = ">=3.9.1" }, { name = "pulumi-kubernetes", specifier = ">=4.23.0" }, - { name = "pulumi-std", specifier = ">=2.2.0" }, - { name = "requests", specifier = ">=2.31.0" }, ] [[package]] @@ -1733,7 +1721,7 @@ wheels = [ [[package]] name = "pulumi" -version = "3.171.0" +version = "3.184.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "debugpy" }, @@ -1745,7 +1733,7 @@ dependencies = [ { name = "semver" }, ] wheels = [ - { url = "https://files.pythonhosted.org/packages/7b/1b/14d5e7a76b21c5f168634f9ba05a39efc8239f0f9c120314ab2acf56cde6/pulumi-3.171.0-py3-none-any.whl", hash = "sha256:4092439e01a42509132c2ef837e09317eaf3fd38a4d12faa2b826015e58df094", size = 353578, upload-time = "2025-05-21T16:15:31.019Z" }, + { url = "https://files.pythonhosted.org/packages/d1/2a/45bfef6b6dbbceba00a37d3a5928fc0dee61b7ece0383d3f5244c9d26ae2/pulumi-3.184.0-py3-none-any.whl", hash = "sha256:2f4345763d6e171bfb1d900611ab15583bb7aacb97e332a0c8e04dc7e7299ff1", size = 364173, upload-time = "2025-07-18T12:24:47.195Z" }, ] [[package]] @@ -1762,35 +1750,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/30/0b/3ed17e6fc68d9c0b572c4b259c5e291a8267011c640a9a9bb5a26b596763/pulumi_aws-6.83.0-py3-none-any.whl", hash = "sha256:e61144b9680ae1ebd98daa7bfffce1f646796a9640f9b73119b91e72c26e3bbc", 
size = 10608060, upload-time = "2025-06-16T21:58:14.969Z" }, ] -[[package]] -name = "pulumi-awsx" -version = "2.22.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "parver" }, - { name = "pulumi" }, - { name = "pulumi-aws" }, - { name = "pulumi-docker" }, - { name = "pulumi-docker-build" }, - { name = "semver" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/2b/99/02c171e4f54fe7b89bf06435dc98b5f6656684ece73535915abb83e804a2/pulumi_awsx-2.22.0.tar.gz", hash = "sha256:1852b2e0f57a80b2e325b9223b2538665d0ac512637f737494815c6aa67ff28b", size = 104537, upload-time = "2025-06-25T17:46:13.272Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/41/04295b10120bb95ded11476804bf2eb70827116d8ecddc880888dad4b3cb/pulumi_awsx-2.22.0-py3-none-any.whl", hash = "sha256:6607557dfc88431457f9a737e488a23e4fa9ce8c006a70efe433729973ce7253", size = 121001, upload-time = "2025-06-25T17:46:11.601Z" }, -] - [[package]] name = "pulumi-docker" -version = "4.7.0" +version = "4.8.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "parver" }, { name = "pulumi" }, { name = "semver" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9e/d7/9e1838f2b7575ef7658f140074b7cbd1efa64de7b017ff4d7e33151fcc83/pulumi_docker-4.7.0.tar.gz", hash = "sha256:cb50ec39f3d6201b336db57997d40018e10ab754dad48d7663b8d2e96c48a3f8", size = 96058, upload-time = "2025-05-21T18:44:17.188Z" } +sdist = { url = "https://files.pythonhosted.org/packages/88/47/4ef6f5c2f2a53b6883c152d945bab0ee5e66ed3d295eca8b1e16aa462ca1/pulumi_docker-4.8.0.tar.gz", hash = "sha256:c2460c1d3f011a3602f46d8ef0a4fabd7cc08556a84e7c43ee31acb09d064ad7", size = 106590, upload-time = "2025-06-18T21:39:52.577Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/30/0c/f1c6a28e341328128d10cb7cbdc2a43926e60c94b203e52c9611d64b6e00/pulumi_docker-4.7.0-py3-none-any.whl", hash = "sha256:9316152c805adc23af7af6bcc4180d80980b05d7c4974460bfc450a8617cc996", size = 117331, upload-time = "2025-05-21T18:44:16.041Z" }, + { url = "https://files.pythonhosted.org/packages/7e/a7/c345d79c6f5592b41b76e31e27d08ff43fce4ba92a1040fe885065e1f4cb/pulumi_docker-4.8.0-py3-none-any.whl", hash = "sha256:75f2be3d9a646292b6d1985edcc03239cc0ab077e96a188e8423b737cf6c958f", size = 128007, upload-time = "2025-06-18T21:39:50.8Z" }, ] [[package]] @@ -1838,17 +1809,6 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/cc/00/983975f1bcf02601f12b4afb60a4dfdf9f81ab11cbf2493aa349781684ae/pulumi_kubernetes-4.23.0-py3-none-any.whl", hash = "sha256:4866c00259170e5e1604c0a3335063036e80c29000bf9b88a380cd600fe44c4e", size = 2794243, upload-time = "2025-05-02T17:34:10.082Z" }, ] -[[package]] -name = "pulumi-std" -version = "2.2.0" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "parver" }, - { name = "pulumi" }, - { name = "semver" }, -] -sdist = { url = "https://files.pythonhosted.org/packages/95/f1/1623662490d001ead46064cd25475d9732a1ada974d9c5c92af11616e4c2/pulumi_std-2.2.0.tar.gz", hash = "sha256:e46759862b5068f26ff4c2252a9c9e9c720e6ea55ae58ac7b83d0bf7a9ac25bb", size = 25811, upload-time = "2025-02-13T17:30:16.232Z" } - [[package]] name = "pyarrow" version = "20.0.0"