diff --git a/apps/engineering/app/components/mermaid.tsx b/apps/engineering/app/components/mermaid.tsx new file mode 100644 index 0000000000..3fc27f641a --- /dev/null +++ b/apps/engineering/app/components/mermaid.tsx @@ -0,0 +1,32 @@ +"use client"; + +import mermaid from "mermaid"; +import { useTheme } from "next-themes"; +import { useEffect, useRef } from "react"; + +export function Mermaid({ chart }: { chart: string }) { + const ref = useRef(null); + const { resolvedTheme } = useTheme(); + + useEffect(() => { + if (!ref.current) { + return; + } + mermaid.initialize({ + startOnLoad: false, + theme: resolvedTheme === "dark" ? "dark" : "default", + }); + + void mermaid.run({ + nodes: [ref.current], + }); + }, [resolvedTheme]); + + return ( +
+    <div>
+      <pre ref={ref} className="mermaid">
+        {chart}
+      </pre>
+    </div>
+ ); +} diff --git a/apps/engineering/app/docs/[[...slug]]/page.tsx b/apps/engineering/app/docs/[[...slug]]/page.tsx index 467bf2e49b..dbab4465dd 100644 --- a/apps/engineering/app/docs/[[...slug]]/page.tsx +++ b/apps/engineering/app/docs/[[...slug]]/page.tsx @@ -1,3 +1,4 @@ +import { Mermaid } from "@/app/components/mermaid"; import { source } from "@/app/source"; import { Banner } from "fumadocs-ui/components/banner"; import { Tab, Tabs } from "fumadocs-ui/components/tabs"; @@ -39,6 +40,7 @@ export default async function Page(props: { Tabs, Tab, Banner, + Mermaid, }} /> diff --git a/apps/engineering/content/docs/architecture/meta.json b/apps/engineering/content/docs/architecture/meta.json index b58cebb048..84b9debb19 100644 --- a/apps/engineering/content/docs/architecture/meta.json +++ b/apps/engineering/content/docs/architecture/meta.json @@ -3,5 +3,5 @@ "description": "How does Unkey work", "icon": "Pencil", "root": false, - "pages": ["index", "services"] + "pages": ["index", "services", "workflows"] } diff --git a/apps/engineering/content/docs/architecture/workflows/creating-services.mdx b/apps/engineering/content/docs/architecture/workflows/creating-services.mdx new file mode 100644 index 0000000000..3f360307f6 --- /dev/null +++ b/apps/engineering/content/docs/architecture/workflows/creating-services.mdx @@ -0,0 +1,176 @@ +--- +title: Creating Workflow Services +description: Guide to adding new Restate workflow services +--- + +# Creating Workflow Services + +## When to Use Workflows + +Use Restate workflows for operations that: + +- ✅ Are long-running (seconds to hours) +- ✅ Need guaranteed completion despite failures +- ✅ Involve multiple external systems +- ✅ Must not run concurrently (use Virtual Objects) + +Don't use workflows for: + +- ❌ Simple CRUD operations +- ❌ Synchronous API calls +- ❌ Operations that complete in milliseconds + +## Steps + +### 1. 
Define the Proto + +Create `go/proto/hydra/v1/yourservice.proto`: + +```protobuf +syntax = "proto3"; +package hydra.v1; + +import "dev/restate/sdk/go.proto"; + +option go_package = "github.com/unkeyed/unkey/go/gen/proto/hydra/v1;hydrav1"; + +service YourService { + option (dev.restate.sdk.go.service_type) = VIRTUAL_OBJECT; + rpc YourOperation(YourRequest) returns (YourResponse) {} +} + +message YourRequest { + string key_field = 1; // Used as Virtual Object key +} + +message YourResponse {} +``` + +**Key decisions:** + +- Service type: `VIRTUAL_OBJECT` for serialization, `SERVICE` otherwise +- Key field: The field used for Virtual Object key (e.g., `user_id`, `project_id`) + +### 2. Generate Code + +```bash +cd go +make generate +``` + +### 3. Implement the Service + +Create `go/apps/ctrl/workflows/yourservice/`: + +**service.go:** + +```go +package yourservice + +import ( + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/otel/logging" +) + +type Service struct { + hydrav1.UnimplementedYourServiceServer + db db.Database + logger logging.Logger +} + +func New(cfg Config) *Service { + return &Service{db: cfg.DB, logger: cfg.Logger} +} +``` + +**your_operation_handler.go:** + +```go +func (s *Service) YourOperation( + ctx restate.ObjectContext, + req *hydrav1.YourRequest, +) (*hydrav1.YourResponse, error) { + // Step 1: Durable step example + data, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.YourData, error) { + return db.Query.FindYourData(stepCtx, s.db.RO(), req.KeyField) + }, restate.WithName("fetch data")) + if err != nil { + return nil, err + } + + // Step 2: Another durable step + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + // Your logic here + return restate.Void{}, nil + }, restate.WithName("process data")) + + return &hydrav1.YourResponse{}, nil +} +``` + +### 4. 
Register the Service + +Update `go/apps/ctrl/run.go`: + +```go +import ( + "github.com/unkeyed/unkey/go/apps/ctrl/workflows/yourservice" +) + +func Run(ctx context.Context, cfg Config) error { + // ... existing setup ... + + restateSrv.Bind(hydrav1.NewYourServiceServer(yourservice.New(yourservice.Config{ + DB: database, + Logger: logger, + }))) +} +``` + +### 5. Call the Service + +These are ugly, they're working on generating proper clients from the proto definitions +https://github.com/restatedev/sdk-go/issues/103 + +**Blocking call:** + +```go +response, err := restateingress.Object[*hydrav1.YourRequest, *hydrav1.YourResponse]( + restateClient, + "hydra.v1.YourService", + keyValue, + "YourOperation", +).Request(ctx, request) +``` + +**Fire-and-forget:** + +```go +invocation := restateingress.WorkflowSend[*hydrav1.YourRequest]( + restateClient, + "hydra.v1.YourService", + keyValue, + "YourOperation", +).Send(ctx, request) +``` + +## Best Practices + +1. **Small Steps**: Break operations into focused, single-purpose durable steps +2. **Named Steps**: Always use `restate.WithName("step name")` for observability +3. **Terminal Errors**: Use `restate.TerminalError(err, statusCode)` for validation failures +4. 
**Virtual Object Keys**: Choose keys that represent the resource being protected + +## Examples + +See existing implementations: + +- **DeploymentService**: `go/apps/ctrl/workflows/deploy/` +- **RoutingService**: `go/apps/ctrl/workflows/routing/` +- **CertificateService**: `go/apps/ctrl/workflows/certificate/` + +## References + +- [Restate Go SDK Docs](https://docs.restate.dev/develop/go/) +- [Restate Overview](./index) diff --git a/apps/engineering/content/docs/architecture/workflows/deployment-service.mdx b/apps/engineering/content/docs/architecture/workflows/deployment-service.mdx new file mode 100644 index 0000000000..0c035a3fd5 --- /dev/null +++ b/apps/engineering/content/docs/architecture/workflows/deployment-service.mdx @@ -0,0 +1,93 @@ +--- +title: Deployment Service +description: Durable deployment workflow orchestration +--- + +# Deployment Service + +The `DeploymentService` orchestrates the complete deployment lifecycle, from building containers to assigning domains. + +**Location:** `go/apps/ctrl/workflows/deploy/` +**Proto:** `go/proto/hydra/v1/deployment.proto` +**Key:** `project_id` + +## Operations + +### Deploy + + FetchMeta[Fetch Metadata] + FetchMeta --> StatusBuilding[Status: Building] + StatusBuilding --> CreateKrane[Create in Krane] + CreateKrane --> PollStatus{Poll Until Ready} + PollStatus --> UpsertVMs[Upsert VM Records] + UpsertVMs --> PollStatus + PollStatus --> ScrapeAPI[Scrape OpenAPI Spec] + ScrapeAPI --> BuildDomains[Build Domain List] + BuildDomains --> AssignDomains[Call RoutingService] + AssignDomains --> StatusReady[Status: Ready] + StatusReady --> UpdateLive[Update Live Deployment] + UpdateLive --> End([Complete]) + + style AssignDomains fill:#e1f5fe + style StatusReady fill:#c8e6c9 +`} /> + +Creates a new deployment: +1. Fetch deployment, workspace, project, environment metadata +2. Create deployment in Krane +3. Poll until instances are running +4. Scrape OpenAPI spec +5. Call RoutingService to assign domains atomically +6. 
Update project's live deployment ID + +Implementation: `go/apps/ctrl/workflows/deploy/deploy_handler.go` + +### Rollback + + Validate[Validate Deployments] + Validate --> CheckVMs[Check Target VMs] + CheckVMs --> FindDomains[Find Sticky Domains] + FindDomains --> SwitchDomains[Call RoutingService] + SwitchDomains --> UpdateProject[Update Live Deployment] + UpdateProject --> End([Success]) + + style SwitchDomains fill:#e1f5fe + style UpdateProject fill:#c8e6c9 +`} /> + +Rolls back to a previous deployment: +1. Validate source/target deployments +2. Find sticky domains (live + environment level) +3. Call RoutingService to switch domains atomically +4. Update project metadata + +Implementation: `go/apps/ctrl/workflows/deploy/rollback_handler.go` + +### Promote + + Validate[Validate Deployment] + Validate --> FindDomains[Find All Domains] + FindDomains --> SwitchDomains[Call RoutingService] + SwitchDomains --> ClearFlag[Clear Rolled Back Flag] + ClearFlag --> End([Success]) + + style SwitchDomains fill:#e1f5fe +`} /> + +Promotes a deployment to live, removing rolled-back state: +1. Validate deployment is ready +2. Find all project domains +3. Call RoutingService to reassign domains +4. Clear rolled_back flag + +Implementation: `go/apps/ctrl/workflows/deploy/promote_handler.go` + +## Why RoutingService? 
+ +All domain/gateway operations are delegated to `RoutingService` to: +- Ensure atomic updates (gateway configs → domains) +- Serialize domain operations per project +- Provide rollback capabilities for failed routing changes diff --git a/apps/engineering/content/docs/architecture/workflows/index.mdx b/apps/engineering/content/docs/architecture/workflows/index.mdx new file mode 100644 index 0000000000..f769306768 --- /dev/null +++ b/apps/engineering/content/docs/architecture/workflows/index.mdx @@ -0,0 +1,91 @@ +--- +title: Durable Workflows with Restate +description: How we use Restate for durable execution of deployment operations +--- + +# Durable Workflows with Restate + +Unkey uses [Restate](https://restate.dev) for durable workflow execution in critical deployment operations. Restate provides: + +- **Durable Execution**: Operations resume from the last successful step after failures +- **Automatic Retries**: Transient failures are retried automatically +- **State Management**: Workflow state is managed by Restate, not in our database +- **Observability**: Built-in UI to inspect running workflows + +## Core Concepts + +### Virtual Objects + +Virtual Objects provide key-based concurrency control - only one handler can execute at a time per object key. Example: `DeploymentService` is keyed by `project_id`, ensuring only one deployment per project runs at a time. + +### Durable Steps + +Each `restate.Run()` step executes once and stores its result. After failures, workflows resume from stored results without re-executing completed steps. + +### Service Communication + +Workflows call each other using blocking (`Object.Request`) or fire-and-forget (`WorkflowSend.Send`) patterns. See the Go implementation files for examples. 
+ +## Workflow Services + +### DeploymentService + +**Location:** `go/apps/ctrl/workflows/deploy/` +**Proto:** `go/proto/hydra/v1/deployment.proto` +**Key:** `project_id` +**Operations:** Deploy, Rollback, Promote + +Handles deployment lifecycle: building containers via Krane, polling status, scraping OpenAPI specs, and assigning domains. + +See: [Deployment Service](./deployment-service) + +### RoutingService + +**Location:** `go/apps/ctrl/workflows/routing/` +**Proto:** `go/proto/hydra/v1/routing.proto` +**Key:** `project_id` +**Operations:** AssignDomains, SwitchDomains + +Manages atomic domain and gateway configuration changes across control plane (domains DB) and data plane (gateway configs). + +See: [Routing Service](./routing-service) + +### CertificateService + +**Location:** `go/apps/ctrl/workflows/certificate/` +**Proto:** `go/proto/hydra/v1/certificate.proto` +**Key:** `domain` +**Operations:** ProcessChallenge + +Handles ACME certificate challenges and issuance for custom domains. + +## Configuration + +Services auto-register with Restate on startup via `go/apps/ctrl/run.go`. Config fields (see `go/apps/ctrl/config.go`): + +- `Restate.IngressURL`: Restate ingress endpoint for invoking workflows +- `Restate.AdminURL`: Restate admin endpoint for service registration +- `Restate.HttpPort`: Port where ctrl listens for Restate HTTP requests +- `Restate.RegisterAs`: Public URL of this service for self-registration + +## Error Handling + +- **Terminal Errors**: Use `restate.TerminalError(err, statusCode)` for business logic failures that shouldn't retry +- **Transient Errors**: Return regular errors for automatic retry + +## Best Practices + +1. **Idempotent Steps**: Use UPSERT instead of INSERT for database operations +2. **Named Steps**: Always use `restate.WithName("step name")` for observability +3. **Small Steps**: Break operations into focused, single-purpose steps +4. 
**Virtual Objects**: Use for automatic serialization instead of manual locking + +## Observability + +Restate UI (port 9070) shows running/completed invocations, step execution history, failures, and retries. + +## References + +- [Restate Official Docs](https://docs.restate.dev) +- [Restate Go SDK](https://github.com/restatedev/sdk-go) +- [Creating Workflow Services](./creating-services) diff --git a/apps/engineering/content/docs/architecture/workflows/meta.json b/apps/engineering/content/docs/architecture/workflows/meta.json new file mode 100644 index 0000000000..019af49ebb --- /dev/null +++ b/apps/engineering/content/docs/architecture/workflows/meta.json @@ -0,0 +1,7 @@ +{ + "title": "Durable Workflows", + "description": "Restate-based workflow orchestration", + "icon": "Workflow", + "root": false, + "pages": ["index", "deployment-service", "routing-service", "creating-services"] +} diff --git a/apps/engineering/content/docs/architecture/workflows/routing-service.mdx b/apps/engineering/content/docs/architecture/workflows/routing-service.mdx new file mode 100644 index 0000000000..b72166a85a --- /dev/null +++ b/apps/engineering/content/docs/architecture/workflows/routing-service.mdx @@ -0,0 +1,89 @@ +--- +title: Routing Service +description: Atomic domain and gateway configuration management +--- + +# Routing Service + +The `RoutingService` manages atomic domain assignment and gateway configuration updates, ensuring consistency between control plane (domains DB) and data plane (gateway configs). + +**Location:** `go/apps/ctrl/workflows/routing/` +**Proto:** `go/proto/hydra/v1/routing.proto` +**Key:** `project_id` + +## Why Separate Service? 
+ +Domain and gateway operations are the **critical section** of deployments: +- Must be atomic - both succeed or both fail +- Must be serialized per project to prevent race conditions +- Should not block non-routing operations (like building containers) + +By separating routing, we: +- Allow multiple deployments to build in parallel +- Serialize only the sensitive routing mutations +- Provide clear boundaries for concurrency control + +## Operations + +### AssignDomains + + LoopDomains{For Each Domain} + LoopDomains --> FindDomain[Find or Create Domain] + FindDomain --> NextDomain + NextDomain --> LoopDomains + LoopDomains --> PrepareGateway[Prepare Gateway Configs] + PrepareGateway --> BulkUpsert[Bulk Upsert to Partition DB] + BulkUpsert --> End([Return Changed Domains]) + + style BulkUpsert fill:#e1f5fe +`} /> + +Creates or reassigns domains to a deployment: +1. For each domain: find existing or insert new domain record +2. Skip domains marked as rolled back +3. Prepare gateway configs (exclude local domains like `.local`, `.test`) +4. Bulk upsert gateway configs to partition database +5. Return list of changed domains + +Implementation: `go/apps/ctrl/workflows/routing/assign_domains_handler.go` + +**Domain Sticky Levels:** +- `UNSPECIFIED`: Per-commit domains (e.g., `abc123.domain.com`) +- `BRANCH`: Branch-level (e.g., `main.domain.com`) +- `ENVIRONMENT`: Environment-level (e.g., `staging.domain.com`) +- `LIVE`: Production domain (e.g., `domain.com`) + +### SwitchDomains + + FetchGateway[Fetch Target Gateway Config] + FetchGateway --> FetchDomains[Fetch Domains by IDs] + FetchDomains --> UpsertGateway[Upsert Gateway Configs] + UpsertGateway --> ReassignDomains[Reassign Domains] + ReassignDomains --> End([Success]) + + style UpsertGateway fill:#e1f5fe + style ReassignDomains fill:#c8e6c9 +`} /> + +Reassigns existing domains to a different deployment: +1. Fetch target deployment's gateway config +2. Fetch domain records +3. 
Upsert gateway configs first (data plane ready) +4. Reassign domains (control plane pointers updated) + +Implementation: `go/apps/ctrl/workflows/routing/switch_domains_handler.go` + +## Order of Operations + +Critical: Gateway configs are always updated **before** domain reassignment. This ensures: +1. Gateway is ready to serve traffic before DNS/routing points to it +2. No downtime during switches +3. Rollback is possible if domain update fails + +## Local Domain Filtering + +Domains with `.local` or `.test` TLDs, or `localhost`/`127.0.0.1` are excluded from gateway config generation since they're for local development only. + +Helper: `go/apps/ctrl/workflows/routing/helpers.go:isLocalHostname()` diff --git a/apps/engineering/content/docs/infrastructure/database-schema.mdx b/apps/engineering/content/docs/infrastructure/database-schema.mdx index 96941b239f..3862bfa704 100644 --- a/apps/engineering/content/docs/infrastructure/database-schema.mdx +++ b/apps/engineering/content/docs/infrastructure/database-schema.mdx @@ -3,7 +3,6 @@ title: "Database Schema Management" description: "How database schemas are managed and applied in the Unkey platform" --- - ## Overview Unkey uses multiple MySQL databases that are automatically created and initialized during development: @@ -17,14 +16,13 @@ Unkey uses multiple MySQL databases that are automatically created and initializ Schema definitions are maintained in separate files: - `go/pkg/db/schema.sql` - Main Unkey application schema -- `go/pkg/hydra/store/schema.sql` - Hydra workflow engine schema -- `go/pkg/partition/schema.sq.` - Dataplane schema - +- `go/pkg/partition/schema.sql` - Dataplane schema ## Docker Development Setup During local development, schemas are automatically applied via Docker: ### File Structure + ``` deployment/ ├── Dockerfile.mysql # MySQL container definition @@ -32,8 +30,7 @@ deployment/ └── docker-compose.yaml # Service orchestration go/pkg/ -├── db/schema.sql # Main application schema -└── 
hydra/store/schema.sql # Hydra workflow schema +├── db/schema.sql # Main application schema ``` ### Initialization Order @@ -42,8 +39,7 @@ The MySQL container applies files from `/docker-entrypoint-initdb.d/` in alphabe 1. **00-init-databases.sql** - Creates databases and grants permissions 2. **01-main-schema.sql** - Creates all main application tables in `unkey` database -3. **02-partition_schema.sql - Creates the partition tables in the `partition_001` database -4. **03-hydra-schema.sql** - Creates workflow tables in `hydra` database +3. \*\*02-partition_schema.sql - Creates the partition tables in the `partition_001` database ### Database-Qualified Table Names @@ -56,11 +52,6 @@ CREATE TABLE `unkey`.`apis` ( -- ... ); --- Hydra schema (go/pkg/hydra/store/schema.sql) -CREATE TABLE `hydra`.`workflow_executions` ( - id VARCHAR(255) PRIMARY KEY, - -- ... -); ``` ## Schema Management Strategy @@ -77,14 +68,11 @@ The `go/pkg/db/schema.sql` file is **manually maintained** and must be kept in s ### Future State (SQL-First) In the future, we plan to reverse this workflow: + - `go/pkg/db/schema.sql` will become the primary source of truth - Drizzle schema will be generated from the SQL schema - This will enable better tooling and consistency -### Hydra Database - -The `hydra` database uses `go/pkg/hydra/store/schema.sql` as its source of truth since it's Go-native and doesn't use Drizzle. - ## SQLC Integration The project uses [SQLC](https://sqlc.dev/) to generate type-safe Go code from SQL queries: @@ -101,6 +89,7 @@ After schema changes, regenerate the SQLC code: cd go go generate ./... ``` + Do not use `sqlc generate` directly as we have some custom logic during the generation step. ## Making Schema Changes @@ -117,13 +106,3 @@ Do not use `sqlc generate` directly as we have some custom logic during the gene docker-compose down docker-compose up --build ``` - -### For Hydra Database - -1. **Update the schema file** `go/pkg/hydra/store/schema.sql` -2. 
**Rebuild Docker containers** to apply changes: - ```bash - docker stop $(docker ps -aq); docker system prune -f; docker volume prune --all -f - docker compose up --build - ``` - diff --git a/apps/engineering/package.json b/apps/engineering/package.json index 31dde5bcf5..ac7764df60 100644 --- a/apps/engineering/package.json +++ b/apps/engineering/package.json @@ -21,7 +21,9 @@ "fumadocs-ui": "14.4.0", "geist": "^1.3.1", "lucide-react": "^0.378.0", + "mermaid": "^11.12.0", "next": "14.2.15", + "next-themes": "^0.4.6", "react": "^18.3.1", "react-dom": "^18.3.1", "react-element-to-jsx-string": "^15.0.0", diff --git a/deployment/docker-compose.yaml b/deployment/docker-compose.yaml index 09a74841cc..e509e093dd 100644 --- a/deployment/docker-compose.yaml +++ b/deployment/docker-compose.yaml @@ -286,6 +286,26 @@ services: UNKEY_DOCKER_SOCKET: "/var/run/docker.sock" UNKEY_DEPLOYMENT_EVICTION_TTL: "10m" + restate: + networks: + - default + container_name: restate + image: docker.io/restatedev/restate:latest + ports: + - "8081:8080" # Ingress endpoint (host:container) + - "9070:9070" # Admin endpoint and UI + environment: + RESTATE_TRACING_ENDPOINT: "http://otel:4317" + depends_on: + otel: + condition: service_started + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:9070/health"] + timeout: 5s + retries: 10 + start_period: 10s + interval: 5s + ctrl: networks: - default @@ -298,6 +318,7 @@ services: command: ["run", "ctrl"] ports: - "7091:7091" + - "9080:9080" # Restate workflow service port depends_on: mysql: condition: service_healthy @@ -308,6 +329,9 @@ services: krane: condition: service_started required: true + restate: + condition: service_healthy + required: true volumes: - /var/run/docker.sock:/var/run/docker.sock environment: @@ -320,6 +344,12 @@ services: UNKEY_KRANE_ADDRESS: "http://krane:8080" UNKEY_DEFAULT_DOMAIN: "unkey.local" + # Restate configuration + UNKEY_RESTATE_INGRESS_URL: "http://restate:8080" + UNKEY_RESTATE_ADMIN_URL: 
"http://restate:9070" + UNKEY_RESTATE_HTTP_PORT: "9080" + UNKEY_RESTATE_REGISTER_AS: "http://ctrl:9080" + UNKEY_VAULT_S3_URL: "http://s3:3902" UNKEY_VAULT_S3_BUCKET: "acme-vault" UNKEY_VAULT_S3_ACCESS_KEY_ID: "minio_root_user" diff --git a/go/GO_DOCUMENTATION_GUIDELINES.md b/go/GO_DOCUMENTATION_GUIDELINES.md index 240ead638e..eb09e92d07 100644 --- a/go/GO_DOCUMENTATION_GUIDELINES.md +++ b/go/GO_DOCUMENTATION_GUIDELINES.md @@ -6,23 +6,25 @@ This document outlines the documentation standards for our Go codebase. 1. **Everything public MUST be documented** - No exceptions 2. **Internal code should explain "why", not "how"** - Focus on reasoning and trade-offs -3. **Be comprehensive and verbose** - Prefer thorough explanations over terse summaries +3. **Document what matters** - Focus on non-obvious behavior, constraints, and context that helps users succeed 4. **Add substantial value** - Documentation should teach, not just restate the obvious 5. **Follow Go conventions** - Start with the item name, use present tense ## Documentation Philosophy -**Serve both beginners and experts.** Documentation should provide clear, accessible entry points for newcomers AND comprehensive details for experts who need to understand the full picture, including architectural decisions and edge cases. +**Match documentation depth to complexity.** Simple functions need simple documentation. Complex functions with edge cases, performance implications, or subtle behavior need detailed documentation. Don't force every function into the same verbose template. -**Clarity is better than terse.** We prefer comprehensive documentation that fully explains what the code does in detail, why it exists and its role in the system, how it relates to other components, what callers need to know about behavior and performance characteristics, when to use it versus alternatives, and what can go wrong and why. 
+**Focus on what users need to know.** Document the "what" (what does it do), the "when" (when should I use it), the "watch out" (what can go wrong), and the "why" (why this design). Skip sections that don't apply. Most simple functions only need "what" and maybe "watch out." -**Every piece of documentation should add substantial value.** If the documentation doesn't teach something beyond what's obvious from the function signature, it needs to be expanded. +**Every piece of documentation should add substantial value.** If the documentation doesn't teach something beyond what's obvious from the function signature, it's sufficient to just explain the purpose clearly. Not every function needs parameter explanations, error details, performance notes, and concurrency information. -**Prioritize practical examples over theory.** Every non-trivial function should include working code examples that developers can copy and adapt. Examples should demonstrate real usage patterns, not artificial toy cases. +**Don't document irrelevant details.** If a function has no special concurrency considerations, don't document that "it's safe for concurrent use" unless the type is designed for concurrent access. If performance is O(1) and unremarkable, don't document it. If context handling is standard, don't explain it. -**Make functionality discoverable.** Use extensive cross-references to help developers find related functions and understand how pieces fit together. If a function works with or is an alternative to another function, mention that explicitly. +**Prioritize practical examples for non-trivial usage.** Simple getters, setters, and straightforward functions don't need examples. Focus examples on complex workflows, non-obvious usage patterns, and common integration scenarios. -**Write in full sentences, not bullet points.** Code documentation should read like well-written prose that flows naturally. 
Avoid bullet points for general explanations, behavior descriptions, or conceptual information. Only use bullet points when they genuinely improve readability for specific lists such as error codes, configuration options, or step-by-step procedures. Most documentation should be written as coherent paragraphs that explain concepts thoroughly. +**Make functionality discoverable.** Use cross-references to help developers find related functions and understand how pieces fit together. If a function works with or is an alternative to another function, mention that explicitly. + +**Write naturally.** Use prose for explanations. Use lists (bullet points, parameter lists, error lists) only when they genuinely improve readability for enumerable items. Don't force structured sections into every docstring. ## Package-Level Documentation @@ -75,6 +77,7 @@ package ratelimit ``` ### Requirements for doc.go Files + - **File name**: Must be exactly `doc.go` in the package root - **Content**: Only package documentation and package declaration - no other code - **Format**: Start with "Package [name] [verb]..." @@ -87,16 +90,34 @@ package ratelimit ## Function and Method Documentation -Every exported function and method must be documented. Focus on: -- What it does -- What parameters mean (especially if not obvious) -- What it returns -- Important behavior or side effects -- When it might fail +Every exported function and method must be documented. Start with what it does. Then add details only if they're non-obvious or important: + +- Parameter meanings (only if not clear from name/type) +- Return value specifics (only if behavior is subtle) +- Important side effects or behavioral quirks +- Error conditions (only if non-standard or important to handle) +- Performance or concurrency notes (only if they significantly impact usage) -### Public Functions - Comprehensive Documentation +### Simple Functions - Minimal Documentation -Every public function must be thoroughly documented. 
Here's what comprehensive documentation looks like: +Most functions are straightforward and need only a clear explanation: + +```go +// GetUserID extracts the user ID from the request context. +// Returns an empty string if no user ID is present in the context. +func GetUserID(ctx context.Context) string + +// Close releases all resources held by the client, including network connections +// and background goroutines. After calling Close, the client must not be used. +func (c *Client) Close() error + +// SetTimeout updates the request timeout duration for all future requests. +func (c *Client) SetTimeout(d time.Duration) +``` + +### Complex Functions - Detailed Documentation + +Complex functions with edge cases, distributed behavior, or subtle semantics need thorough documentation: ```go // Allow determines whether the specified identifier can perform the requested number @@ -106,63 +127,53 @@ Every public function must be thoroughly documented. Here's what comprehensive d // across all nodes in the cluster. It uses a lease-based algorithm to coordinate // between nodes and ensure accurate rate limiting even under high concurrency. // -// Parameters: -// - identifier: A unique string identifying the entity being rate limited. This is -// typically a user ID, API key, IP address, or other business identifier. The -// identifier is used as the key for rate limit bucketing and should be stable -// across requests from the same entity. -// - cost: The number of operations being requested. For most use cases this is 1, -// but can be higher for batch operations or when implementing weighted rate limiting. -// Must be positive; zero or negative values will return an error. 
-// -// Behavior: -// - Checks the current rate limit status for the identifier -// - Coordinates with other cluster nodes if necessary to maintain consistency -// - Updates the rate limit counters atomically if the request is allowed -// - Implements fair queuing to prevent starvation under high load +// The identifier should be a stable business identifier (user ID, API key, IP address). +// The cost is typically 1 for single operations, but can be higher for batch requests. +// Cost must be positive or an error is returned. // -// Returns: -// - (true, nil): Request is allowed and counters have been updated -// - (false, nil): Request is rate limited, no error condition -// - (false, error): System error occurred, decision may be unreliable +// Returns (true, nil) if allowed, (false, nil) if rate limited, or (false, error) +// if a system error occurs. Possible errors include ErrInvalidCost for invalid cost +// values, ErrClusterUnavailable when <50% of cluster nodes are reachable, +// context.DeadlineExceeded on timeout (default 5s), and network errors on storage failures. // -// Error conditions (be specific about when each occurs): -// - ErrInvalidCost: cost <= 0 or cost > MaxCost -// - ErrClusterUnavailable: <50% of cluster nodes reachable -// - context.DeadlineExceeded: operation timeout (default 5s) -// - Network errors: underlying storage failures, retries exhausted -// -// Concurrency: -// This method is safe for concurrent use from multiple goroutines. Internal -// coordination ensures that concurrent requests for the same identifier are -// handled correctly without race conditions. -// -// Context handling: -// The context is used for request timeout and cancellation. If the context -// is cancelled before the rate limit check completes, the method returns -// the context error and no rate limit counters are modified. 
-// -// Context Guidelines: -// - Always document timeout behavior and defaults -// - Explain what happens on cancellation -// - Mention if context values are used +// This method is safe for concurrent use. Context is used for timeout and cancellation; +// if cancelled, no rate limit counters are modified. func (r *RateLimiter) Allow(ctx context.Context, identifier string, cost int) (bool, error) ``` -Compare this to insufficient documentation: +The documentation is detailed because this function has distributed behavior, multiple error conditions, +and important concurrency guarantees. Compare to a simpler alternative that would be insufficient: + ```go // Allow checks if a request is allowed. -// Returns true if allowed, false if rate limited. func (r *RateLimiter) Allow(ctx context.Context, identifier string, cost int) (bool, error) ``` -The second example adds almost no value beyond the function signature and would be rejected. +This doesn't explain the distributed coordination, error conditions, or the meaning of the bool return +vs error return, so it would be insufficient. + +### When to Include Specific Details -### Function Documentation Approach +**Parameters**: Document them when the purpose isn't obvious from the name and type, or when there +are constraints (e.g., "must be positive", "should be stable across calls"). -Write function documentation as natural, flowing prose that explains what actually matters for each specific function. Start with what the function does, then include whatever information is genuinely relevant and useful for callers. Some functions might need detailed parameter explanations, others might need performance notes, and simple functions might just need a clear explanation of their purpose. Don't force every function into the same template - let the function's complexity and use case guide what information to include. 
+**Return values**: Explain if the return pattern is subtle (e.g., bool success + separate error), +or if there are multiple success states. + +**Error conditions**: List specific errors only when callers need to handle them differently, or +when they're not obvious from context. Generic "returns error on failure" is usually sufficient. + +**Concurrency**: Only document if the function/type is designed for concurrent use OR if it +explicitly must not be used concurrently. Don't document for simple stateless functions. + +**Performance**: Only mention if there are non-obvious performance characteristics that affect +usage decisions (e.g., "O(n²) - use [AlternativeFunc] for large inputs" or "blocks until response"). + +**Context**: Only document context behavior if it's non-standard (e.g., uses context values, +has specific timeout behavior, or has special cancellation semantics). ### Internal Functions (Focus on "Why") + ```go // retryWithBackoff handles retries for failed lease acquisitions. // @@ -177,45 +188,53 @@ func (r *RateLimiter) retryWithBackoff(ctx context.Context, fn func() error) err ## Type Documentation -Document all exported types, focusing on: -- What the type represents -- Its role in the system -- Important invariants or constraints -- Lifecycle considerations +Document what the type represents and any non-obvious aspects like invariants, constraints, +or lifecycle requirements. Document struct fields only when their purpose isn't clear from +the name and type. ### Structs + +Simple config structs with self-explanatory fields need minimal documentation: + +```go +// Config holds rate limiter configuration. +type Config struct { + // Window is the time period over which the rate limit applies. + Window time.Duration + + // Limit is the maximum number of operations allowed within Window. 
+ Limit int64 +} +``` + +Add more context when there are constraints, relationships, or non-obvious semantics: + ```go // Config holds the configuration for a rate limiter instance. // -// Window and Limit work together to define the rate limiting behavior. +// Window and Limit work together to define rate limiting behavior. // For example, Window=1m and Limit=100 means "100 operations per minute". -// -// ClusterNodes is required for distributed operation. For single-node -// deployments, use a slice with only the local node. type Config struct { - // Window is the time period over which operations are counted Window time.Duration + Limit int64 - // Limit is the maximum number of operations allowed within Window - Limit int64 - - // ClusterNodes lists all nodes participating in distributed rate limiting. - // Must include at least the local node. + // ClusterNodes lists all nodes in the cluster. Required for distributed + // operation; for single-node deployments, include only the local node. ClusterNodes []string } ``` ### Interfaces + +Document the interface's purpose and any important implementation requirements like +concurrency safety, guarantees, or trade-offs: + ```go // Cache provides a generic caching interface with support for distributed invalidation. // // Implementations must be safe for concurrent use. The cache may return stale data // during network partitions to maintain availability, but will eventually converge // when connectivity is restored. -// -// We chose this interface design over more specific cache types because our -// use cases vary widely (small config objects vs large binary data), and -// the generic approach allows for better testing and modularity. type Cache[T any] interface { // Get retrieves a value by key. Returns the value and whether it was found. // A cache miss (found=false) is not an error. 
@@ -229,46 +248,43 @@ type Cache[T any] interface { ## Error Documentation -Document error conditions and types: +Document sentinel errors with what they mean and when they occur: ```go var ( // ErrRateLimited is returned when an operation exceeds the configured rate limit. - // This is expected behavior, not a system error. ErrRateLimited = errors.New("rate limit exceeded") - // ErrClusterUnavailable indicates that the required number of cluster nodes - // are not reachable. Operations may still succeed if configured to fail-open. + // ErrClusterUnavailable indicates that insufficient cluster nodes are reachable. ErrClusterUnavailable = errors.New("insufficient cluster nodes available") ) +``` + +Only list specific error conditions in function docs when callers need to handle them differently: +```go // ProcessRequest handles incoming rate limit requests. // -// Returns ErrRateLimited if the request exceeds the configured limits. -// Returns ErrClusterUnavailable if distributed consensus cannot be achieved. -// Other errors indicate system problems (network, storage, etc.). +// Returns ErrRateLimited if the request exceeds configured limits, ErrClusterUnavailable +// if distributed consensus cannot be achieved, or other errors for system problems. func ProcessRequest(ctx context.Context, req *Request) (*Response, error) ``` ## Constants and Variables -Document the purpose and valid values: +Document the purpose. Add reasoning only for non-obvious design choices: ```go const ( - // DefaultWindow is the standard rate limiting window for new limiters. - // Chosen as a balance between memory usage and granularity for most use cases. + // DefaultWindow is the standard rate limiting window. DefaultWindow = time.Minute // MaxBurstRatio determines how much bursting is allowed above the base rate. - // Set to 1.5 based on analysis of traffic patterns in production. MaxBurstRatio = 1.5 ) var ( // GlobalRegistry tracks all active rate limiters for monitoring and cleanup. 
- // We use a global registry instead of dependency injection here because - // rate limiters need to be accessible from signal handlers for graceful shutdown. GlobalRegistry = &Registry{limiters: make(map[string]*RateLimiter)} ) ``` @@ -366,11 +382,13 @@ func TestConcurrentAccess(t *testing.T) { ## Consistency and Style **Terminology must be consistent** across the entire codebase: + - Use the same terms for the same concepts (e.g., always "identifier", never mix with "key" or "ID") - Define domain-specific terms in package documentation - Create a glossary for complex domains **Parameter naming should be predictable:** + - `ctx context.Context` (always first parameter) - `id string` or `identifier string` for rate limit keys - `cost int` or `count int` for operation quantities @@ -420,15 +438,16 @@ Follow these formatting and style conventions: Before submitting code, verify: -- [ ] **Every package has a dedicated `doc.go` file** with comprehensive package documentation +- [ ] **Every package has a dedicated `doc.go` file** with package documentation - [ ] Every exported function, method, type, constant, and variable is documented -- [ ] Package documentation in `doc.go` explains purpose, key concepts but not details +- [ ] Documentation depth matches code complexity (simple code = simple docs) +- [ ] Only relevant details are included (skip irrelevant concurrency, performance, context notes) - [ ] Internal code explains "why" decisions were made, not just "what" it does -- [ ] Error conditions and return values are clearly explained +- [ ] Error conditions are mentioned when callers need to handle them differently - [ ] Complex algorithms include reasoning for the chosen approach - [ ] Examples are provided for non-trivial usage patterns - [ ] Documentation starts with the item name and uses present tense -- [ ] All documentation follows Go formatting conventions (proper line breaks, etc.) 
+- [ ] All documentation follows Go formatting conventions - [ ] Cross-references use proper `[Reference]` format - [ ] Edge cases and non-obvious behaviors are documented - [ ] Concurrency guarantees are documented if and only if the code is designed to be concurrently safe diff --git a/go/Makefile b/go/Makefile index 049d5c1305..39e62fd64b 100644 --- a/go/Makefile +++ b/go/Makefile @@ -24,7 +24,7 @@ pull: @docker compose -f ../deployment/docker-compose.yaml pull up: pull - @docker compose -f ../deployment/docker-compose.yaml up -d planetscale mysql redis clickhouse s3 otel + @docker compose -f ../deployment/docker-compose.yaml up -d planetscale mysql redis clickhouse s3 otel restate clean: @@ -34,8 +34,11 @@ build: go build -o unkey ./main.go generate: - buf generate + go install github.com/restatedev/sdk-go/protoc-gen-go-restate@latest + buf generate --template ./buf.gen.connect.yaml --clean --path "./proto/ctrl" --path "./proto/krane" --path "./proto/partition" --path "./proto/vault" + buf generate --template ./buf.gen.restate.yaml --path "./proto/hydra" go generate ./... + go fmt ./... 
test: test-unit @@ -84,6 +87,7 @@ k8s-up: k8s-check ## Deploy all services to current Kubernetes cluster @kubectl wait --for=condition=ready pod -l app=mysql -n unkey --timeout=180s @kubectl wait --for=condition=ready pod -l app=clickhouse -n unkey --timeout=180s @kubectl wait --for=condition=ready pod -l app=s3 -n unkey --timeout=180s + @kubectl wait --for=condition=ready pod -l app=restate -n unkey --timeout=180s @kubectl wait --for=condition=ready pod -l app=api -n unkey --timeout=180s @kubectl wait --for=condition=ready pod -l app=gw -n unkey --timeout=180s @kubectl wait --for=condition=ready pod -l app=ctrl -n unkey --timeout=180s @@ -138,6 +142,9 @@ start-planetscale: k8s-check ## Deploy only PlanetScale HTTP driver start-observability: k8s-check ## Deploy only Observability stack $(call deploy-service,observability,Observability stack,,,otel-collector) +start-restate: k8s-check ## Deploy only Restate + $(call deploy-service,restate,Restate) + start-api: k8s-check ## Deploy only API service (3 replicas) $(call deploy-service,api,API,unkey:latest,.) 
@@ -152,7 +159,7 @@ start-dashboard: k8s-check ## Deploy only dashboard service start-unkey-services: start-api start-gw start-ctrl start-dashboard ## Deploy all Unkey services -start-dependencies: start-mysql start-clickhouse start-redis start-s3 start-planetscale start-observability ## Deploy all dependency services +start-dependencies: start-mysql start-clickhouse start-redis start-s3 start-planetscale start-observability start-restate ## Deploy all dependency services start-all: start-dependencies start-unkey-services ## Deploy all services individually diff --git a/go/Tiltfile b/go/Tiltfile index 9df63c0fe0..0b613d2ea2 100644 --- a/go/Tiltfile +++ b/go/Tiltfile @@ -24,6 +24,7 @@ start_clickhouse = 'all' in services or 'clickhouse' in services start_s3 = 'all' in services or 's3' in services start_observability = 'all' in services or 'observability' in services start_planetscale = 'all' in services or 'planetscale' in services +start_restate = 'all' in services or 'restate' in services start_api = 'all' in services or 'api' in services start_gw = 'all' in services or 'gateway' in services or 'gw' in services start_ctrl = 'all' in services or 'ctrl' in services @@ -110,6 +111,18 @@ if start_planetscale: labels=['database'] ) +# Restate service +if start_restate: + print("Setting up Restate...") + k8s_yaml('k8s/manifests/restate.yaml') + deps = ['otel-collector'] if start_observability else [] + k8s_resource( + 'restate', + port_forwards=['8081:8080', '9070:9070'], + resource_deps=deps, + labels=['database'] + ) + # Observability stack if start_observability: print("Setting up observability stack...") @@ -229,6 +242,7 @@ if start_ctrl: ctrl_deps = [] if start_mysql: ctrl_deps.append('mysql') if start_s3: ctrl_deps.append('s3') + if start_restate: ctrl_deps.append('restate') # Add compilation dependency for Unkey services ctrl_deps.append('unkey-compile') @@ -310,6 +324,7 @@ if start_clickhouse: active_services.append('clickhouse') if start_s3: 
active_services.append('s3') if start_planetscale: active_services.append('planetscale') if start_observability: active_services.extend(['prometheus', 'otel-collector']) +if start_restate: active_services.append('restate') if start_api: active_services.append('api') if start_gw: active_services.append('gw') if start_ctrl: active_services.append('ctrl') @@ -328,6 +343,8 @@ API: http://localhost:7070 Gateway: http://localhost:8080 Ctrl: http://localhost:7091 Krane: http://localhost:8090 +Restate Ingress: http://localhost:8081 +Restate Admin: http://localhost:9070 Prometheus: http://localhost:9090 S3 Console: http://localhost:9000 ClickHouse: http://localhost:8123 diff --git a/go/apps/ctrl/config.go b/go/apps/ctrl/config.go index 51f58c7ae2..8a1d9402d7 100644 --- a/go/apps/ctrl/config.go +++ b/go/apps/ctrl/config.go @@ -29,6 +29,22 @@ type AcmeConfig struct { Cloudflare CloudflareConfig } +type RestateConfig struct { + + // IngressURL is the URL of the Restate ingress endpoint for invoking workflows (e.g., "http://restate:8080") + IngressURL string + + // AdminURL is the URL of the Restate admin endpoint for service registration (e.g., "http://restate:9070") + AdminURL string + + // HttpPort is the port where the control plane listens for Restate HTTP requests + HttpPort int + + // RegisterAs is the URL of this service, used for self-registration with the Restate platform + // e.g. http://ctrl:9080 + RegisterAs string +} + type Config struct { // InstanceID is the unique identifier for this instance of the control plane server InstanceID string @@ -50,7 +66,6 @@ type Config struct { // DatabasePrimary is the primary database connection string for read and write operations DatabasePrimary string DatabasePartition string - DatabaseHydra string // --- OpenTelemetry configuration --- @@ -84,6 +99,8 @@ type Config struct { Acme AcmeConfig DefaultDomain string + + Restate RestateConfig } func (c Config) Validate() error { diff --git a/go/apps/ctrl/run.go
b/go/apps/ctrl/run.go index 9733e2d7ee..3d55e541a2 100644 --- a/go/apps/ctrl/run.go +++ b/go/apps/ctrl/run.go @@ -1,29 +1,33 @@ package ctrl import ( + "bytes" "context" - "database/sql" "fmt" "log/slog" "net/http" "time" "connectrpc.com/connect" + restateIngress "github.com/restatedev/sdk-go/ingress" + restateServer "github.com/restatedev/sdk-go/server" "github.com/unkeyed/unkey/go/apps/ctrl/middleware" "github.com/unkeyed/unkey/go/apps/ctrl/services/acme" - "github.com/unkeyed/unkey/go/apps/ctrl/services/acme/providers" "github.com/unkeyed/unkey/go/apps/ctrl/services/ctrl" "github.com/unkeyed/unkey/go/apps/ctrl/services/deployment" "github.com/unkeyed/unkey/go/apps/ctrl/services/openapi" + "github.com/unkeyed/unkey/go/apps/ctrl/workflows/certificate" + "github.com/unkeyed/unkey/go/apps/ctrl/workflows/deploy" + "github.com/unkeyed/unkey/go/apps/ctrl/workflows/routing" deployTLS "github.com/unkeyed/unkey/go/deploy/pkg/tls" "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1/ctrlv1connect" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" "github.com/unkeyed/unkey/go/gen/proto/krane/v1/kranev1connect" "github.com/unkeyed/unkey/go/pkg/db" - "github.com/unkeyed/unkey/go/pkg/hydra" "github.com/unkeyed/unkey/go/pkg/otel" "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/retry" "github.com/unkeyed/unkey/go/pkg/shutdown" - "github.com/unkeyed/unkey/go/pkg/uid" "github.com/unkeyed/unkey/go/pkg/vault" "github.com/unkeyed/unkey/go/pkg/vault/storage" pkgversion "github.com/unkeyed/unkey/go/pkg/version" @@ -94,6 +98,8 @@ func Run(ctx context.Context, cfg Config) error { return fmt.Errorf("unable to create vault service: %w", err) } } + // make go happy + _ = vaultSvc // Initialize database database, err := db.New(db.Config{ @@ -117,31 +123,6 @@ func Run(ctx context.Context, cfg Config) error { shutdowns.Register(partitionDB.Close) shutdowns.Register(database.Close) - // Initialize Hydra workflow engine with DSN - hydraEngine, err := 
hydra.New(hydra.Config{ - DSN: cfg.DatabaseHydra, - Namespace: "ctrl", - Clock: cfg.Clock, - Logger: logger, - Marshaller: hydra.NewJSONMarshaller(), - }) - if err != nil { - return fmt.Errorf("failed to initialize hydra engine: %w", err) - } - - // Create Hydra worker - hydraWorker, err := hydra.NewWorker(hydraEngine, hydra.WorkerConfig{ - WorkerID: cfg.InstanceID, - Concurrency: 10, - PollInterval: 2 * time.Second, // Less aggressive polling - HeartbeatInterval: 30 * time.Second, - ClaimTimeout: 30 * time.Minute, // Handle long builds - CronInterval: 1 * time.Minute, // Standard cron interval - }) - if err != nil { - return fmt.Errorf("unable to create hydra worker: %w", err) - } - // Create krane client for VM operations var httpClient *http.Client var authMode string @@ -186,17 +167,87 @@ func Run(ctx context.Context, cfg Config) error { ) logger.Info("krane client configured", "address", cfg.KraneAddress, "auth_mode", authMode) - // Register deployment workflow with Hydra worker - deployWorkflow := deployment.NewDeployWorkflow(deployment.DeployWorkflowConfig{ + // Restate Client and Server + + restateClient := restateIngress.NewClient(cfg.Restate.IngressURL) + + restateSrv := restateServer.NewRestate() + + restateSrv.Bind(hydrav1.NewDeploymentServiceServer(deploy.New(deploy.Config{ Logger: logger, DB: database, PartitionDB: partitionDB, Krane: kraneClient, DefaultDomain: cfg.DefaultDomain, - }) - err = hydra.RegisterWorkflow(hydraWorker, deployWorkflow) - if err != nil { - return fmt.Errorf("unable to register deployment workflow: %w", err) + }))) + + restateSrv.Bind(hydrav1.NewRoutingServiceServer(routing.New(routing.Config{ + Logger: logger, + DB: database, + PartitionDB: partitionDB, + DefaultDomain: cfg.DefaultDomain, + }))) + + restateSrv.Bind(hydrav1.NewCertificateServiceServer(certificate.New(certificate.Config{ + Logger: logger, + DB: database, + PartitionDB: partitionDB, + Vault: vaultSvc, + }))) + + go func() { + addr := fmt.Sprintf(":%d", 
cfg.Restate.HttpPort) + logger.Info("Starting Restate server", "addr", addr) + if startErr := restateSrv.Start(ctx, addr); startErr != nil { + logger.Error("failed to start restate server", "error", startErr.Error()) + } + }() + + // Register with Restate admin API if RegisterAs is configured + if cfg.Restate.RegisterAs != "" { + go func() { + // Wait a moment for the restate server to be ready + time.Sleep(2 * time.Second) + + registerURL := fmt.Sprintf("%s/deployments", cfg.Restate.AdminURL) + payload := fmt.Sprintf(`{"uri": "%s"}`, cfg.Restate.RegisterAs) + + logger.Info("Registering with Restate", "admin_url", registerURL, "service_uri", cfg.Restate.RegisterAs) + + retrier := retry.New( + retry.Attempts(10), + retry.Backoff(func(n int) time.Duration { + return 5 * time.Second + }), + ) + + err := retrier.Do(func() error { + req, err := http.NewRequestWithContext(ctx, "POST", registerURL, bytes.NewBufferString(payload)) + if err != nil { + return fmt.Errorf("failed to create registration request: %w", err) + } + + req.Header.Set("Content-Type", "application/json") + + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("failed to register with Restate: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode >= 200 && resp.StatusCode < 300 { + return nil + } + + return fmt.Errorf("registration returned status %d", resp.StatusCode) + }) + + if err != nil { + logger.Error("failed to register with Restate after retries", "error", err.Error()) + } else { + logger.Info("Successfully registered with Restate") + } + }() } // Create the connect handler @@ -220,12 +271,16 @@ func Run(ctx context.Context, cfg Config) error { } mux.Handle(ctrlv1connect.NewCtrlServiceHandler(ctrl.New(cfg.InstanceID, database), connectOptions...)) - mux.Handle(ctrlv1connect.NewDeploymentServiceHandler(deployment.New(database, partitionDB, hydraEngine, logger), connectOptions...)) + 
mux.Handle(ctrlv1connect.NewDeploymentServiceHandler(deployment.New(deployment.Config{ + Database: database, + PartitionDB: partitionDB, + Restate: restateClient, + Logger: logger, + }), connectOptions...)) mux.Handle(ctrlv1connect.NewOpenApiServiceHandler(openapi.New(database, logger), connectOptions...)) mux.Handle(ctrlv1connect.NewAcmeServiceHandler(acme.New(acme.Config{ PartitionDB: partitionDB, DB: database, - HydraEngine: hydraEngine, Logger: logger, }), connectOptions...)) @@ -280,168 +335,6 @@ func Run(ctx context.Context, cfg Config) error { } }() - if cfg.Acme.Enabled { - acmeClient, err := acme.GetOrCreateUser(ctx, acme.UserConfig{ - DB: database, - Logger: logger, - Vault: vaultSvc, - WorkspaceID: "unkey", - }) - if err != nil { - return fmt.Errorf("failed to create ACME user: %w", err) - } - - // Set up our custom HTTP-01 challenge provider on the ACME client - httpProvider := providers.NewHTTPProvider(providers.HTTPProviderConfig{ - DB: database, - Logger: logger, - }) - err = acmeClient.Challenge.SetHTTP01Provider(httpProvider) - if err != nil { - return fmt.Errorf("failed to set HTTP-01 provider: %w", err) - } - - // Set up Cloudflare DNS-01 challenge provider if enabled - if cfg.Acme.Cloudflare.Enabled { - cloudflareProvider, err := providers.NewCloudflareProvider(providers.CloudflareProviderConfig{ - DB: database, - Logger: logger, - APIToken: cfg.Acme.Cloudflare.ApiToken, - DefaultDomain: cfg.DefaultDomain, - }) - if err != nil { - logger.Error("failed to create Cloudflare DNS provider", "error", err) - return fmt.Errorf("failed to create Cloudflare DNS provider: %w", err) - } - - err = acmeClient.Challenge.SetDNS01Provider(cloudflareProvider) - if err != nil { - logger.Error("failed to set DNS-01 provider", "error", err) - return fmt.Errorf("failed to set DNS-01 provider: %w", err) - } - - logger.Info("Cloudflare DNS-01 challenge provider configured") - - if cfg.DefaultDomain != "" { - wildcardDomain := "*." 
+ cfg.DefaultDomain - - // Check if we already have a challenge or certificate for the wildcard domain - _, err := db.Query.FindDomainByDomain(ctx, database.RO(), wildcardDomain) - if err != nil && !db.IsNotFound(err) { - logger.Error("Failed to check existing wildcard domain", "error", err, "domain", wildcardDomain) - } else if db.IsNotFound(err) { - now := time.Now().UnixMilli() - domainID := uid.New("domain") - - // Insert domain record - err = db.Query.InsertDomain(ctx, database.RW(), db.InsertDomainParams{ - ID: domainID, - WorkspaceID: "unkey", // Default workspace for wildcard cert - Domain: wildcardDomain, - CreatedAt: now, - Type: db.DomainsTypeCustom, - }) - if err != nil { - logger.Error("Failed to create wildcard domain", "error", err, "domain", wildcardDomain) - } else { - // Insert challenge record - expiresAt := time.Now().Add(90 * 24 * time.Hour).UnixMilli() // 90 days - - err = db.Query.InsertAcmeChallenge(ctx, database.RW(), db.InsertAcmeChallengeParams{ - WorkspaceID: "unkey", - DomainID: domainID, - Token: "", - Authorization: "", - Status: db.AcmeChallengesStatusWaiting, - Type: db.AcmeChallengesTypeDNS01, // Use DNS-01 for wildcard - CreatedAt: now, - UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, - ExpiresAt: expiresAt, - }) - if err != nil { - logger.Error("Failed to create wildcard challenge", "error", err, "domain", wildcardDomain) - } else { - logger.Info("Created wildcard domain and challenge", "domain", wildcardDomain) - } - } - } - } - } - - // Register deployment workflow with Hydra worker - acmeWorkflows := acme.NewCertificateChallenge(acme.CertificateChallengeConfig{ - DB: database, - PartitionDB: partitionDB, - Logger: logger, - AcmeClient: acmeClient, - Vault: vaultSvc, - }) - err = hydra.RegisterWorkflow(hydraWorker, acmeWorkflows) - if err != nil { - logger.Error("unable to register ACME certificate workflow", "error", err) - return fmt.Errorf("unable to register deployment workflow: %w", err) - } - - go func() { - 
logger.Info("Starting cert worker") - - // HTTP-01 challenges are always available (we always set up HTTP provider) - supportedTypes := []db.AcmeChallengesType{db.AcmeChallengesTypeHTTP01} - - // DNS-01 challenges require Cloudflare to be enabled - if cfg.Acme.Cloudflare.Enabled { - supportedTypes = append(supportedTypes, db.AcmeChallengesTypeDNS01) - } - - registerErr := hydraEngine.RegisterCron("*/5 * * * *", "start-certificate-challenges", func(ctx context.Context, payload hydra.CronPayload) error { - executableChallenges, err := db.Query.ListExecutableChallenges(ctx, database.RO(), supportedTypes) - if err != nil { - logger.Error("Failed to start workflow", "error", err) - return err - } - - logger.Info("Starting certificate challenges", - "executable_challenges", len(executableChallenges), - "supported_types", supportedTypes) - - for _, challenge := range executableChallenges { - executionID, err := hydraEngine.StartWorkflow(ctx, "certificate_challenge", - acme.CertificateChallengeRequest{ - WorkspaceID: challenge.WorkspaceID, - Domain: challenge.Domain, - }, - hydra.WithMaxAttempts(24), - hydra.WithTimeout(25*time.Hour), - hydra.WithRetryBackoff(1*time.Hour), - ) - if err != nil { - logger.Error("Failed to start workflow", "error", err) - continue - } - - logger.Info("Workflow started", "executionID", executionID) - } - - return nil - }) - - if registerErr != nil { - logger.Error("Failed to register daily report cron job", "error", err) - return - } - }() - } - - // Start Hydra worker - go func() { - logger.Info("Starting Hydra workflow worker") - if err := hydraWorker.Start(ctx); err != nil { - logger.Error("Failed to start Hydra worker", "error", err) - } - }() - - shutdowns.RegisterCtx(hydraWorker.Shutdown) - // Wait for signal and handle shutdown logger.Info("Ctrl server started successfully") if err := shutdowns.WaitForSignal(ctx); err != nil { diff --git a/go/apps/ctrl/services/acme/certificate_workflow.go 
b/go/apps/ctrl/services/acme/certificate_workflow.go deleted file mode 100644 index f7175aac06..0000000000 --- a/go/apps/ctrl/services/acme/certificate_workflow.go +++ /dev/null @@ -1,182 +0,0 @@ -package acme - -import ( - "context" - "database/sql" - "time" - - "github.com/go-acme/lego/v4/certificate" - "github.com/go-acme/lego/v4/lego" - vaultv1 "github.com/unkeyed/unkey/go/gen/proto/vault/v1" - "github.com/unkeyed/unkey/go/pkg/db" - "github.com/unkeyed/unkey/go/pkg/hydra" - "github.com/unkeyed/unkey/go/pkg/otel/logging" - pdb "github.com/unkeyed/unkey/go/pkg/partition/db" - "github.com/unkeyed/unkey/go/pkg/vault" -) - -// CertificateChallenge tries to get a certificate from Let's Encrypt -type CertificateChallenge struct { - db db.Database - partitionDB db.Database - logger logging.Logger - acmeClient *lego.Client - vault *vault.Service -} - -type CertificateChallengeConfig struct { - DB db.Database - PartitionDB db.Database - Logger logging.Logger - AcmeClient *lego.Client - Vault *vault.Service -} - -// NewCertificateChallenge creates a new certificate challenges workflow instance -// and ensures that we have a valid ACME User -func NewCertificateChallenge(config CertificateChallengeConfig) *CertificateChallenge { - return &CertificateChallenge{ - db: config.DB, - partitionDB: config.PartitionDB, - logger: config.Logger, - acmeClient: config.AcmeClient, - vault: config.Vault, - } -} - -// Name returns the workflow name for registration -func (w *CertificateChallenge) Name() string { - return "certificate_challenge" -} - -// CertificateChallengeRequest defines the input for the certificate challenge workflow -type CertificateChallengeRequest struct { - WorkspaceID string `json:"workspace_id"` - Domain string `json:"domain"` -} - -type EncryptedCertificate struct { - Certificate string `json:"certificate"` - EncryptedPrivateKey string `json:"encrypted_private_key"` - ExpiresAt int64 `json:"expires_at"` -} - -// Run executes the complete build and deployment 
workflow -func (w *CertificateChallenge) Run(ctx hydra.WorkflowContext, req *CertificateChallengeRequest) error { - w.logger.Info("starting certificate challenge", "workspace_id", req.WorkspaceID, "domain", req.Domain) - - dom, err := hydra.Step(ctx, "resolve-domain", func(stepCtx context.Context) (db.Domain, error) { - return db.Query.FindDomainByDomain(stepCtx, w.db.RO(), req.Domain) - }) - if err != nil { - return err - } - - err = hydra.StepVoid(ctx, "acquire-challenge", func(stepCtx context.Context) error { - return db.Query.UpdateAcmeChallengeTryClaiming(stepCtx, w.db.RW(), db.UpdateAcmeChallengeTryClaimingParams{ - DomainID: dom.ID, - Status: db.AcmeChallengesStatusPending, - UpdatedAt: sql.NullInt64{Int64: time.Now().UnixMilli(), Valid: true}, - }) - }) - if err != nil { - return err - } - - cert, err := hydra.Step(ctx, "obtain-certificate", func(stepCtx context.Context) (EncryptedCertificate, error) { - // A certificate request can be either - // A: We have a new domain WITHOUT a certificate - // B: We have to renew a existing certificate - // Regardless we first claim the challenge so that no-other job tries to do the same, this will just annoy acme ratelimits - if err != nil { - db.Query.UpdateAcmeChallengeStatus(ctx.Context(), w.db.RW(), db.UpdateAcmeChallengeStatusParams{ - DomainID: dom.ID, - Status: db.AcmeChallengesStatusFailed, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - return EncryptedCertificate{}, err - } - - currCert, err := pdb.Query.FindCertificateByHostname(stepCtx, w.partitionDB.RO(), req.Domain) - if err != nil && !db.IsNotFound(err) { - return EncryptedCertificate{}, err - } - - shouldRenew := !db.IsNotFound(err) - var certificates *certificate.Resource - if shouldRenew { - resp, err := w.vault.Decrypt(stepCtx, &vaultv1.DecryptRequest{ - Keyring: "unkey", - Encrypted: string(currCert.EncryptedPrivateKey), - }) - if err != nil { - return EncryptedCertificate{}, err - } - - certificates, err = 
w.acmeClient.Certificate.Renew(certificate.Resource{ - Domain: req.Domain, - PrivateKey: []byte(resp.Plaintext), - Certificate: []byte(currCert.Certificate), - }, true, false, "") - } else { - certificates, err = w.acmeClient.Certificate.Obtain(certificate.ObtainRequest{ - Domains: []string{req.Domain}, - Bundle: true, - }) - } - if err != nil { - return EncryptedCertificate{}, err - } - - resp, err := w.vault.Encrypt(stepCtx, &vaultv1.EncryptRequest{ - Keyring: "unkey", - Data: string(certificates.PrivateKey), - }) - if err != nil { - return EncryptedCertificate{}, err - } - - expiresAt, err := getCertificateExpiry(string(certificates.Certificate)) - if err != nil { - return EncryptedCertificate{}, err - } - - return EncryptedCertificate{ - ExpiresAt: expiresAt.UnixMilli(), - Certificate: string(certificates.Certificate), - EncryptedPrivateKey: resp.Encrypted, - }, nil - }) - if err != nil { - return err - } - - err = hydra.StepVoid(ctx, "persist-certificate", func(stepCtx context.Context) error { - now := time.Now().UnixMilli() - return pdb.Query.InsertCertificate(stepCtx, w.partitionDB.RW(), pdb.InsertCertificateParams{ - WorkspaceID: dom.WorkspaceID, - Hostname: req.Domain, - Certificate: cert.Certificate, - EncryptedPrivateKey: cert.EncryptedPrivateKey, - CreatedAt: now, - UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, - }) - }) - if err != nil { - return err - } - - err = hydra.StepVoid(ctx, "complete-challenge", func(stepCtx context.Context) error { - return db.Query.UpdateAcmeChallengeVerifiedWithExpiry(stepCtx, w.db.RW(), db.UpdateAcmeChallengeVerifiedWithExpiryParams{ - Status: db.AcmeChallengesStatusVerified, - ExpiresAt: cert.ExpiresAt, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - DomainID: dom.ID, - }) - }) - if err != nil { - return err - } - - return nil -} diff --git a/go/apps/ctrl/services/acme/service.go b/go/apps/ctrl/services/acme/service.go index 8cf661c927..94e841f8db 100644 --- 
a/go/apps/ctrl/services/acme/service.go +++ b/go/apps/ctrl/services/acme/service.go @@ -3,7 +3,6 @@ package acme import ( "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1/ctrlv1connect" "github.com/unkeyed/unkey/go/pkg/db" - "github.com/unkeyed/unkey/go/pkg/hydra" "github.com/unkeyed/unkey/go/pkg/otel/logging" ) @@ -11,14 +10,12 @@ type Service struct { ctrlv1connect.UnimplementedAcmeServiceHandler db db.Database partitionDB db.Database - hydraEngine *hydra.Engine logger logging.Logger } type Config struct { PartitionDB db.Database DB db.Database - HydraEngine *hydra.Engine Logger logging.Logger } @@ -27,7 +24,6 @@ func New(cfg Config) *Service { UnimplementedAcmeServiceHandler: ctrlv1connect.UnimplementedAcmeServiceHandler{}, db: cfg.DB, partitionDB: cfg.PartitionDB, - hydraEngine: cfg.HydraEngine, logger: cfg.Logger, } } diff --git a/go/apps/ctrl/services/deployment/create_deployment.go b/go/apps/ctrl/services/deployment/create_deployment.go index 60c1bb524b..b5551b13df 100644 --- a/go/apps/ctrl/services/deployment/create_deployment.go +++ b/go/apps/ctrl/services/deployment/create_deployment.go @@ -9,9 +9,10 @@ import ( "time" "connectrpc.com/connect" + restateingress "github.com/restatedev/sdk-go/ingress" ctrlv1 "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" "github.com/unkeyed/unkey/go/pkg/db" - "github.com/unkeyed/unkey/go/pkg/hydra" "github.com/unkeyed/unkey/go/pkg/uid" ) @@ -143,31 +144,27 @@ func (s *Service) CreateDeployment( ) // Start the deployment workflow directly - deployReq := &DeployRequest{ - WorkspaceID: req.Msg.GetWorkspaceId(), - ProjectID: req.Msg.GetProjectId(), - EnvironmentID: env.ID, - DeploymentID: deploymentID, - DockerImage: req.Msg.GetDockerImage(), - KeyspaceID: req.Msg.GetKeyspaceId(), + deployReq := &hydrav1.DeployRequest{ + DeploymentId: deploymentID, + DockerImage: req.Msg.GetDockerImage(), + KeyAuthId: req.Msg.KeyspaceId, } - - executionID, err := 
s.hydraEngine.StartWorkflow(ctx, "deployment", deployReq, - hydra.WithMaxAttempts(3), - hydra.WithTimeout(25*time.Minute), - hydra.WithRetryBackoff(1*time.Minute), - ) - - if err != nil { - s.logger.Error("failed to start deployment workflow", - "deployment_id", deploymentID, - "error", err) - // Don't fail deployment creation - workflow can be retried - } else { - s.logger.Info("deployment workflow started", - "deployment_id", deploymentID, - "execution_id", executionID) + // this is ugly, but we're waiting for + // https://github.com/restatedev/sdk-go/issues/103 + invocation := restateingress.WorkflowSend[*hydrav1.DeployRequest]( + s.restate, + "hydra.v1.DeploymentService", + project.ID, + "Deploy", + ).Send(ctx, deployReq) + if invocation.Error != nil { + s.logger.Error("failed to start deployment workflow", "error", invocation.Error.Error()) + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("unable to start workflow: %w", invocation.Error)) } + s.logger.Info("deployment workflow started", + "deployment_id", deploymentID, + "invocation_id", invocation.Id, + ) res := connect.NewResponse(&ctrlv1.CreateDeploymentResponse{ DeploymentId: deploymentID, diff --git a/go/apps/ctrl/services/deployment/deploy_workflow.go b/go/apps/ctrl/services/deployment/deploy_workflow.go deleted file mode 100644 index 5a109bf151..0000000000 --- a/go/apps/ctrl/services/deployment/deploy_workflow.go +++ /dev/null @@ -1,724 +0,0 @@ -package deployment - -import ( - "context" - "database/sql" - "encoding/base64" - "fmt" - "io" - "net/http" - "strings" - "time" - - "connectrpc.com/connect" - ctrlv1 "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1" - kranev1 "github.com/unkeyed/unkey/go/gen/proto/krane/v1" - "github.com/unkeyed/unkey/go/gen/proto/krane/v1/kranev1connect" - partitionv1 "github.com/unkeyed/unkey/go/gen/proto/partition/v1" - "github.com/unkeyed/unkey/go/pkg/db" - "github.com/unkeyed/unkey/go/pkg/hydra" - "github.com/unkeyed/unkey/go/pkg/otel/logging" - partitiondb 
"github.com/unkeyed/unkey/go/pkg/partition/db" - "github.com/unkeyed/unkey/go/pkg/uid" - "google.golang.org/protobuf/encoding/protojson" -) - -const hardcodedNamespace = "unkey" // TODO change to workspace scope - -// DeployWorkflow orchestrates the complete build and deployment process using Hydra -type DeployWorkflow struct { - db db.Database - partitionDB db.Database - logger logging.Logger - krane kranev1connect.DeploymentServiceClient - defaultDomain string -} - -type DeployWorkflowConfig struct { - Logger logging.Logger - DB db.Database - PartitionDB db.Database - Krane kranev1connect.DeploymentServiceClient - DefaultDomain string -} - -// NewDeployWorkflow creates a new deploy workflow instance -func NewDeployWorkflow(cfg DeployWorkflowConfig) *DeployWorkflow { - return &DeployWorkflow{ - db: cfg.DB, - partitionDB: cfg.PartitionDB, - logger: cfg.Logger, - krane: cfg.Krane, - defaultDomain: cfg.DefaultDomain, - } -} - -// Name returns the workflow name for registration -func (w *DeployWorkflow) Name() string { - return "deployment" -} - -// DeployRequest defines the input for the deploy workflow -type DeployRequest struct { - WorkspaceID string `json:"workspace_id"` - ProjectID string `json:"project_id"` - KeyspaceID string `json:"keyspace_id"` - DeploymentID string `json:"deployment_id"` - EnvironmentID string `json:"environment_id"` - DockerImage string `json:"docker_image"` -} - -// DeploymentResult holds the deployment outcome -type DeploymentResult struct { - DeploymentID string `json:"deployment_id"` - Status string `json:"status"` -} - -// Run executes the complete build and deployment workflow -func (w *DeployWorkflow) Run(ctx hydra.WorkflowContext, req *DeployRequest) error { - w.logger.Info("starting deployment workflow", - "execution_id", ctx.ExecutionID(), - "deployment_id", req.DeploymentID, - "docker_image", req.DockerImage, - "workspace_id", req.WorkspaceID, - "project_id", req.ProjectID, - ) - - workspace, err := hydra.Step(ctx, 
"get-workspace", func(stepCtx context.Context) (db.Workspace, error) { - return db.Query.FindWorkspaceByID(stepCtx, w.db.RW(), req.WorkspaceID) - }) - if err != nil { - return err - } - project, err := hydra.Step(ctx, "get-project", func(stepCtx context.Context) (db.FindProjectByIdRow, error) { - return db.Query.FindProjectById(stepCtx, w.db.RW(), req.ProjectID) - }) - if err != nil { - return err - } - environment, err := hydra.Step(ctx, "get-environment", func(stepCtx context.Context) (db.FindEnvironmentByIdRow, error) { - return db.Query.FindEnvironmentById(stepCtx, w.db.RW(), req.EnvironmentID) - }) - if err != nil { - return err - } - deployment, err := hydra.Step(ctx, "get-deployment", func(stepCtx context.Context) (db.FindDeploymentByIdRow, error) { - return db.Query.FindDeploymentById(stepCtx, w.db.RW(), req.DeploymentID) - }) - if err != nil { - return err - } - - // Log deployment pending - err = hydra.StepVoid(ctx, "log-deployment-pending", func(stepCtx context.Context) error { - return db.Query.InsertDeploymentStep(stepCtx, w.db.RW(), db.InsertDeploymentStepParams{ - WorkspaceID: req.WorkspaceID, - ProjectID: req.ProjectID, - DeploymentID: req.DeploymentID, - Status: "pending", - Message: "Deployment queued and ready to start", - CreatedAt: time.Now().UnixMilli(), - }) - }) - if err != nil { - return err - } - - // Update version status to building - _, err = hydra.Step(ctx, "update-version-building", func(stepCtx context.Context) (*struct{}, error) { - updateErr := db.Query.UpdateDeploymentStatus(stepCtx, w.db.RW(), db.UpdateDeploymentStatusParams{ - ID: req.DeploymentID, - Status: db.DeploymentsStatusBuilding, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - if updateErr != nil { - return nil, fmt.Errorf("failed to update version status to building: %w", updateErr) - } - return &struct{}{}, nil - }) - if err != nil { - return err - } - - err = hydra.StepVoid(ctx, "create-deployment", func(stepCtx context.Context) error { 
- // Create deployment request - deploymentReq := &kranev1.CreateDeploymentRequest{ - Deployment: &kranev1.DeploymentRequest{ - Namespace: hardcodedNamespace, - DeploymentId: req.DeploymentID, - Image: req.DockerImage, - Replicas: 1, - CpuMillicores: 512, - MemorySizeMib: 512, - }, - } - - _, err := w.krane.CreateDeployment(stepCtx, connect.NewRequest(deploymentReq)) - if err != nil { - return fmt.Errorf("krane CreateDeployment failed for image %s: %w", req.DockerImage, err) - } - - return nil - }) - if err != nil { - return err - } - - w.logger.Info("deployment created", "deployment_id", req.DeploymentID) - - // Update version status to deploying - _, err = hydra.Step(ctx, "update-version-deploying", func(stepCtx context.Context) (*struct{}, error) { - deployingErr := db.Query.UpdateDeploymentStatus(stepCtx, w.db.RW(), db.UpdateDeploymentStatusParams{ - ID: req.DeploymentID, - Status: db.DeploymentsStatusDeploying, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - if deployingErr != nil { - return nil, fmt.Errorf("failed to update version status to deploying: %w", deployingErr) - } - return &struct{}{}, nil - }) - if err != nil { - return err - } - - createdInstances, err := hydra.Step(ctx, "polling deployment prepare", func(stepCtx context.Context) ([]*kranev1.Instance, error) { - // prevent updating the db unnecessarily - - for i := range 300 { - time.Sleep(time.Second) - if i%10 == 0 { // Log every 10 seconds instead of every second - w.logger.Info("polling deployment status", "deployment_id", req.DeploymentID, "iteration", i) - } - - resp, err := w.krane.GetDeployment(stepCtx, connect.NewRequest(&kranev1.GetDeploymentRequest{ - Namespace: hardcodedNamespace, - DeploymentId: req.DeploymentID, - })) - if err != nil { - return nil, fmt.Errorf("krane GetDeployment failed for deployment %s: %w", req.DeploymentID, err) - } - - w.logger.Info("deployment status", - "deployment_id", req.DeploymentID, - "status", resp.Msg, - ) - - allReady 
:= true - for _, instance := range resp.Msg.GetInstances() { - if instance.Status != kranev1.DeploymentStatus_DEPLOYMENT_STATUS_RUNNING { - allReady = false - } - - var status partitiondb.VmsStatus - switch instance.Status { - case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_PENDING: - status = partitiondb.VmsStatusProvisioning - case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_RUNNING: - status = partitiondb.VmsStatusRunning - - case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_TERMINATING: - status = partitiondb.VmsStatusStopping - case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_UNSPECIFIED: - status = partitiondb.VmsStatusAllocated - } - - upsertParams := partitiondb.UpsertVMParams{ - ID: instance.Id, - DeploymentID: req.DeploymentID, - Address: sql.NullString{Valid: true, String: instance.Address}, - CpuMillicores: 1000, // TODO derive from spec - MemoryMb: 1024, // TODO derive from spec - Status: status, // TODO - } - - w.logger.Info("upserting VM to database", - "vm_id", instance.Id, - "deployment_id", req.DeploymentID, - "address", instance.Address, - "status", "running") - - if err := partitiondb.Query.UpsertVM(stepCtx, w.partitionDB.RW(), upsertParams); err != nil { - return nil, fmt.Errorf("failed to upsert VM %s: %w", instance.Id, err) - } - - w.logger.Info("successfully upserted VM to database", "vm_id", instance.Id) - - } - - if allReady { - return resp.Msg.GetInstances(), nil - } - // next loop - - } - - return nil, fmt.Errorf("deployment never became ready") - }) - if err != nil { - return err - } - - openapiSpec, err := hydra.Step(ctx, "scrape-openapi-spec", func(stepCtx context.Context) (string, error) { - - for _, instance := range createdInstances { - openapiURL := fmt.Sprintf("http://%s/openapi.yaml", instance.GetAddress()) - w.logger.Info("trying to scrape OpenAPI spec", "url", openapiURL, "host_port", instance.GetAddress(), "deployment_id", req.DeploymentID) - - resp, err := http.DefaultClient.Get(openapiURL) - if err != nil { - 
w.logger.Warn("openapi scraping failed for host address", "error", err, "host_addr", instance.GetAddress(), "deployment_id", req.DeploymentID) - continue - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - w.logger.Warn("openapi endpoint returned non-200 status", "status", resp.StatusCode, "host_addr", instance.GetAddress(), "deployment_id", req.DeploymentID) - continue - } - - // Read the OpenAPI spec - specBytes, err := io.ReadAll(resp.Body) - if err != nil { - w.logger.Warn("failed to read OpenAPI spec response", "error", err, "host_addr", instance.GetAddress(), "deployment_id", req.DeploymentID) - continue - } - - w.logger.Info("openapi spec scraped successfully", "host_addr", instance.GetAddress(), "deployment_id", req.DeploymentID, "spec_size", len(specBytes)) - return base64.StdEncoding.EncodeToString(specBytes), nil - } - // not an error really, just no OpenAPI spec found - return "", nil - - }) - - if err != nil { - return err - } - - if openapiSpec != "" { - - err = hydra.StepVoid(ctx, "update openapi for deployment", func(innerCtx context.Context) error { - - return db.Query.UpdateDeploymentOpenapiSpec(innerCtx, w.db.RW(), db.UpdateDeploymentOpenapiSpecParams{ - ID: deployment.ID, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - OpenapiSpec: sql.NullString{Valid: true, String: openapiSpec}, - }) - - }) - - } - - w.logger.Info("openapi", - - "spec", openapiSpec) - allDomains := buildDomains( - workspace.Slug, - project.Slug, - environment.Slug, - deployment.GitCommitSha.String, - deployment.GitBranch.String, - w.defaultDomain, - ctrlv1.SourceType_SOURCE_TYPE_CLI_UPLOAD, // hardcoded for now cause I really need to move on - ) - - // Create database entries for all domains - - // Track domains that actually need to be changed in the dataplane - changedDomains := []string{} - - for _, domain := range allDomains { - - err = hydra.StepVoid(ctx, fmt.Sprintf("create-domain-entry-%s", domain.domain), func(stepCtx 
context.Context) error { - - now := time.Now().UnixMilli() - - // This is more verbose than we initially thought - // A simple ON DUPLICATE UPDATE was insufficient, because it could leak domains into other workspaces - // because workspace slugs can change over time. - // And we also need more control over updating rolled back domains - return db.Tx(stepCtx, w.db.RW(), func(txCtx context.Context, tx db.DBTX) error { - - existing, err := db.Query.FindDomainByDomain(txCtx, tx, domain.domain) - if err != nil { - if !db.IsNotFound(err) { - return fmt.Errorf("failed to find domain entry for deployment %s: %w", req.DeploymentID, err) - - } - - // Domain does not exist, create it - insertError := db.Query.InsertDomain(txCtx, tx, db.InsertDomainParams{ - ID: uid.New("domain"), - WorkspaceID: req.WorkspaceID, - ProjectID: sql.NullString{Valid: true, String: req.ProjectID}, - EnvironmentID: sql.NullString{Valid: true, String: req.EnvironmentID}, - Domain: domain.domain, - Sticky: domain.sticky, - DeploymentID: sql.NullString{Valid: true, String: req.DeploymentID}, - CreatedAt: now, - Type: db.DomainsTypeWildcard, - }) - if insertError != nil { - return fmt.Errorf("failed to create domain entry for deployment %s: %w", req.DeploymentID, err) - } - changedDomains = append(changedDomains, domain.domain) - return nil - } - - if project.IsRolledBack { - w.logger.Info("Skipping domain cause project is rolled back", - "domain_id", existing.ID, - "domain", existing.Domain, - ) - return nil - } - updateErr := db.Query.ReassignDomain(txCtx, tx, db.ReassignDomainParams{ - ID: existing.ID, - TargetWorkspaceID: workspace.ID, - DeploymentID: sql.NullString{Valid: true, String: req.DeploymentID}, - }) - - if updateErr != nil { - return fmt.Errorf("failed to update domain entry for deployment %s: %w", req.DeploymentID, updateErr) - } - changedDomains = append(changedDomains, existing.Domain) - - return nil - - }) - }) - } - - if err != nil { - return err - } - - // Create gateway configs for 
all domains in bulk (except local ones) - err = hydra.StepVoid(ctx, "create-gateway-configs-bulk", func(stepCtx context.Context) error { - // Prepare gateway configs for all non-local domains - var gatewayParams []partitiondb.UpsertGatewayParams - var skippedDomains []string - for _, domain := range changedDomains { - if isLocalHostname(domain, w.defaultDomain) { - skippedDomains = append(skippedDomains, domain) - continue - } - - // Create VM protobuf objects for gateway config - gatewayConfig := &partitionv1.GatewayConfig{ - Deployment: &partitionv1.Deployment{ - Id: req.DeploymentID, - IsEnabled: true, - }, - Vms: make([]*partitionv1.VM, len(createdInstances)), - } - - for i, vm := range createdInstances { - gatewayConfig.Vms[i] = &partitionv1.VM{ - Id: vm.Id, - } - } - - // Only add AuthConfig if we have a KeyspaceID - if req.KeyspaceID != "" { - gatewayConfig.AuthConfig = &partitionv1.AuthConfig{ - KeyAuthId: req.KeyspaceID, - } - } - - if openapiSpec != "" { - gatewayConfig.ValidationConfig = &partitionv1.ValidationConfig{ - OpenapiSpec: openapiSpec, - } - } - - // Marshal protobuf to bytes - configBytes, err := protojson.Marshal(gatewayConfig) - if err != nil { - w.logger.Error("failed to marshal gateway config", "error", err, "domain", domain) - continue - } - - gatewayParams = append(gatewayParams, partitiondb.UpsertGatewayParams{ - WorkspaceID: req.WorkspaceID, - DeploymentID: req.DeploymentID, - Hostname: domain, - Config: configBytes, - }) - } - // Perform bulk upsert for all gateway configs - if len(gatewayParams) > 0 { - if err := partitiondb.BulkQuery.UpsertGateway(stepCtx, w.partitionDB.RW(), gatewayParams); err != nil { - return fmt.Errorf("failed to upsert %d gateway configs for deployment %s: %w", len(gatewayParams), req.DeploymentID, err) - } - } - - return db.Query.InsertDeploymentStep(stepCtx, w.db.RW(), db.InsertDeploymentStepParams{ - DeploymentID: req.DeploymentID, - Status: db.DeploymentStepsStatusAssigningDomains, - Message: 
fmt.Sprintf("Created %d gateway configs for %d domains (skipped %d local domains)", len(gatewayParams), len(allDomains), len(skippedDomains)), - CreatedAt: time.Now().UnixMilli(), - }) - }) - if err != nil { - return err - } - - // Update deployment status to ready - err = hydra.StepVoid(ctx, "update-deployment-ready", func(stepCtx context.Context) error { - return db.Query.UpdateDeploymentStatus(stepCtx, w.db.RW(), db.UpdateDeploymentStatusParams{ - ID: req.DeploymentID, - Status: db.DeploymentsStatusReady, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - }) - if err != nil { - return err - } - - if !project.IsRolledBack { - // only update this if the deployment is not rolled back - err = hydra.StepVoid(ctx, "update-project-deployment-pointers", func(stepCtx context.Context) error { - return db.Query.UpdateProjectDeployments(stepCtx, w.db.RW(), db.UpdateProjectDeploymentsParams{ - ID: req.ProjectID, - LiveDeploymentID: sql.NullString{Valid: true, String: req.DeploymentID}, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - }) - if err != nil { - return err - } - } - - /* - - // Step 23: Scrape OpenAPI spec from container (using host port mapping) - openapiSpec, err := hydra.Step(ctx, "scrape-openapi-spec", func(stepCtx context.Context) (string, error) { - - // Find the port mapping for container port 8080 - var hostPort int32 - for _, portMapping := range vmInfo.NetworkInfo.PortMappings { - if portMapping.ContainerPort == 8080 { - hostPort = portMapping.HostPort - break - } - } - - if hostPort == 0 { - w.logger.Warn("no host port mapping found for container port 8080", "deployment_id", req.DeploymentID) - return "", nil - } - - // Try multiple host addresses to reach the Docker host - hostAddresses := []string{ - "host.docker.internal", // Docker Desktop (Windows/Mac) and some Linux setups - "gateway.docker.internal", // Docker gateway - "172.17.0.1", // Default Docker bridge gateway - "172.18.0.1", // 
Alternative Docker bridge - } - - client := &http.Client{Timeout: 10 * time.Second} - - for _, hostAddr := range hostAddresses { - openapiURL := fmt.Sprintf("http://%s:%d/openapi.yaml", hostAddr, hostPort) - w.logger.Info("trying to scrape OpenAPI spec", "url", openapiURL, "host_port", hostPort, "deployment_id", req.DeploymentID) - - resp, err := client.Get(openapiURL) - if err != nil { - w.logger.Warn("openapi scraping failed for host address", "error", err, "host_addr", hostAddr, "deployment_id", req.DeploymentID) - continue - } - defer resp.Body.Close() - - if resp.StatusCode != http.StatusOK { - w.logger.Warn("openapi endpoint returned non-200 status", "status", resp.StatusCode, "host_addr", hostAddr, "deployment_id", req.DeploymentID) - continue - } - - // Read the OpenAPI spec - specBytes, err := io.ReadAll(resp.Body) - if err != nil { - w.logger.Warn("failed to read OpenAPI spec response", "error", err, "host_addr", hostAddr, "deployment_id", req.DeploymentID) - continue - } - - w.logger.Info("openapi spec scraped successfully", "host_addr", hostAddr, "deployment_id", req.DeploymentID, "spec_size", len(specBytes)) - return string(specBytes), nil - } - - return "", fmt.Errorf("failed to scrape OpenAPI spec from all host addresses: %v", hostAddresses) - }) - if err != nil { - return err - } - - // Step 24: Update gateway config with OpenAPI spec - err = hydra.StepVoid(ctx, "update-gateway-config-openapi", func(stepCtx context.Context) error { - // Only update if we have both hostname and OpenAPI spec - if req.Hostname == "" || openapiSpec == "" { - w.logger.Info("skipping gateway config OpenAPI update", - "has_hostname", req.Hostname != "", - "has_openapi_spec", openapiSpec != "", - "deployment_id", req.DeploymentID) - return nil - } - - w.logger.Info("updating gateway config with OpenAPI spec", "hostname", req.Hostname, "deployment_id", req.DeploymentID, "spec_size", len(openapiSpec)) - - // Fetch existing gateway config - existingConfig, err := 
partitiondb.Query.FindGatewayByHostname(stepCtx, w.partitionDB.RO(), req.Hostname) - if err != nil { - return fmt.Errorf("failed to fetch existing gateway config for %s: %w", req.Hostname, err) - } - - // Unmarshal existing config - // IMPORTANT: Gateway configs are stored as JSON in the database for compatibility with the gateway service - var gatewayConfig partitionv1.GatewayConfig - if err := protojson.Unmarshal(existingConfig.Config, &gatewayConfig); err != nil { - return fmt.Errorf("failed to unmarshal existing gateway config for %s: %w", req.Hostname, err) - } - - // Add or update ValidationConfig with OpenAPI spec - if gatewayConfig.ValidationConfig == nil { - gatewayConfig.ValidationConfig = &partitionv1.ValidationConfig{} - } - gatewayConfig.ValidationConfig.OpenapiSpec = openapiSpec - - // Marshal updated config - // Gateway configs must be stored as JSON for compatibility with the gateway service - configBytes, err := protojson.Marshal(&gatewayConfig) - if err != nil { - return fmt.Errorf("failed to marshal updated gateway config: %w", err) - } - - // Update gateway config in partition database - params := partitiondb.UpsertGatewayParams{ - Hostname: req.Hostname, - Config: configBytes, - } - - if err := partitiondb.Query.UpsertGateway(stepCtx, w.partitionDB.RW(), params); err != nil { - return fmt.Errorf("failed to update gateway config with OpenAPI spec for %s: %w", req.Hostname, err) - } - - w.logger.Info("gateway config updated with OpenAPI spec successfully", "hostname", req.Hostname, "deployment_id", req.DeploymentID) - return nil - }) - if err != nil { - // Don't fail the deployment for this - } - - // Step 25: Store OpenAPI spec in database - err = hydra.StepVoid(ctx, "store-openapi-spec", func(stepCtx context.Context) error { - if openapiSpec == "" { - w.logger.Info("no OpenAPI spec to store", "deployment_id", req.DeploymentID) - return nil - } - - // Store in database - err := db.Query.UpdateDeploymentOpenapiSpec(stepCtx, w.db.RW(), 
db.UpdateDeploymentOpenapiSpecParams{ - ID: req.DeploymentID, - OpenapiSpec: sql.NullString{String: openapiSpec, Valid: true}, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - if err != nil { - w.logger.Warn("failed to store OpenAPI spec in database", "error", err, "deployment_id", req.DeploymentID) - return nil // Don't fail the deployment - } - - w.logger.Info("openapi spec stored in database successfully", "deployment_id", req.DeploymentID, "spec_size", len(openapiSpec)) - return nil - }) - if err != nil { - return err - } - - */ - // Log deployment completed - err = hydra.StepVoid(ctx, "log-completed", func(stepCtx context.Context) error { - return db.Query.InsertDeploymentStep(stepCtx, w.db.RW(), db.InsertDeploymentStepParams{ - DeploymentID: req.DeploymentID, - Status: "completed", - Message: "Deployment completed successfully", - CreatedAt: time.Now().UnixMilli(), - }) - }) - if err != nil { - return err - } - - w.logger.Info("deployment workflow completed", - "deployment_id", req.DeploymentID, - "status", "succeeded", - "domains", len(allDomains)) - - return nil -} - -// createGatewayConfig creates a gateway configuration protobuf object -// -// ENCODING POLICY FOR GATEWAY CONFIGS: -// Gateway configs are stored as JSON (using protojson.Marshal) for easier debugging -// and readability during development/demo. This makes it simpler to inspect and -// modify configs directly in the database. -// IMPORTANT: Always use protojson.Marshal for writes and protojson.Unmarshal for reads. 
-func createGatewayConfig(deploymentID, keyspaceID string, instances []*kranev1.Instance) (*partitionv1.GatewayConfig, error) { - // Create VM protobuf objects for gateway config - gatewayConfig := &partitionv1.GatewayConfig{ - Deployment: &partitionv1.Deployment{ - Id: deploymentID, - IsEnabled: true, - }, - Vms: make([]*partitionv1.VM, len(instances)), - } - - for i, vm := range instances { - gatewayConfig.Vms[i] = &partitionv1.VM{ - Id: vm.Id, - } - } - - // Only add AuthConfig if we have a KeyspaceID - if keyspaceID != "" { - gatewayConfig.AuthConfig = &partitionv1.AuthConfig{ - KeyAuthId: keyspaceID, - } - } - - return gatewayConfig, nil -} - -// isLocalHostname checks if a hostname should be skipped from gateway config creation -// Returns true for localhost/development domains that shouldn't get gateway configs -func isLocalHostname(hostname, defaultDomain string) bool { - // Lowercase for case-insensitive comparison - hostname = strings.ToLower(hostname) - defaultDomain = strings.ToLower(defaultDomain) - - // Exact matches for common local hosts - these should be skipped - if hostname == "localhost" || hostname == "127.0.0.1" { - return true - } - - // If hostname uses the default domain, it should NOT be skipped (return false) - // This allows gateway configs to be created for the default domain - if strings.HasSuffix(hostname, "."+defaultDomain) || hostname == defaultDomain { - return false - } - - // Check for local-only TLD suffixes - these should be skipped - // Note: .dev is a real TLD owned by Google, so it's excluded - localSuffixes := []string{ - ".local", - ".test", - } - - for _, suffix := range localSuffixes { - if strings.HasSuffix(hostname, suffix) { - return true - } - } - - return false -} diff --git a/go/apps/ctrl/services/deployment/promote.go b/go/apps/ctrl/services/deployment/promote.go index d92b8c114e..b96de8a8d2 100644 --- a/go/apps/ctrl/services/deployment/promote.go +++ b/go/apps/ctrl/services/deployment/promote.go @@ -2,24 +2,22 @@ 
package deployment import ( "context" - "database/sql" "fmt" - "time" "connectrpc.com/connect" + restateingress "github.com/restatedev/sdk-go/ingress" ctrlv1 "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/go/pkg/assert" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" "github.com/unkeyed/unkey/go/pkg/db" - pdb "github.com/unkeyed/unkey/go/pkg/partition/db" ) -// Promote reassigns all domains to a deployment and removes the rolled back state +// Promote reassigns all domains to a deployment and removes the rolled back state via Restate workflow func (s *Service) Promote(ctx context.Context, req *connect.Request[ctrlv1.PromoteRequest]) (*connect.Response[ctrlv1.PromoteResponse], error) { - - s.logger.Info("initiating promotion", + s.logger.Info("initiating promotion via Restate", "target", req.Msg.GetTargetDeploymentId(), ) + // Get target deployment to determine project ID for keying targetDeployment, err := db.Query.FindDeploymentById(ctx, s.db.RO(), req.Msg.GetTargetDeploymentId()) if err != nil { if db.IsNotFound(err) { @@ -32,120 +30,28 @@ func (s *Service) Promote(ctx context.Context, req *connect.Request[ctrlv1.Promo return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get deployment: %w", err)) } - project, err := db.Query.FindProjectById(ctx, s.db.RO(), targetDeployment.ProjectID) - if err != nil { - if db.IsNotFound(err) { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("project not found: %s", targetDeployment.ProjectID)) - } - s.logger.Error("failed to get project", - "project_id", targetDeployment.ProjectID, - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get project: %w", err)) - } - - if err := assert.All( - assert.Equal(targetDeployment.Status, db.DeploymentsStatusReady), - assert.True(project.LiveDeploymentID.Valid), - assert.NotEqual(targetDeployment.ID, project.LiveDeploymentID.String), - ); err != nil { - return nil, 
connect.NewError(connect.CodeFailedPrecondition, err) - } - - vms, err := pdb.Query.FindVMsByDeploymentId(ctx, s.partitionDB.RO(), targetDeployment.ID) - if err != nil { - s.logger.Error("failed to get VMs", - "deployment_id", targetDeployment.ID, - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get VMs: %w", err)) - } - runningVms := 0 - for _, vm := range vms { - if vm.Status == pdb.VmsStatusRunning { - runningVms++ - } - } - if runningVms == 0 { - s.logger.Error("no VMs found", - "deployment_id", targetDeployment.ID, - ) - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("no VMs found for deployment: %s", targetDeployment.ID)) - } - - domains, err := db.Query.FindDomainsForPromotion(ctx, s.db.RO(), db.FindDomainsForPromotionParams{ - EnvironmentID: sql.NullString{Valid: true, String: targetDeployment.EnvironmentID}, - Sticky: []db.NullDomainsSticky{ - db.NullDomainsSticky{Valid: true, DomainsSticky: db.DomainsStickyLive}, - db.NullDomainsSticky{Valid: true, DomainsSticky: db.DomainsStickyEnvironment}, - }, + // Call the Restate workflow using project ID as the key + // This ensures only one operation per project can run at a time + _, err = restateingress.Object[*hydrav1.PromoteRequest, *hydrav1.PromoteResponse]( + s.restate, + "hydra.v1.DeploymentService", + targetDeployment.ProjectID, + "Promote", + ).Request(ctx, &hydrav1.PromoteRequest{ + TargetDeploymentId: req.Msg.GetTargetDeploymentId(), }) - if err != nil { - s.logger.Error("failed to get domains", - "deployment_id", targetDeployment.ID, - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get domains: %w", err)) - } - - if len(domains) == 0 { - s.logger.Error("no domains found", - "deployment_id", targetDeployment.ID, - ) - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("no domains found for deployment: %s", targetDeployment.ID)) - } - gatewayConfig, err := 
pdb.Query.FindGatewayByDeploymentId(ctx, s.partitionDB.RO(), targetDeployment.ID) if err != nil { - s.logger.Error("failed to get gateway config", - "deployment_id", targetDeployment.ID, + s.logger.Error("promotion workflow failed", + "target", req.Msg.GetTargetDeploymentId(), "error", err.Error(), ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get gateway config: %w", err)) - } - - gatewayChanges := make([]pdb.UpsertGatewayParams, len(domains)) - for i, domain := range domains { - gatewayChanges[i] = pdb.UpsertGatewayParams{ - WorkspaceID: domain.WorkspaceID, - DeploymentID: targetDeployment.ID, - Hostname: domain.Domain, - Config: gatewayConfig.Config, - } - } - - err = pdb.BulkQuery.UpsertGateway(ctx, s.partitionDB.RW(), gatewayChanges) - if err != nil { - s.logger.Error("failed to upsert gateway", "error", err.Error()) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to upsert gateway: %w", err)) + return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("promotion workflow failed: %w", err)) } - for _, domain := range domains { - err = db.Query.ReassignDomain(ctx, s.db.RW(), db.ReassignDomainParams{ - ID: domain.ID, - TargetWorkspaceID: targetDeployment.WorkspaceID, - DeploymentID: sql.NullString{Valid: true, String: targetDeployment.ID}, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - if err != nil { - s.logger.Error("failed to update domain", "error", err.Error()) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to update domain: %w", err)) - } - } - - err = db.Query.UpdateProjectDeployments(ctx, s.db.RW(), db.UpdateProjectDeploymentsParams{ - ID: project.ID, - LiveDeploymentID: sql.NullString{Valid: true, String: targetDeployment.ID}, - IsRolledBack: false, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - if err != nil { - s.logger.Error("failed to update project deployments", - "project_id", project.ID, - "error", 
err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to update project's live deployment id: %w", err)) - } + s.logger.Info("promotion completed successfully via Restate", + "target", req.Msg.GetTargetDeploymentId(), + ) return connect.NewResponse(&ctrlv1.PromoteResponse{}), nil } diff --git a/go/apps/ctrl/services/deployment/rollback.go b/go/apps/ctrl/services/deployment/rollback.go index 8867bc9254..04e9cd6ad6 100644 --- a/go/apps/ctrl/services/deployment/rollback.go +++ b/go/apps/ctrl/services/deployment/rollback.go @@ -2,34 +2,24 @@ package deployment import ( "context" - "database/sql" - "errors" "fmt" - "time" "connectrpc.com/connect" + restateingress "github.com/restatedev/sdk-go/ingress" ctrlv1 "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/go/pkg/assert" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" "github.com/unkeyed/unkey/go/pkg/db" - pdb "github.com/unkeyed/unkey/go/pkg/partition/db" ) -// Rollback performs a rollback to a previous deployment +// Rollback performs a rollback to a previous deployment via Restate workflow // This is the main rollback implementation that the dashboard will call func (s *Service) Rollback(ctx context.Context, req *connect.Request[ctrlv1.RollbackRequest]) (*connect.Response[ctrlv1.RollbackResponse], error) { - - s.logger.Info("initiating rollback", + s.logger.Info("initiating rollback via Restate", "source", req.Msg.GetSourceDeploymentId(), "target", req.Msg.GetTargetDeploymentId(), ) - if err := assert.NotEqual( - req.Msg.GetSourceDeploymentId(), - req.Msg.GetTargetDeploymentId(), - ); err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - + // Get source deployment to determine project ID for keying sourceDeployment, err := db.Query.FindDeploymentById(ctx, s.db.RO(), req.Msg.GetSourceDeploymentId()) if err != nil { if db.IsNotFound(err) { @@ -42,149 +32,32 @@ func (s *Service) Rollback(ctx context.Context, req 
*connect.Request[ctrlv1.Roll return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get deployment: %w", err)) } - targetDeployment, err := db.Query.FindDeploymentById(ctx, s.db.RO(), req.Msg.GetTargetDeploymentId()) - if err != nil { - if db.IsNotFound(err) { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("deployment not found: %s", req.Msg.GetTargetDeploymentId())) - } - s.logger.Error("failed to get deployment", - "deployment_id", req.Msg.GetTargetDeploymentId(), - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get deployment: %w", err)) - } - - if err := assert.All( - assert.Equal(targetDeployment.EnvironmentID, sourceDeployment.EnvironmentID), - assert.Equal(targetDeployment.ProjectID, sourceDeployment.ProjectID), - ); err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - project, err := db.Query.FindProjectById(ctx, s.db.RO(), sourceDeployment.ProjectID) - if err != nil { - if db.IsNotFound(err) { - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("project not found: %s", sourceDeployment.ProjectID)) - } - s.logger.Error("failed to get project", - "project_id", sourceDeployment.ProjectID, - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get project: %w", err)) - } - - if err := assert.All( - assert.True(project.LiveDeploymentID.Valid), - assert.Equal(sourceDeployment.ID, project.LiveDeploymentID.String), - ); err != nil { - return nil, connect.NewError(connect.CodeInvalidArgument, err) - } - - vms, err := pdb.Query.FindVMsByDeploymentId(ctx, s.partitionDB.RO(), targetDeployment.ID) - if err != nil { - s.logger.Error("failed to get VMs", - "deployment_id", targetDeployment.ID, - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get VMs: %w", err)) - } - runningVms := 0 - for _, vm := range vms { - if vm.Status == 
pdb.VmsStatusRunning { - runningVms++ - } - } - if runningVms == 0 { - s.logger.Error("no VMs found", - "deployment_id", targetDeployment.ID, - ) - return nil, connect.NewError(connect.CodeNotFound, fmt.Errorf("no VMs found for deployment: %s", targetDeployment.ID)) - } - - // get all domains on the live deployment that are sticky - domains, err := db.Query.FindDomainsForRollback(ctx, s.db.RO(), db.FindDomainsForRollbackParams{ - EnvironmentID: sql.NullString{Valid: true, String: sourceDeployment.EnvironmentID}, - Sticky: []db.NullDomainsSticky{ - db.NullDomainsSticky{Valid: true, DomainsSticky: db.DomainsStickyLive}, - db.NullDomainsSticky{Valid: true, DomainsSticky: db.DomainsStickyEnvironment}, - }, + // Call the Restate workflow using project ID as the key + // This ensures only one rollback per project can run at a time + // Using Object for blocking/synchronous invocation + _, err = restateingress.Object[*hydrav1.RollbackRequest, *hydrav1.RollbackResponse]( + s.restate, + "hydra.v1.DeploymentService", + sourceDeployment.ProjectID, + "Rollback", + ).Request(ctx, &hydrav1.RollbackRequest{ + SourceDeploymentId: req.Msg.GetSourceDeploymentId(), + TargetDeploymentId: req.Msg.GetTargetDeploymentId(), }) - if err != nil { - s.logger.Error("failed to get domains", - "deployment_id", sourceDeployment.ID, - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get domains: %w", err)) - } - gatewayConfig, err := pdb.Query.FindGatewayByDeploymentId(ctx, s.partitionDB.RO(), targetDeployment.ID) if err != nil { - s.logger.Error("failed to get gateway config", - "deployment_id", targetDeployment.ID, + s.logger.Error("rollback workflow failed", + "source", req.Msg.GetSourceDeploymentId(), + "target", req.Msg.GetTargetDeploymentId(), "error", err.Error(), ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to get gateway config: %w", err)) + return nil, connect.NewError(connect.CodeInternal, 
fmt.Errorf("rollback workflow failed: %w", err)) } - domainChanges := []db.ReassignDomainParams{} - gatewayChanges := []pdb.UpsertGatewayParams{} - - for _, domain := range domains { - if domain.Sticky.Valid && - (domain.Sticky.DomainsSticky == db.DomainsStickyLive || - domain.Sticky.DomainsSticky == db.DomainsStickyEnvironment) { - - domainChanges = append(domainChanges, db.ReassignDomainParams{ - ID: domain.ID, - TargetWorkspaceID: project.WorkspaceID, - DeploymentID: sql.NullString{Valid: true, String: targetDeployment.ID}, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - }) - - gatewayChanges = append(gatewayChanges, pdb.UpsertGatewayParams{ - WorkspaceID: project.WorkspaceID, - DeploymentID: targetDeployment.ID, - Hostname: domain.Domain, - Config: gatewayConfig.Config, - }) - } - - } - - if len(domainChanges) == 0 { - return nil, connect.NewError(connect.CodeInvalidArgument, errors.New("no domains to rollback")) - } - err = pdb.BulkQuery.UpsertGateway(ctx, s.partitionDB.RW(), gatewayChanges) - if err != nil { - s.logger.Error("failed to upsert gateway", "error", err.Error()) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to upsert gateway: %w", err)) - - } - - // Not sure why there isn't a bulk query generated, but this will do for now - // cause we're only rolling back one domain anyways - for _, change := range domainChanges { - s.logger.Info("rolling back domain", "domain", change) - err = db.Query.ReassignDomain(ctx, s.db.RW(), change) - if err != nil { - s.logger.Error("failed to update domain", "error", err.Error()) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to update domain: %w", err)) - } - } - - err = db.Query.UpdateProjectDeployments(ctx, s.db.RW(), db.UpdateProjectDeploymentsParams{ - ID: project.ID, - LiveDeploymentID: sql.NullString{Valid: true, String: targetDeployment.ID}, - IsRolledBack: true, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - 
}) - if err != nil { - s.logger.Error("failed to update project deployments", - "project_id", project.ID, - "error", err.Error(), - ) - return nil, connect.NewError(connect.CodeInternal, fmt.Errorf("failed to update project's live deployment id: %w", err)) - } + s.logger.Info("rollback completed successfully via Restate", + "source", req.Msg.GetSourceDeploymentId(), + "target", req.Msg.GetTargetDeploymentId(), + ) return connect.NewResponse(&ctrlv1.RollbackResponse{}), nil } diff --git a/go/apps/ctrl/services/deployment/service.go b/go/apps/ctrl/services/deployment/service.go index f85ce853a1..c207c8b1e9 100644 --- a/go/apps/ctrl/services/deployment/service.go +++ b/go/apps/ctrl/services/deployment/service.go @@ -1,9 +1,9 @@ package deployment import ( + restateingress "github.com/restatedev/sdk-go/ingress" "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1/ctrlv1connect" "github.com/unkeyed/unkey/go/pkg/db" - "github.com/unkeyed/unkey/go/pkg/hydra" "github.com/unkeyed/unkey/go/pkg/otel/logging" ) @@ -11,16 +11,26 @@ type Service struct { ctrlv1connect.UnimplementedDeploymentServiceHandler db db.Database partitionDB db.Database - hydraEngine *hydra.Engine - logger logging.Logger + + restate *restateingress.Client + + logger logging.Logger } -func New(database db.Database, partitionDB db.Database, hydraEngine *hydra.Engine, logger logging.Logger) *Service { +type Config struct { + Database db.Database + PartitionDB db.Database + Restate *restateingress.Client + Logger logging.Logger +} + +func New(cfg Config) *Service { + return &Service{ UnimplementedDeploymentServiceHandler: ctrlv1connect.UnimplementedDeploymentServiceHandler{}, - db: database, - partitionDB: partitionDB, - hydraEngine: hydraEngine, - logger: logger, + db: cfg.Database, + partitionDB: cfg.PartitionDB, + restate: cfg.Restate, + logger: cfg.Logger, } } diff --git a/go/apps/ctrl/workflows/certificate/doc.go b/go/apps/ctrl/workflows/certificate/doc.go new file mode 100644 index 0000000000..716df091f9 --- 
/dev/null +++ b/go/apps/ctrl/workflows/certificate/doc.go @@ -0,0 +1,79 @@ +// Package certificate implements ACME certificate challenge workflows for SSL/TLS provisioning. +// +// This package handles the complete lifecycle of certificate provisioning using the ACME +// (Automatic Certificate Management Environment) protocol. It coordinates with certificate +// authorities to validate domain ownership and obtain SSL/TLS certificates. +// +// # Built on Restate +// +// All workflows in this package are built on top of Restate (restate.dev) for durable +// execution. This provides critical guarantees: +// +// - Automatic retries on transient failures +// - Exactly-once execution semantics for each workflow step +// - Durable state that survives process crashes and restarts +// - Virtual object concurrency control keyed by domain name +// +// The virtual object model ensures that only one certificate challenge runs per domain +// at any given time, preventing race conditions and duplicate certificate requests that +// could trigger rate limits from certificate authorities. +// +// # Key Types +// +// [Service] is the main entry point that implements the ACME certificate workflow. +// It handles the [Service.ProcessChallenge] method which orchestrates the entire +// certificate issuance process. 
+// +// # Usage +// +// The service is typically initialized with database connections and a vault service +// for secure storage of private keys: +// +// svc := certificate.New(certificate.Config{ +// DB: mainDB, +// PartitionDB: partitionDB, +// Vault: vaultService, +// Logger: logger, +// }) +// +// Certificate challenges are processed through the ProcessChallenge RPC: +// +// resp, err := svc.ProcessChallenge(ctx, &hydrav1.ProcessChallengeRequest{ +// WorkspaceId: "ws_123", +// Domain: "api.example.com", +// }) +// if err != nil { +// // Handle error +// } +// if resp.Status == "success" { +// // Certificate issued successfully +// } +// +// # ACME Challenge Flow +// +// The certificate challenge process follows these steps: +// +// 1. Domain validation - Verify the domain exists and belongs to the workspace +// 2. Challenge claiming - Acquire exclusive lock on the domain challenge +// 3. ACME client setup - Get or create an ACME account for the workspace +// 4. Certificate obtain/renew - Request certificate from the CA +// 5. Certificate persistence - Store certificate and encrypted private key +// 6. Challenge completion - Mark the challenge as verified with expiry time +// +// Each step is wrapped in a restate.Run call, making it durable and retryable. If the +// workflow crashes at any point, Restate will resume from the last completed step rather +// than restarting from the beginning. This ensures that ACME challenges can complete +// reliably even in the face of system failures, network partitions, or process restarts. +// +// # Security Considerations +// +// Private keys are encrypted before storage using the vault service. Certificates +// are stored in the partition database for fast access by gateways. ACME account +// credentials are workspace-scoped to prevent cross-workspace access. +// +// # Error Handling +// +// The package uses Restate's error handling model. 
Terminal errors with appropriate +// HTTP status codes are returned for client errors (invalid input, not found, etc.). +// System errors are returned for unexpected failures that may be retried. +package certificate diff --git a/go/apps/ctrl/workflows/certificate/pem.go b/go/apps/ctrl/workflows/certificate/pem.go new file mode 100644 index 0000000000..3aa23dc304 --- /dev/null +++ b/go/apps/ctrl/workflows/certificate/pem.go @@ -0,0 +1,67 @@ +package certificate + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "fmt" + "time" +) + +// privateKeyToString converts an ECDSA private key to PEM-encoded string format. +// +// The key is marshaled to DER format and then encoded as PEM with the "EC PRIVATE KEY" +// block type. This format is compatible with standard TLS libraries and certificate +// authorities. +func privateKeyToString(privateKey *ecdsa.PrivateKey) (string, error) { + // Marshal the private key to DER format + privKeyBytes, err := x509.MarshalECPrivateKey(privateKey) + if err != nil { + return "", fmt.Errorf("failed to marshal private key: %w", err) + } + + // Encode to PEM format + privKeyPEM := pem.EncodeToMemory(&pem.Block{ + Type: "EC PRIVATE KEY", + Bytes: privKeyBytes, + }) + + return string(privKeyPEM), nil +} + +// stringToPrivateKey parses a PEM-encoded ECDSA private key from a string. +// +// Returns an error if the PEM block cannot be decoded or if the key format is invalid. +func stringToPrivateKey(pemString string) (*ecdsa.PrivateKey, error) { + // Decode PEM format + block, _ := pem.Decode([]byte(pemString)) + if block == nil { + return nil, fmt.Errorf("failed to decode PEM block") + } + + // Parse the EC private key + privateKey, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("failed to parse EC private key: %w", err) + } + + return privateKey, nil +} + +// getCertificateExpiry extracts the expiration time from a PEM-encoded certificate. 
+// +// Returns the NotAfter timestamp from the certificate, which indicates when the +// certificate expires and needs renewal. +func getCertificateExpiry(certPEM string) (time.Time, error) { + block, _ := pem.Decode([]byte(certPEM)) + if block == nil { + return time.Time{}, fmt.Errorf("failed to decode PEM block") + } + + cert, err := x509.ParseCertificate(block.Bytes) + if err != nil { + return time.Time{}, fmt.Errorf("failed to parse certificate: %w", err) + } + + return cert.NotAfter, nil +} diff --git a/go/apps/ctrl/workflows/certificate/process_challenge_handler.go b/go/apps/ctrl/workflows/certificate/process_challenge_handler.go new file mode 100644 index 0000000000..1ee80e8dc5 --- /dev/null +++ b/go/apps/ctrl/workflows/certificate/process_challenge_handler.go @@ -0,0 +1,157 @@ +package certificate + +import ( + "database/sql" + "time" + + "github.com/go-acme/lego/v4/certificate" + restate "github.com/restatedev/sdk-go" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/go/pkg/db" + pdb "github.com/unkeyed/unkey/go/pkg/partition/db" +) + +// EncryptedCertificate holds a certificate and its encrypted private key. +type EncryptedCertificate struct { + // Certificate is the PEM-encoded certificate. + Certificate string + + // EncryptedPrivateKey is the encrypted PEM-encoded private key. + EncryptedPrivateKey string + + // ExpiresAt is the certificate expiration time as Unix milliseconds. + ExpiresAt int64 +} + +// ProcessChallenge handles the complete ACME certificate challenge flow. +// +// This method implements a multi-step durable workflow using Restate to obtain or renew +// an SSL/TLS certificate for a domain. Each step is wrapped in restate.Run for durability, +// allowing the workflow to resume from the last completed step if interrupted. +// +// The workflow performs these steps: +// 1. Resolve domain - Verify domain exists and belongs to workspace +// 2. 
Claim challenge - Acquire exclusive lock on the domain challenge +// 3. Setup ACME client - Get or create ACME account (TODO: not yet implemented) +// 4. Obtain certificate - Request certificate from CA (TODO: not yet implemented) +// 5. Persist certificate - Store in partition DB for gateway access +// 6. Mark verified - Update challenge status with expiry time +// +// Returns status "success" if certificate was issued, "failed" if the ACME challenge +// failed or ACME client setup is not yet implemented. +func (s *Service) ProcessChallenge( + ctx restate.ObjectContext, + req *hydrav1.ProcessChallengeRequest, +) (*hydrav1.ProcessChallengeResponse, error) { + s.logger.Info("starting certificate challenge", + "workspace_id", req.GetWorkspaceId(), + "domain", req.GetDomain(), + ) + + // Step 1: Resolve domain + dom, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.Domain, error) { + return db.Query.FindDomainByDomain(stepCtx, s.db.RO(), req.GetDomain()) + }, restate.WithName("resolving domain")) + if err != nil { + return nil, err + } + + // Step 2: Claim the challenge + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + return restate.Void{}, db.Query.UpdateAcmeChallengeTryClaiming(stepCtx, s.db.RW(), db.UpdateAcmeChallengeTryClaimingParams{ + DomainID: dom.ID, + Status: db.AcmeChallengesStatusPending, + UpdatedAt: sql.NullInt64{Int64: time.Now().UnixMilli(), Valid: true}, + }) + }, restate.WithName("acquiring challenge")) + if err != nil { + return nil, err + } + + // Step 3: Get or create ACME client for this workspace + acmeClient, err := restate.Run(ctx, func(stepCtx restate.RunContext) (*certificate.Resource, error) { + // TODO: Get ACME client for workspace + // This requires implementing GetOrCreateUser from acme/user.go + // and setting up challenge providers (HTTP-01, DNS-01) + + // For now, return error indicating this needs ACME client setup + return nil, restate.TerminalError( + err, + 500, + ) + }, 
restate.WithName("setup acme client")) + if err != nil { + _, _ = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + db.Query.UpdateAcmeChallengeStatus(stepCtx, s.db.RW(), db.UpdateAcmeChallengeStatusParams{ + DomainID: dom.ID, + Status: db.AcmeChallengesStatusFailed, + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + }) + return restate.Void{}, nil + }, restate.WithName("mark challenge failed")) + return &hydrav1.ProcessChallengeResponse{ + Status: "failed", + }, nil + } + + // Step 4: Obtain or renew certificate + cert, err := restate.Run(ctx, func(stepCtx restate.RunContext) (EncryptedCertificate, error) { + currCert, err := pdb.Query.FindCertificateByHostname(stepCtx, s.partitionDB.RO(), req.GetDomain()) + if err != nil && !db.IsNotFound(err) { + return EncryptedCertificate{}, err + } + + // TODO: Implement certificate obtain/renew logic + // This requires the ACME client from step 3 + _ = currCert + _ = acmeClient + + return EncryptedCertificate{}, restate.TerminalError( + err, + 500, + ) + }, restate.WithName("obtaining certificate")) + if err != nil { + return &hydrav1.ProcessChallengeResponse{ + Status: "failed", + }, nil + } + + // Step 5: Persist certificate to partition DB + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + now := time.Now().UnixMilli() + return restate.Void{}, pdb.Query.InsertCertificate(stepCtx, s.partitionDB.RW(), pdb.InsertCertificateParams{ + WorkspaceID: dom.WorkspaceID, + Hostname: req.GetDomain(), + Certificate: cert.Certificate, + EncryptedPrivateKey: cert.EncryptedPrivateKey, + CreatedAt: now, + UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, + }) + }, restate.WithName("persisting certificate")) + if err != nil { + return nil, err + } + + // Step 6: Mark challenge as verified + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + return restate.Void{}, db.Query.UpdateAcmeChallengeVerifiedWithExpiry(stepCtx, 
s.db.RW(), db.UpdateAcmeChallengeVerifiedWithExpiryParams{ + Status: db.AcmeChallengesStatusVerified, + ExpiresAt: cert.ExpiresAt, + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + DomainID: dom.ID, + }) + }, restate.WithName("completing challenge")) + if err != nil { + return nil, err + } + + s.logger.Info("certificate challenge completed successfully", + "domain", req.GetDomain(), + "expires_at", cert.ExpiresAt, + ) + + return &hydrav1.ProcessChallengeResponse{ + Status: "success", + }, nil +} diff --git a/go/apps/ctrl/workflows/certificate/service.go b/go/apps/ctrl/workflows/certificate/service.go new file mode 100644 index 0000000000..b2029aef7c --- /dev/null +++ b/go/apps/ctrl/workflows/certificate/service.go @@ -0,0 +1,52 @@ +package certificate + +import ( + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/otel/logging" + "github.com/unkeyed/unkey/go/pkg/vault" +) + +// Service handles ACME certificate challenge workflows. +// +// This service orchestrates the complete certificate issuance process including +// domain validation, challenge claiming, ACME protocol communication, and certificate +// storage. It implements the hydrav1.CertificateServiceServer interface. +// +// The service uses Restate virtual objects keyed by domain name to ensure that only +// one certificate challenge runs per domain at any time, preventing duplicate requests +// and rate limit violations. +type Service struct { + hydrav1.UnimplementedCertificateServiceServer + db db.Database + partitionDB db.Database + vault *vault.Service + logger logging.Logger +} + +var _ hydrav1.CertificateServiceServer = (*Service)(nil) + +// Config holds the configuration for creating a certificate service. +type Config struct { + // DB is the main database connection for workspace and domain data. + DB db.Database + + // PartitionDB is the partition database connection for certificate storage. 
+ PartitionDB db.Database + + // Vault provides encryption services for private key storage. + Vault *vault.Service + + // Logger for structured logging. + Logger logging.Logger +} + +// New creates a new certificate service instance. +func New(cfg Config) *Service { + return &Service{ + db: cfg.DB, + partitionDB: cfg.PartitionDB, + vault: cfg.Vault, + logger: cfg.Logger, + } +} diff --git a/go/apps/ctrl/workflows/deploy/deploy_handler.go b/go/apps/ctrl/workflows/deploy/deploy_handler.go new file mode 100644 index 0000000000..723df944d2 --- /dev/null +++ b/go/apps/ctrl/workflows/deploy/deploy_handler.go @@ -0,0 +1,374 @@ +package deploy + +import ( + "database/sql" + "encoding/base64" + "fmt" + "io" + "net/http" + "time" + + "connectrpc.com/connect" + restate "github.com/restatedev/sdk-go" + ctrlv1 "github.com/unkeyed/unkey/go/gen/proto/ctrl/v1" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + kranev1 "github.com/unkeyed/unkey/go/gen/proto/krane/v1" + partitionv1 "github.com/unkeyed/unkey/go/gen/proto/partition/v1" + "github.com/unkeyed/unkey/go/pkg/db" + partitiondb "github.com/unkeyed/unkey/go/pkg/partition/db" +) + +// Deploy orchestrates the complete deployment of a new Docker image. +// +// This durable workflow performs the following steps: +// 1. Load deployment, workspace, project, and environment data +// 2. Create deployment in Krane (container orchestration) +// 3. Poll for all instances to become ready +// 4. Register VMs in partition database +// 5. Scrape OpenAPI spec from running instances (if available) +// 6. Assign domains and create gateway configs via routing service +// 7. Update deployment status to ready +// 8. Update project's live deployment pointer (if production and not rolled back) +// +// Each step is wrapped in restate.Run for durability. If the workflow is interrupted, +// it resumes from the last completed step. 
A deferred error handler ensures that +// failed deployments are properly marked in the database even if the workflow crashes. +// +// The workflow uses a 5-minute polling loop to wait for instances to become ready, +// checking Krane deployment status every second and logging progress every 10 seconds. +func (w *Workflow) Deploy(ctx restate.ObjectContext, req *hydrav1.DeployRequest) (*hydrav1.DeployResponse, error) { + + var finishedSuccessfully = false + + deployment, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindDeploymentByIdRow, error) { + return db.Query.FindDeploymentById(stepCtx, w.db.RW(), req.DeploymentId) + }, restate.WithName("finding deployment")) + if err != nil { + return nil, err + } + + // If anything goes wrong, we need to update the deployment status to failed + defer func() { + if finishedSuccessfully { + return + } + + if err := w.updateDeploymentStatus(ctx, deployment.ID, db.DeploymentsStatusFailed); err != nil { + w.logger.Error("deployment failed but we can not set the status", "error", err.Error()) + } + + }() + + workspace, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.Workspace, error) { + return db.Query.FindWorkspaceByID(stepCtx, w.db.RW(), deployment.WorkspaceID) + }, restate.WithName("finding workspace")) + if err != nil { + return nil, err + } + + project, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindProjectByIdRow, error) { + return db.Query.FindProjectById(stepCtx, w.db.RW(), deployment.ProjectID) + }, restate.WithName("finding project")) + if err != nil { + return nil, err + } + environment, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindEnvironmentByIdRow, error) { + return db.Query.FindEnvironmentById(stepCtx, w.db.RW(), deployment.EnvironmentID) + }, restate.WithName("finding environment")) + if err != nil { + return nil, err + } + + // Log deployment pending + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + err = 
db.Query.InsertDeploymentStep(stepCtx, w.db.RW(), db.InsertDeploymentStepParams{ + WorkspaceID: deployment.WorkspaceID, + ProjectID: deployment.ProjectID, + DeploymentID: deployment.ID, + Status: "pending", + Message: "Deployment queued and ready to start", + CreatedAt: time.Now().UnixMilli(), + }) + return restate.Void{}, err + }, restate.WithName("logging deployment pending")) + if err != nil { + return nil, err + } + + // Update version status to building + + if err = w.updateDeploymentStatus(ctx, deployment.ID, db.DeploymentsStatusBuilding); err != nil { + return nil, err + } + + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + // Create deployment request + + _, err := w.krane.CreateDeployment(stepCtx, connect.NewRequest(&kranev1.CreateDeploymentRequest{ + + Deployment: &kranev1.DeploymentRequest{ + Namespace: hardcodedNamespace, + DeploymentId: deployment.ID, + Image: req.DockerImage, + Replicas: 1, + CpuMillicores: 512, + MemorySizeMib: 512, + }, + })) + if err != nil { + return restate.Void{}, fmt.Errorf("krane CreateDeployment failed for image %s: %w", req.DockerImage, err) + } + + return restate.Void{}, nil + }, restate.WithName("creating deployment in krane")) + if err != nil { + return nil, err + } + + w.logger.Info("deployment created", "deployment_id", deployment.ID) + + // Update version status to deploying + if err = w.updateDeploymentStatus(ctx, deployment.ID, db.DeploymentsStatusDeploying); err != nil { + return nil, err + } + createdInstances, err := restate.Run(ctx, func(stepCtx restate.RunContext) ([]*kranev1.Instance, error) { + // prevent updating the db unnecessarily + + for i := range 300 { + time.Sleep(time.Second) + if i%10 == 0 { // Log every 10 seconds instead of every second + w.logger.Info("polling deployment status", "deployment_id", deployment.ID, "iteration", i) + } + + resp, err := w.krane.GetDeployment(stepCtx, connect.NewRequest(&kranev1.GetDeploymentRequest{ + Namespace: hardcodedNamespace, + 
DeploymentId: deployment.ID, + })) + if err != nil { + return nil, fmt.Errorf("krane GetDeployment failed for deployment %s: %w", deployment.ID, err) + } + + w.logger.Info("deployment status", + "deployment_id", deployment.ID, + "status", resp.Msg, + ) + + allReady := true + for _, instance := range resp.Msg.GetInstances() { + if instance.Status != kranev1.DeploymentStatus_DEPLOYMENT_STATUS_RUNNING { + allReady = false + } + + var status partitiondb.VmsStatus + switch instance.Status { + case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_PENDING: + status = partitiondb.VmsStatusProvisioning + case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_RUNNING: + status = partitiondb.VmsStatusRunning + + case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_TERMINATING: + status = partitiondb.VmsStatusStopping + case kranev1.DeploymentStatus_DEPLOYMENT_STATUS_UNSPECIFIED: + status = partitiondb.VmsStatusAllocated + } + + upsertParams := partitiondb.UpsertVMParams{ + ID: instance.Id, + DeploymentID: deployment.ID, + Address: sql.NullString{Valid: true, String: instance.Address}, + CpuMillicores: 1000, // TODO derive from spec + MemoryMb: 1024, // TODO derive from spec + Status: status, // TODO + } + + w.logger.Info("upserting VM to database", + "vm_id", instance.Id, + "deployment_id", deployment.ID, + "address", instance.Address, + "status", status) + if err := partitiondb.Query.UpsertVM(stepCtx, w.partitionDB.RW(), upsertParams); err != nil { + return nil, fmt.Errorf("failed to upsert VM %s: %w", instance.Id, err) + } + + w.logger.Info("successfully upserted VM to database", "vm_id", instance.Id) + + } + + if allReady { + return resp.Msg.GetInstances(), nil + } + // next loop + + } + + return nil, fmt.Errorf("deployment never became ready") + }, restate.WithName("polling deployment status")) + if err != nil { + return nil, err + } + + openapiSpec, err := restate.Run(ctx, func(stepCtx restate.RunContext) (string, error) { + + for _, instance := range createdInstances { + openapiURL := 
fmt.Sprintf("http://%s/openapi.yaml", instance.GetAddress()) + w.logger.Info("trying to scrape OpenAPI spec", "url", openapiURL, "host_port", instance.GetAddress(), "deployment_id", deployment.ID) + + resp, err := http.DefaultClient.Get(openapiURL) + if err != nil { + w.logger.Warn("openapi scraping failed for host address", "error", err, "host_addr", instance.GetAddress(), "deployment_id", deployment.ID) + continue + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + w.logger.Warn("openapi endpoint returned non-200 status", "status", resp.StatusCode, "host_addr", instance.GetAddress(), "deployment_id", deployment.ID) + continue + } + + // Read the OpenAPI spec + specBytes, err := io.ReadAll(resp.Body) + if err != nil { + w.logger.Warn("failed to read OpenAPI spec response", "error", err, "host_addr", instance.GetAddress(), "deployment_id", deployment.ID) + continue + } + + w.logger.Info("openapi spec scraped successfully", "host_addr", instance.GetAddress(), "deployment_id", deployment.ID, "spec_size", len(specBytes)) + return base64.StdEncoding.EncodeToString(specBytes), nil + } + // not an error really, just no OpenAPI spec found + return "", nil + + }, restate.WithName("scrape openapi spec")) + + if err != nil { + return nil, err + } + + if openapiSpec != "" { + + _, err = restate.Run(ctx, func(innerCtx restate.RunContext) (restate.Void, error) { + + return restate.Void{}, db.Query.UpdateDeploymentOpenapiSpec(innerCtx, w.db.RW(), db.UpdateDeploymentOpenapiSpecParams{ + ID: deployment.ID, + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + OpenapiSpec: sql.NullString{Valid: true, String: openapiSpec}, + }) + + }, restate.WithName("update deployment openapi spec")) + + } + + allDomains := buildDomains( + workspace.Slug, + project.Slug, + environment.Slug, + deployment.GitCommitSha.String, + deployment.GitBranch.String, + w.defaultDomain, + ctrlv1.SourceType_SOURCE_TYPE_CLI_UPLOAD, // hardcoded for now cause I really need 
to move on + ) + + // Create VM protobuf objects for gateway config + gatewayConfig := &partitionv1.GatewayConfig{ + Deployment: &partitionv1.Deployment{ + Id: deployment.ID, + IsEnabled: true, + }, + Vms: make([]*partitionv1.VM, len(createdInstances)), + } + + for i, vm := range createdInstances { + gatewayConfig.Vms[i] = &partitionv1.VM{ + Id: vm.Id, + } + } + + // Only add AuthConfig if we have a KeyspaceID + if req.GetKeyAuthId() != "" { + gatewayConfig.AuthConfig = &partitionv1.AuthConfig{ + KeyAuthId: req.GetKeyAuthId(), + } + } + + if openapiSpec != "" { + gatewayConfig.ValidationConfig = &partitionv1.ValidationConfig{ + OpenapiSpec: openapiSpec, + } + } + + // Build domain assignment requests + domainRequests := make([]*hydrav1.DomainToAssign, 0, len(allDomains)) + for _, domain := range allDomains { + sticky := hydrav1.DomainSticky_DOMAIN_STICKY_UNSPECIFIED + if domain.sticky.Valid { + switch domain.sticky.DomainsSticky { + case db.DomainsStickyBranch: + sticky = hydrav1.DomainSticky_DOMAIN_STICKY_BRANCH + case db.DomainsStickyEnvironment: + sticky = hydrav1.DomainSticky_DOMAIN_STICKY_ENVIRONMENT + case db.DomainsStickyLive: + sticky = hydrav1.DomainSticky_DOMAIN_STICKY_LIVE + } + } + domainRequests = append(domainRequests, &hydrav1.DomainToAssign{ + Name: domain.domain, + Sticky: sticky, + }) + } + + // Call RoutingService to assign domains atomically + _, err = hydrav1.NewRoutingServiceClient(ctx, project.ID). 
+ AssignDomains().Request(&hydrav1.AssignDomainsRequest{ + WorkspaceId: workspace.ID, + ProjectId: project.ID, + EnvironmentId: environment.ID, + DeploymentId: deployment.ID, + Domains: domainRequests, + GatewayConfig: gatewayConfig, + IsRolledBack: project.IsRolledBack, + }) + if err != nil { + return nil, fmt.Errorf("failed to assign domains: %w", err) + } + + // Update deployment status to ready + if err = w.updateDeploymentStatus(ctx, deployment.ID, db.DeploymentsStatusReady); err != nil { + return nil, err + } + + if !project.IsRolledBack && environment.Slug == "production" { + // only update this if the deployment is not rolled back + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + return restate.Void{}, db.Query.UpdateProjectDeployments(stepCtx, w.db.RW(), db.UpdateProjectDeploymentsParams{ + ID: deployment.ProjectID, + LiveDeploymentID: sql.NullString{Valid: true, String: deployment.ID}, + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + }) + }, restate.WithName("updating project live deployment")) + if err != nil { + return nil, err + } + } + + // Log deployment completed + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + return restate.Void{}, db.Query.InsertDeploymentStep(stepCtx, w.db.RW(), db.InsertDeploymentStepParams{ + DeploymentID: deployment.ID, + Status: "completed", + Message: "Deployment completed successfully", + CreatedAt: time.Now().UnixMilli(), + }) + }, restate.WithName("logging deployment completed")) + if err != nil { + return nil, err + } + + w.logger.Info("deployment workflow completed", + "deployment_id", deployment.ID, + "status", "succeeded", + "domains", len(allDomains)) + + finishedSuccessfully = true + + return &hydrav1.DeployResponse{}, nil +} diff --git a/go/apps/ctrl/workflows/deploy/doc.go b/go/apps/ctrl/workflows/deploy/doc.go new file mode 100644 index 0000000000..380b5532c8 --- /dev/null +++ 
b/go/apps/ctrl/workflows/deploy/doc.go @@ -0,0 +1,124 @@ +// Package deploy implements deployment lifecycle orchestration workflows. +// +// This package manages the complete deployment lifecycle including deploying new versions, +// rolling back to previous versions, and promoting deployments. It coordinates between +// container orchestration (Krane), database updates, domain routing, and gateway configuration +// to ensure consistent deployment state. +// +// # Built on Restate +// +// All workflows in this package are built on top of Restate (restate.dev) for durable +// execution. This provides critical guarantees: +// +// - Automatic retries on transient failures +// - Exactly-once execution semantics for each workflow step +// - Durable state that survives process crashes and restarts +// - Virtual object concurrency control keyed by project ID +// +// The virtual object model ensures that only one deployment operation runs per project +// at any given time, preventing race conditions during concurrent deploy/rollback/promote +// operations that could leave the system in an inconsistent state. +// +// # Key Types +// +// [Workflow] is the main entry point that implements deployment orchestration. 
+// It provides three primary operations: +// +// - [Workflow.Deploy] - Deploy a new Docker image and configure routing +// - [Workflow.Rollback] - Roll back to a previous deployment +// - [Workflow.Promote] - Promote a deployment to live and clear rollback state +// +// # Usage +// +// The workflow is typically initialized with database connections, a Krane client, +// and configuration: +// +// workflow := deploy.New(deploy.Config{ +// DB: mainDB, +// PartitionDB: partitionDB, +// Krane: kraneClient, +// Logger: logger, +// DefaultDomain: "unkey.app", +// }) +// +// Deploy a new version: +// +// _, err := workflow.Deploy(ctx, &hydrav1.DeployRequest{ +// DeploymentId: "dep_123", +// DockerImage: "myapp:v1.2.3", +// KeyAuthId: "key_auth_456", // optional +// }) +// +// Rollback to previous version: +// +// _, err := workflow.Rollback(ctx, &hydrav1.RollbackRequest{ +// SourceDeploymentId: "dep_current", +// TargetDeploymentId: "dep_previous", +// }) +// +// Promote a deployment to live: +// +// _, err := workflow.Promote(ctx, &hydrav1.PromoteRequest{ +// TargetDeploymentId: "dep_123", +// }) +// +// # Deployment Flow +// +// The deployment process follows these steps: +// +// 1. Deployment lookup - Find and validate deployment record +// 2. Context gathering - Load workspace, project, and environment data +// 3. Status update to building - Mark deployment as in-progress +// 4. Container deployment - Create deployment in Krane +// 5. Polling for readiness - Wait for all instances to be running +// 6. VM registration - Register running instances in partition DB +// 7. OpenAPI scraping - Fetch API spec from running instances (if available) +// 8. Domain assignment - Create/update domains and gateway configs via routing service +// 9. Status update to ready - Mark deployment as live +// 10. Project update - Update live deployment pointer (if production) +// +// Each step is wrapped in a restate.Run call, making it durable and retryable. 
If the +// workflow crashes at any point, Restate will resume from the last completed step rather +// than restarting from the beginning. The deferred error handler ensures that failed +// deployments are properly marked in the database even if the workflow is interrupted. +// +// # Rollback and Promote +// +// Rollbacks switch sticky domains (environment and live domains) from the current deployment +// to a previous deployment. This is done atomically through the routing service to prevent +// partial updates. The project is marked as rolled back to prevent new deployments from +// automatically taking over live domains. +// +// Promotion reverses a rollback by switching domains to a new deployment and clearing the +// rolled back flag. This allows normal deployment flow to resume. +// +// # Domain Generation +// +// The package generates multiple domain patterns per deployment: +// +// - Per-commit: `-git--.` (never reassigned) +// - Per-branch: `-git--.` (sticky to branch) +// - Per-environment: `--.` (sticky to environment) +// +// The sticky behavior ensures that branch and environment domains follow the latest +// deployment for that branch/environment, while commit domains remain immutable. +// +// # Gateway Configuration +// +// Gateway configs are created for all domains (except localhost and .local/.test TLDs) +// and stored as JSON in the partition database. Each config includes: +// +// - Deployment ID and enabled status +// - VM addresses for load balancing +// - Optional auth configuration (key auth ID) +// - Optional validation configuration (OpenAPI spec) +// +// Gateway configs use protojson encoding for easier debugging and direct database inspection. +// +// # Error Handling +// +// The package uses Restate's error handling model with deferred cleanup. If any step fails, +// the deployment status is automatically updated to "failed". 
Terminal errors with appropriate +// HTTP status codes are returned for client errors (invalid input, not found, etc.). System +// errors are returned for unexpected failures that may be retried by Restate. +package deploy diff --git a/go/apps/ctrl/services/deployment/domains.go b/go/apps/ctrl/workflows/deploy/domains.go similarity index 61% rename from go/apps/ctrl/services/deployment/domains.go rename to go/apps/ctrl/workflows/deploy/domains.go index f7be6bdd52..e7bdf008cc 100644 --- a/go/apps/ctrl/services/deployment/domains.go +++ b/go/apps/ctrl/workflows/deploy/domains.go @@ -1,4 +1,4 @@ -package deployment +package deploy import ( "fmt" @@ -15,13 +15,24 @@ type newDomain struct { sticky db.NullDomainsSticky } -// buildDomains looks at the deployment and returns a list of domains -// that should be assigned to the deployment. +// buildDomains generates the list of domains that should be assigned to a deployment. // -// We want these domains per deployment -// - `-git--.unkey.app` (this never gets reassigned) -// - `-git--.unkey.app` (this needs to point to the latest deployment of that branch, sluggify the branch name ) -// - `--.unkey.app` (this needs to point to the latest deployment of that environment and be rolled back) +// The function creates three types of domains: +// +// 1. Per-commit domain: `-git--.` +// - Never reassigned, provides stable URL for specific commit +// - For CLI uploads, adds random suffix to prevent collisions +// +// 2. Per-branch domain: `-git--.` +// - Sticky to branch, always points to latest deployment of that branch +// - Branch name is sluggified for URL safety +// +// 3. Per-environment domain: `--.` +// - Sticky to environment, points to latest deployment in that environment +// - Can be rolled back to previous deployment +// +// The sticky behavior ensures branch and environment domains automatically update to point +// to new deployments, while commit domains remain immutable. 
func buildDomains(workspaceSlug, projectSlug, environmentSlug, gitSha, branchName, apex string, source ctrlv1.SourceType) []newDomain { // Deploying via CLI often sends the same git sha, and we want to make them unique, @@ -71,6 +82,18 @@ func buildDomains(workspaceSlug, projectSlug, environmentSlug, gitSha, branchNam var nonAlphanumericRegex = regexp.MustCompile(`[^a-zA-Z0-9\s]`) var multipleSpacesRegex = regexp.MustCompile(`\s+`) +// sluggify converts a string into a URL-safe slug. +// +// The function performs these transformations: +// - Removes all non-alphanumeric characters (except spaces, hyphens, underscores) +// - Converts hyphens and underscores to spaces +// - Collapses multiple spaces into single space +// - Replaces spaces with hyphens +// - Converts to lowercase +// - Limits to 80 characters +// - Removes trailing hyphens +// +// This is used to convert Git branch names into URL-safe domain components. func sluggify(s string) string { // Trim whitespace s = strings.TrimSpace(s) diff --git a/go/apps/ctrl/workflows/deploy/helpers.go b/go/apps/ctrl/workflows/deploy/helpers.go new file mode 100644 index 0000000000..8adbd5cab4 --- /dev/null +++ b/go/apps/ctrl/workflows/deploy/helpers.go @@ -0,0 +1,111 @@ +package deploy + +import ( + "database/sql" + "fmt" + "strings" + "time" + + restate "github.com/restatedev/sdk-go" + kranev1 "github.com/unkeyed/unkey/go/gen/proto/krane/v1" + partitionv1 "github.com/unkeyed/unkey/go/gen/proto/partition/v1" + "github.com/unkeyed/unkey/go/pkg/db" +) + +// createGatewayConfig creates a gateway configuration protobuf object. +// +// The config includes deployment ID, VM addresses for load balancing, and optional +// auth configuration. Gateway configs control how the edge gateways route traffic +// and authenticate requests for each deployment. +// +// ENCODING POLICY: Gateway configs are stored as JSON (using protojson.Marshal) for +// easier debugging and readability during development. 
This makes it simpler to inspect +// and modify configs directly in the database. Always use protojson.Marshal for writes +// and protojson.Unmarshal for reads. +func createGatewayConfig(deploymentID, keyspaceID string, instances []*kranev1.Instance) (*partitionv1.GatewayConfig, error) { + // Create VM protobuf objects for gateway config + gatewayConfig := &partitionv1.GatewayConfig{ + Deployment: &partitionv1.Deployment{ + Id: deploymentID, + IsEnabled: true, + }, + Vms: make([]*partitionv1.VM, len(instances)), + } + + for i, vm := range instances { + gatewayConfig.Vms[i] = &partitionv1.VM{ + Id: vm.Id, + } + } + + // Only add AuthConfig if we have a KeyspaceID + if keyspaceID != "" { + gatewayConfig.AuthConfig = &partitionv1.AuthConfig{ + KeyAuthId: keyspaceID, + } + } + + return gatewayConfig, nil +} + +// isLocalHostname checks if a hostname should be skipped from gateway config creation. +// +// Returns true for localhost and development domains (.local, .test TLDs) that should +// not get gateway configurations. Hostnames using the default domain (e.g., *.unkey.app) +// return false, as they represent production/staging environments and need gateway configs. +// +// This prevents unnecessary config creation during local development while ensuring +// production domains are properly configured. 
func isLocalHostname(hostname, defaultDomain string) bool {
	// Lowercase for case-insensitive comparison
	hostname = strings.ToLower(hostname)
	defaultDomain = strings.ToLower(defaultDomain)

	// Exact matches for common local hosts - these should be skipped.
	// "::1" is the IPv6 loopback and must be treated the same as 127.0.0.1;
	// previously it fell through and was not recognized as local.
	if hostname == "localhost" || hostname == "127.0.0.1" || hostname == "::1" {
		return true
	}

	// If hostname uses the default domain, it should NOT be skipped (return false)
	// This allows gateway configs to be created for the default domain
	if strings.HasSuffix(hostname, "."+defaultDomain) || hostname == defaultDomain {
		return false
	}

	// Check for local-only TLD suffixes - these should be skipped
	// Note: .dev is a real TLD owned by Google, so it's excluded
	localSuffixes := []string{
		".local",
		".test",
	}

	for _, suffix := range localSuffixes {
		if strings.HasSuffix(hostname, suffix) {
			return true
		}
	}

	return false
}

// updateDeploymentStatus updates the status of a deployment in the database.
//
// This is a durable operation wrapped in restate.Run to ensure the status update
// is persisted even if the workflow is interrupted. Status updates are critical
// for tracking deployment progress and handling failures.
+func (w *Workflow) updateDeploymentStatus(ctx restate.ObjectContext, deploymentID string, status db.DeploymentsStatus) error { + + _, err := restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + updateErr := db.Query.UpdateDeploymentStatus(stepCtx, w.db.RW(), db.UpdateDeploymentStatusParams{ + ID: deploymentID, + Status: status, + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + }) + if updateErr != nil { + return restate.Void{}, fmt.Errorf("failed to update version status to building: %w", updateErr) + } + return restate.Void{}, nil + }, restate.WithName(fmt.Sprintf("updating deployment status to %s", status))) + return err + +} diff --git a/go/apps/ctrl/workflows/deploy/promote_handler.go b/go/apps/ctrl/workflows/deploy/promote_handler.go new file mode 100644 index 0000000000..e7f01dfed0 --- /dev/null +++ b/go/apps/ctrl/workflows/deploy/promote_handler.go @@ -0,0 +1,147 @@ +package deploy + +import ( + "database/sql" + "fmt" + "time" + + restate "github.com/restatedev/sdk-go" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/go/pkg/db" + partitiondb "github.com/unkeyed/unkey/go/pkg/partition/db" +) + +// Promote reassigns all sticky domains to a deployment and clears the rolled back state. +// +// This durable workflow moves sticky domains (environment and live domains) from the +// current live deployment to a new target deployment. It reverses a previous rollback +// and allows normal deployment flow to resume. 
//
// The workflow validates that:
// - Target deployment is ready (not building, deploying, or failed)
// - Target deployment has running VMs
// - Target deployment is not already the live deployment
// - Project has sticky domains to promote
//
// After switching domains atomically through the routing service, the project's live
// deployment pointer is updated and the rolled back flag is cleared, allowing future
// deployments to automatically take over sticky domains.
//
// Returns terminal errors (400/404) for validation failures and retryable errors
// for system failures.
func (w *Workflow) Promote(ctx restate.ObjectContext, req *hydrav1.PromoteRequest) (*hydrav1.PromoteResponse, error) {
	w.logger.Info("initiating promotion", "target", req.GetTargetDeploymentId())

	// Get target deployment
	targetDeployment, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindDeploymentByIdRow, error) {
		return db.Query.FindDeploymentById(stepCtx, w.db.RO(), req.GetTargetDeploymentId())
	}, restate.WithName("finding target deployment"))
	if err != nil {
		if db.IsNotFound(err) {
			return nil, restate.TerminalError(fmt.Errorf("deployment not found: %s", req.GetTargetDeploymentId()), 404)
		}
		return nil, fmt.Errorf("failed to get target deployment: %w", err)
	}

	// Get project
	project, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindProjectByIdRow, error) {
		return db.Query.FindProjectById(stepCtx, w.db.RO(), targetDeployment.ProjectID)
	}, restate.WithName("finding project"))
	if err != nil {
		if db.IsNotFound(err) {
			return nil, restate.TerminalError(fmt.Errorf("project not found: %s", targetDeployment.ProjectID), 404)
		}
		return nil, fmt.Errorf("failed to get project: %w", err)
	}

	// Validate preconditions: target must be ready, a live deployment must exist,
	// and promoting the already-live deployment would be a no-op, so reject it.
	if targetDeployment.Status != db.DeploymentsStatusReady {
		return nil, restate.TerminalError(fmt.Errorf("deployment status must be ready, got: %s", targetDeployment.Status), 400)
	}
	if !project.LiveDeploymentID.Valid {
		return nil, restate.TerminalError(fmt.Errorf("project has no live deployment"), 400)
	}
	if targetDeployment.ID == project.LiveDeploymentID.String {
		return nil, restate.TerminalError(fmt.Errorf("target deployment is already the live deployment"), 400)
	}

	// Check target deployment has running VMs before sending traffic its way
	vms, err := restate.Run(ctx, func(stepCtx restate.RunContext) ([]partitiondb.Vm, error) {
		return partitiondb.Query.FindVMsByDeploymentId(stepCtx, w.partitionDB.RO(), targetDeployment.ID)
	}, restate.WithName("finding target VMs"))
	if err != nil {
		return nil, fmt.Errorf("failed to get VMs: %w", err)
	}

	runningVms := 0
	for _, vm := range vms {
		if vm.Status == partitiondb.VmsStatusRunning {
			runningVms++
		}
	}
	if runningVms == 0 {
		return nil, restate.TerminalError(fmt.Errorf("no running VMs found for target deployment: %s", targetDeployment.ID), 400)
	}

	w.logger.Info("found running VMs for target deployment", "count", runningVms, "deployment_id", targetDeployment.ID)

	// Get all domains for promotion — only live- and environment-sticky domains
	// are selected; per-commit domains are never reassigned.
	domains, err := restate.Run(ctx, func(stepCtx restate.RunContext) ([]db.FindDomainsForPromotionRow, error) {
		return db.Query.FindDomainsForPromotion(stepCtx, w.db.RO(), db.FindDomainsForPromotionParams{
			EnvironmentID: sql.NullString{Valid: true, String: targetDeployment.EnvironmentID},
			Sticky: []db.NullDomainsSticky{
				{Valid: true, DomainsSticky: db.DomainsStickyLive},
				{Valid: true, DomainsSticky: db.DomainsStickyEnvironment},
			},
		})
	}, restate.WithName("finding domains for promotion"))
	if err != nil {
		return nil, fmt.Errorf("failed to get domains: %w", err)
	}

	if len(domains) == 0 {
		return nil, restate.TerminalError(fmt.Errorf("no domains found for promotion"), 400)
	}

	w.logger.Info("found domains for promotion", "count", len(domains), "deployment_id", targetDeployment.ID)

	// Collect domain IDs — promotion moves every sticky domain returned above
	var domainIDs []string
	for _, domain := range domains {
		domainIDs = append(domainIDs, domain.ID)
	}

	// Call RoutingService to switch domains atomically. The routing service client
	// is keyed by project ID, so domain switches for one project are serialized.
	routingClient := hydrav1.NewRoutingServiceClient(ctx, project.ID)
	_, err = routingClient.SwitchDomains().Request(&hydrav1.SwitchDomainsRequest{
		TargetDeploymentId: targetDeployment.ID,
		DomainIds:          domainIDs,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to switch domains: %w", err)
	}

	// Update project's live deployment and clear rolled back flag
	_, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) {
		err := db.Query.UpdateProjectDeployments(stepCtx, w.db.RW(), db.UpdateProjectDeploymentsParams{
			ID:               project.ID,
			LiveDeploymentID: sql.NullString{Valid: true, String: targetDeployment.ID},
			IsRolledBack:     false,
			UpdatedAt:        sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()},
		})
		if err != nil {
			return restate.Void{}, fmt.Errorf("failed to update project's live deployment id: %w", err)
		}
		w.logger.Info("updated project live deployment", "project_id", project.ID, "live_deployment_id", targetDeployment.ID)
		return restate.Void{}, nil
	}, restate.WithName("updating project live deployment"))
	if err != nil {
		return nil, err
	}

	w.logger.Info("promotion completed successfully",
		"target", req.GetTargetDeploymentId(),
		"domains_promoted", len(domainIDs))

	return &hydrav1.PromoteResponse{}, nil
}
diff --git a/go/apps/ctrl/workflows/deploy/rollback_handler.go b/go/apps/ctrl/workflows/deploy/rollback_handler.go
new file mode 100644
index 0000000000..cc952f6cc5
--- /dev/null
+++ b/go/apps/ctrl/workflows/deploy/rollback_handler.go
@@ -0,0 +1,173 @@
package deploy

import (
	"database/sql"
	"fmt"
	"time"

	restate "github.com/restatedev/sdk-go"
	hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1"
	"github.com/unkeyed/unkey/go/pkg/db"
	partitiondb "github.com/unkeyed/unkey/go/pkg/partition/db"
)

// Rollback performs a rollback to a previous deployment.
//
// This durable workflow switches sticky domains (environment and live domains) from the
// current live deployment back to a previous deployment. The operation is performed
// atomically through the routing service to prevent partial updates that could leave
// the system in an inconsistent state.
//
// The workflow validates that:
// - Source deployment is the current live deployment
// - Target deployment has running VMs
// - Both deployments are in the same project and environment
// - There are sticky domains to rollback
//
// After switching domains, the project is marked as rolled back to prevent new
// deployments from automatically taking over the live domains.
//
// Returns terminal errors (400/404) for validation failures and retryable errors
// for system failures.
func (w *Workflow) Rollback(ctx restate.ObjectContext, req *hydrav1.RollbackRequest) (*hydrav1.RollbackResponse, error) {
	w.logger.Info("initiating rollback",
		"source", req.GetSourceDeploymentId(),
		"target", req.GetTargetDeploymentId(),
	)

	// Validate source and target are different
	if req.GetSourceDeploymentId() == req.GetTargetDeploymentId() {
		return nil, restate.TerminalError(fmt.Errorf("source and target deployments must be different"), 400)
	}

	// Get source deployment
	sourceDeployment, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindDeploymentByIdRow, error) {
		return db.Query.FindDeploymentById(stepCtx, w.db.RO(), req.GetSourceDeploymentId())
	}, restate.WithName("finding source deployment"))
	if err != nil {
		if db.IsNotFound(err) {
			return nil, restate.TerminalError(fmt.Errorf("source deployment not found: %s", req.GetSourceDeploymentId()), 404)
		}
		return nil, fmt.Errorf("failed to get source deployment: %w", err)
	}

	// Get target deployment
	targetDeployment, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindDeploymentByIdRow, error) {
		return db.Query.FindDeploymentById(stepCtx, w.db.RO(), req.GetTargetDeploymentId())
	}, restate.WithName("finding target deployment"))
	if err != nil {
		if db.IsNotFound(err) {
			return nil, restate.TerminalError(fmt.Errorf("target deployment not found: %s", req.GetTargetDeploymentId()), 404)
		}
		return nil, fmt.Errorf("failed to get target deployment: %w", err)
	}

	// Validate deployments are in same environment and project
	if targetDeployment.EnvironmentID != sourceDeployment.EnvironmentID {
		return nil, restate.TerminalError(fmt.Errorf("deployments must be in the same environment"), 400)
	}
	if targetDeployment.ProjectID != sourceDeployment.ProjectID {
		return nil, restate.TerminalError(fmt.Errorf("deployments must be in the same project"), 400)
	}

	// Get project
	project, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindProjectByIdRow, error) {
		return db.Query.FindProjectById(stepCtx, w.db.RO(), sourceDeployment.ProjectID)
	}, restate.WithName("finding project"))
	if err != nil {
		if db.IsNotFound(err) {
			return nil, restate.TerminalError(fmt.Errorf("project not found: %s", sourceDeployment.ProjectID), 404)
		}
		return nil, fmt.Errorf("failed to get project: %w", err)
	}

	// Validate source deployment is the live deployment
	if !project.LiveDeploymentID.Valid || project.LiveDeploymentID.String != sourceDeployment.ID {
		return nil, restate.TerminalError(fmt.Errorf("source deployment is not the current live deployment"), 400)
	}

	// Check target deployment has running VMs before routing traffic back to it
	vms, err := restate.Run(ctx, func(stepCtx restate.RunContext) ([]partitiondb.Vm, error) {
		return partitiondb.Query.FindVMsByDeploymentId(stepCtx, w.partitionDB.RO(), targetDeployment.ID)
	}, restate.WithName("finding target VMs"))
	if err != nil {
		return nil, fmt.Errorf("failed to get VMs: %w", err)
	}

	runningVms := 0
	for _, vm := range vms {
		if vm.Status == partitiondb.VmsStatusRunning {
			runningVms++
		}
	}
	if runningVms == 0 {
		return nil, restate.TerminalError(fmt.Errorf("no running VMs found for target deployment: %s", targetDeployment.ID), 400)
	}

	w.logger.Info("found running VMs for target deployment", "count", runningVms, "deployment_id", targetDeployment.ID)

	// Get all domains on the live deployment that are sticky
	domains, err := restate.Run(ctx, func(stepCtx restate.RunContext) ([]db.FindDomainsForRollbackRow, error) {
		return db.Query.FindDomainsForRollback(stepCtx, w.db.RO(), db.FindDomainsForRollbackParams{
			EnvironmentID: sql.NullString{Valid: true, String: sourceDeployment.EnvironmentID},
			Sticky: []db.NullDomainsSticky{
				{Valid: true, DomainsSticky: db.DomainsStickyLive},
				{Valid: true, DomainsSticky: db.DomainsStickyEnvironment},
			},
		})
	}, restate.WithName("finding domains for rollback"))
	if err != nil {
		return nil, fmt.Errorf("failed to get domains: %w", err)
	}

	if len(domains) == 0 {
		return nil, restate.TerminalError(fmt.Errorf("no domains to rollback"), 400)
	}

	w.logger.Info("found domains for rollback", "count", len(domains), "deployment_id", sourceDeployment.ID)

	// Collect domain IDs
	// NOTE(review): the query above already filters to live/environment sticky rows,
	// so this re-check is defensive; it only matters if the query's filter ever widens.
	var domainIDs []string
	for _, domain := range domains {
		if domain.Sticky.Valid &&
			(domain.Sticky.DomainsSticky == db.DomainsStickyLive ||
				domain.Sticky.DomainsSticky == db.DomainsStickyEnvironment) {
			domainIDs = append(domainIDs, domain.ID)
		}
	}

	// Call RoutingService to switch domains atomically. The routing service client
	// is keyed by project ID, so domain switches for one project are serialized.
	routingClient := hydrav1.NewRoutingServiceClient(ctx, project.ID)
	_, err = routingClient.SwitchDomains().Request(&hydrav1.SwitchDomainsRequest{
		TargetDeploymentId: targetDeployment.ID,
		DomainIds:          domainIDs,
	})
	if err != nil {
		return nil, fmt.Errorf("failed to switch domains: %w", err)
	}

	// Update project's live deployment; IsRolledBack=true blocks new deployments
	// from automatically reclaiming the sticky domains until a promote clears it.
	_, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) {
		err := db.Query.UpdateProjectDeployments(stepCtx, w.db.RW(), db.UpdateProjectDeploymentsParams{
			ID:               project.ID,
			LiveDeploymentID: sql.NullString{Valid: true, String: targetDeployment.ID},
			IsRolledBack:     true,
			UpdatedAt:        sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()},
		})
		if err != nil {
			return restate.Void{}, fmt.Errorf("failed to update project's live deployment id: %w", err)
		}
		w.logger.Info("updated project live deployment", "project_id", project.ID, "live_deployment_id", targetDeployment.ID)
		return restate.Void{}, nil
	}, restate.WithName("updating project live deployment"))
	if err != nil {
		return nil, err
	}

	w.logger.Info("rollback completed successfully",
		"source", req.GetSourceDeploymentId(),
		"target", req.GetTargetDeploymentId(),
		"domains_rolled_back", len(domainIDs))

	return &hydrav1.RollbackResponse{}, nil
}
diff --git a/go/apps/ctrl/workflows/deploy/service.go b/go/apps/ctrl/workflows/deploy/service.go
new file mode 100644
index 0000000000..8063275b22
--- /dev/null
+++ b/go/apps/ctrl/workflows/deploy/service.go
@@ -0,0 +1,60 @@
package deploy

import (
	hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1"
	"github.com/unkeyed/unkey/go/gen/proto/krane/v1/kranev1connect"
	"github.com/unkeyed/unkey/go/pkg/db"
	"github.com/unkeyed/unkey/go/pkg/otel/logging"
)

const hardcodedNamespace = "unkey" // TODO change to workspace scope

// Workflow orchestrates deployment lifecycle operations.
//
// This workflow manages the complete deployment lifecycle including deploying new versions,
// rolling back to previous versions, and promoting deployments to live. It coordinates
// between container orchestration (Krane), database updates, domain routing, and gateway
// configuration to ensure consistent deployment state.
//
// The workflow uses Restate virtual objects keyed by project ID to ensure that only one
// deployment operation runs per project at any time, preventing race conditions during
// concurrent deploy/rollback/promote operations.
type Workflow struct {
	hydrav1.UnimplementedDeploymentServiceServer

	// db is the main control-plane database (workspace, project, deployment data).
	db db.Database
	// partitionDB stores partition-scoped data such as VM records and gateway configs.
	partitionDB db.Database
	// logger emits structured logs for each workflow step.
	logger logging.Logger
	// krane is the container-orchestration client used to create and poll deployments.
	krane kranev1connect.DeploymentServiceClient
	// defaultDomain is the apex domain for generated deployment URLs (e.g., "unkey.app").
	defaultDomain string
}

// Compile-time check that *Workflow implements the generated service interface.
var _ hydrav1.DeploymentServiceServer = (*Workflow)(nil)

// Config holds the configuration for creating a deployment workflow.
type Config struct {
	// Logger for structured logging.
	Logger logging.Logger

	// DB is the main database connection for workspace, project, and deployment data.
	DB db.Database

	// PartitionDB is the partition database connection for VM and gateway config storage.
	PartitionDB db.Database

	// Krane is the client for container orchestration operations.
	Krane kranev1connect.DeploymentServiceClient

	// DefaultDomain is the apex domain for generated deployment URLs (e.g., "unkey.app").
	DefaultDomain string
}

// New creates a new deployment workflow instance.
func New(cfg Config) *Workflow {
	return &Workflow{
		db:            cfg.DB,
		partitionDB:   cfg.PartitionDB,
		logger:        cfg.Logger,
		krane:         cfg.Krane,
		defaultDomain: cfg.DefaultDomain,
	}
}
diff --git a/go/apps/ctrl/workflows/routing/assign_domains_handler.go b/go/apps/ctrl/workflows/routing/assign_domains_handler.go
new file mode 100644
index 0000000000..f4e1219c08
--- /dev/null
+++ b/go/apps/ctrl/workflows/routing/assign_domains_handler.go
@@ -0,0 +1,163 @@
package routing

import (
	"context"
	"database/sql"
	"fmt"
	"time"

	restate "github.com/restatedev/sdk-go"
	hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1"
	"github.com/unkeyed/unkey/go/pkg/db"
	partitiondb "github.com/unkeyed/unkey/go/pkg/partition/db"
	"github.com/unkeyed/unkey/go/pkg/uid"
	"google.golang.org/protobuf/encoding/protojson"
)

// AssignDomains creates or reassigns domains to a deployment and creates gateway configs.
//
// This durable workflow performs the following steps for each domain:
// 1. Check if domain exists in the database
// 2.
If new, create domain record with specified sticky behavior +// 3. If existing and not rolled back, reassign to new deployment +// 4. If existing and rolled back, skip reassignment +// 5. Create gateway configs for all changed domains (except local hostnames) +// +// Each domain upsert is wrapped in a separate restate.Run call with a unique name, +// allowing partial completion tracking. If the workflow fails after creating some domains, +// Restate will skip the already-created domains on retry. +// +// Gateway configs are updated in bulk for all changed domains, using protojson encoding +// for easier debugging. Local hostnames (localhost, *.local, *.test) are skipped to +// prevent unnecessary config creation during local development. +// +// Returns the list of domain names that were actually modified (created or reassigned). +func (s *Service) AssignDomains(ctx restate.ObjectContext, req *hydrav1.AssignDomainsRequest) (*hydrav1.AssignDomainsResponse, error) { + s.logger.Info("assigning domains", + "deployment_id", req.GetDeploymentId(), + "domain_count", len(req.GetDomains()), + ) + + changedDomains := []string{} + + // Upsert each domain in the database + for _, domain := range req.GetDomains() { + changed, err := restate.Run(ctx, func(stepCtx restate.RunContext) (bool, error) { + now := time.Now().UnixMilli() + + var wasChanged bool + err := db.Tx(stepCtx, s.db.RW(), func(txCtx context.Context, tx db.DBTX) error { + existing, err := db.Query.FindDomainByDomain(txCtx, tx, domain.GetName()) + if err != nil { + if !db.IsNotFound(err) { + return fmt.Errorf("failed to find domain: %w", err) + } + + // Domain does not exist, create it + sticky := parseDomainSticky(domain.GetSticky()) + err := db.Query.InsertDomain(txCtx, tx, db.InsertDomainParams{ + ID: uid.New("domain"), + WorkspaceID: req.GetWorkspaceId(), + ProjectID: sql.NullString{Valid: true, String: req.GetProjectId()}, + EnvironmentID: sql.NullString{Valid: true, String: req.GetEnvironmentId()}, + Domain: 
domain.GetName(), + Sticky: sticky, + DeploymentID: sql.NullString{Valid: true, String: req.GetDeploymentId()}, + CreatedAt: now, + Type: db.DomainsTypeWildcard, + }) + if err != nil { + return fmt.Errorf("failed to insert domain: %w", err) + } + wasChanged = true + return nil + } + + // Domain exists + if req.GetIsRolledBack() { + s.logger.Info("skipping domain assignment - project is rolled back", + "domain_id", existing.ID, + "domain", existing.Domain, + ) + return nil + } + + // Reassign domain to new deployment + err = db.Query.ReassignDomain(txCtx, tx, db.ReassignDomainParams{ + ID: existing.ID, + TargetWorkspaceID: req.GetWorkspaceId(), + DeploymentID: sql.NullString{Valid: true, String: req.GetDeploymentId()}, + UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, + }) + if err != nil { + return fmt.Errorf("failed to reassign domain: %w", err) + } + wasChanged = true + return nil + }) + + return wasChanged, err + }, restate.WithName(fmt.Sprintf("upsert-domain-%s", domain.GetName()))) + + if err != nil { + return nil, err + } + + if changed { + changedDomains = append(changedDomains, domain.GetName()) + } + } + + // Create gateway configs for changed domains (except local ones) + _, err := restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + var gatewayParams []partitiondb.UpsertGatewayParams + var skippedDomains []string + + for _, domainName := range changedDomains { + if isLocalHostname(domainName, s.defaultDomain) { + skippedDomains = append(skippedDomains, domainName) + continue + } + + // Marshal gateway config to JSON + configBytes, err := protojson.Marshal(req.GetGatewayConfig()) + if err != nil { + s.logger.Error("failed to marshal gateway config", "error", err, "domain", domainName) + continue + } + + gatewayParams = append(gatewayParams, partitiondb.UpsertGatewayParams{ + WorkspaceID: req.GetWorkspaceId(), + DeploymentID: req.GetDeploymentId(), + Hostname: domainName, + Config: configBytes, + }) + } + + // Bulk upsert gateway 
configs + if len(gatewayParams) > 0 { + if err := partitiondb.BulkQuery.UpsertGateway(stepCtx, s.partitionDB.RW(), gatewayParams); err != nil { + return restate.Void{}, fmt.Errorf("failed to upsert gateway configs: %w", err) + } + s.logger.Info("created gateway configs", + "count", len(gatewayParams), + "skipped", len(skippedDomains), + ) + } + + return restate.Void{}, nil + }, restate.WithName("create-gateway-configs")) + + if err != nil { + return nil, err + } + + s.logger.Info("domain assignment completed", + "deployment_id", req.GetDeploymentId(), + "changed_domains", len(changedDomains), + ) + + return &hydrav1.AssignDomainsResponse{ + ChangedDomainNames: changedDomains, + }, nil +} diff --git a/go/apps/ctrl/workflows/routing/doc.go b/go/apps/ctrl/workflows/routing/doc.go new file mode 100644 index 0000000000..66a386b191 --- /dev/null +++ b/go/apps/ctrl/workflows/routing/doc.go @@ -0,0 +1,134 @@ +// Package routing implements domain assignment and gateway configuration workflows. +// +// This package manages the relationship between domains, deployments, and gateway +// configurations. It handles creating new domain assignments during deployments and +// switching existing domains between deployments during rollback/promote operations. +// +// # Built on Restate +// +// All workflows in this package are built on top of Restate (restate.dev) for durable +// execution. This provides critical guarantees: +// +// - Automatic retries on transient failures +// - Exactly-once execution semantics for each workflow step +// - Durable state that survives process crashes and restarts +// - Virtual object concurrency control keyed by project ID +// +// The virtual object model ensures that domain operations for a project are serialized, +// preventing race conditions where concurrent operations could create inconsistent routing +// state between the main database and partition database. 
+// +// # Key Types +// +// [Service] is the main entry point that implements routing operations. +// It provides two primary operations: +// +// - [Service.AssignDomains] - Create or reassign domains during deployment +// - [Service.SwitchDomains] - Switch existing domains during rollback/promote +// +// # Usage +// +// The service is typically initialized with database connections: +// +// svc := routing.New(routing.Config{ +// DB: mainDB, +// PartitionDB: partitionDB, +// Logger: logger, +// DefaultDomain: "unkey.app", +// }) +// +// Assign domains during deployment: +// +// resp, err := svc.AssignDomains(ctx, &hydrav1.AssignDomainsRequest{ +// WorkspaceId: "ws_123", +// ProjectId: "proj_456", +// EnvironmentId: "env_789", +// DeploymentId: "dep_abc", +// Domains: []*hydrav1.DomainToAssign{ +// {Name: "api.example.com", Sticky: hydrav1.DomainSticky_DOMAIN_STICKY_ENVIRONMENT}, +// }, +// GatewayConfig: gatewayConfig, +// IsRolledBack: false, +// }) +// +// Switch domains during rollback/promote: +// +// _, err := svc.SwitchDomains(ctx, &hydrav1.SwitchDomainsRequest{ +// TargetDeploymentId: "dep_previous", +// DomainIds: []string{"dom_1", "dom_2"}, +// }) +// +// # Domain Assignment Flow +// +// The AssignDomains operation performs these steps: +// +// 1. For each domain, check if it exists in the database +// 2. If new, create domain record with specified sticky behavior +// 3. If existing and not rolled back, reassign to new deployment +// 4. If existing and rolled back, skip reassignment +// 5. Create gateway configs for all changed domains (except local hostnames) +// 6. Return list of domains that were actually modified +// +// Each domain upsert is wrapped in a restate.Run call with a unique name, allowing +// partial completion tracking. If the workflow fails after creating some domains, +// Restate will skip the already-created domains on retry. +// +// # Domain Switching Flow +// +// The SwitchDomains operation performs these steps: +// +// 1. 
Fetch gateway config for the target deployment +// 2. Fetch domain information (hostnames, workspace IDs) for given domain IDs +// 3. Upsert gateway configs first (atomic update of routing) +// 4. Reassign domains to the target deployment +// +// The gateway configs are updated before domain reassignment to ensure that when a domain +// points to a new deployment, the gateway config is already in place. This prevents a +// window where a domain might route to a deployment without proper configuration. +// +// # Sticky Domain Behavior +// +// Domains can have different sticky behaviors: +// +// - UNSPECIFIED: Per-commit domain, never reassigned (immutable) +// - BRANCH: Sticky to branch, follows latest deployment for that branch +// - ENVIRONMENT: Sticky to environment, follows latest deployment for that environment +// - LIVE: Sticky to live deployment, follows the current production deployment +// +// During deployment, sticky domains (branch, environment, live) are automatically reassigned +// to point to the new deployment. Non-sticky domains remain pointing to their original +// deployment, allowing stable URLs for specific versions. +// +// # Local Hostname Handling +// +// Gateway configs are NOT created for local development hostnames (localhost, 127.0.0.1, +// *.local, *.test). This prevents unnecessary config creation during local development. +// Hostnames using the default domain (e.g., *.unkey.app) ARE configured, as they represent +// production/staging environments. +// +// # Gateway Configuration Format +// +// Gateway configs are stored as JSON (using protojson.Marshal) in the partition database. +// This format was chosen for easier debugging and direct database inspection during +// development. Each config includes deployment ID, VM list, optional auth config, and +// optional validation config. 
+// +// # Atomicity and Consistency +// +// Domain assignment and switching operations affect two databases (main DB for domains, +// partition DB for gateway configs). While not using distributed transactions, the +// operations are ordered carefully: +// +// - On assignment: Domains first, then gateway configs +// - On switching: Gateway configs first, then domain reassignment +// +// Restate's durable execution ensures that if either step fails, the operation will be +// retried until both complete, maintaining eventual consistency between the databases. +// +// # Error Handling +// +// The package uses Restate's error handling model. Terminal errors with appropriate HTTP +// status codes are returned for client errors. System errors are returned for unexpected +// failures that may be retried. Partial failures during bulk operations are logged but +// do not fail the entire operation. +package routing diff --git a/go/apps/ctrl/workflows/routing/helpers.go b/go/apps/ctrl/workflows/routing/helpers.go new file mode 100644 index 0000000000..fffe959ef5 --- /dev/null +++ b/go/apps/ctrl/workflows/routing/helpers.go @@ -0,0 +1,55 @@ +package routing + +import ( + "strings" + + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/go/pkg/db" +) + +// parseDomainSticky converts a protobuf DomainSticky enum to database representation. +// +// Sticky domains automatically follow the latest deployment for their scope (branch, +// environment, or live), while non-sticky domains remain immutable. 
+func parseDomainSticky(sticky hydrav1.DomainSticky) db.NullDomainsSticky { + switch sticky { + case hydrav1.DomainSticky_DOMAIN_STICKY_BRANCH: + return db.NullDomainsSticky{Valid: true, DomainsSticky: db.DomainsStickyBranch} + case hydrav1.DomainSticky_DOMAIN_STICKY_ENVIRONMENT: + return db.NullDomainsSticky{Valid: true, DomainsSticky: db.DomainsStickyEnvironment} + case hydrav1.DomainSticky_DOMAIN_STICKY_LIVE: + return db.NullDomainsSticky{Valid: true, DomainsSticky: db.DomainsStickyLive} + default: + return db.NullDomainsSticky{Valid: false} + } +} + +// isLocalHostname checks if a hostname should be skipped from gateway config creation. +// +// Returns true for localhost and development domains (.local, .test TLDs) that should +// not get gateway configurations. Hostnames using the default domain (e.g., *.unkey.app) +// return false, as they represent production/staging environments and need gateway configs. +func isLocalHostname(hostname, defaultDomain string) bool { + hostname = strings.ToLower(hostname) + defaultDomain = strings.ToLower(defaultDomain) + + // Exact matches for localhost + if hostname == "localhost" || hostname == "127.0.0.1" { + return true + } + + // If hostname uses the default domain, it's NOT local + if strings.HasSuffix(hostname, "."+defaultDomain) || hostname == defaultDomain { + return false + } + + // Check for local-only TLD suffixes + localSuffixes := []string{".local", ".test"} + for _, suffix := range localSuffixes { + if strings.HasSuffix(hostname, suffix) { + return true + } + } + + return false +} diff --git a/go/apps/ctrl/workflows/routing/service.go b/go/apps/ctrl/workflows/routing/service.go new file mode 100644 index 0000000000..222c2de161 --- /dev/null +++ b/go/apps/ctrl/workflows/routing/service.go @@ -0,0 +1,51 @@ +package routing + +import ( + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/go/pkg/db" + "github.com/unkeyed/unkey/go/pkg/otel/logging" +) + +// Service handles routing 
operations - domain assignment and gateway configuration. +// +// This service manages the relationship between domains, deployments, and gateway +// configurations. It handles creating new domain assignments during deployments and +// switching existing domains between deployments during rollback/promote operations. +// +// The service uses Restate virtual objects keyed by project ID to ensure that domain +// operations are serialized, preventing race conditions that could create inconsistent +// routing state. +type Service struct { + hydrav1.UnimplementedRoutingServiceServer + db db.Database + partitionDB db.Database + logger logging.Logger + defaultDomain string +} + +var _ hydrav1.RoutingServiceServer = (*Service)(nil) + +// Config holds the configuration for creating a routing service. +type Config struct { + // Logger for structured logging. + Logger logging.Logger + + // DB is the main database connection for domain data. + DB db.Database + + // PartitionDB is the partition database connection for gateway config storage. + PartitionDB db.Database + + // DefaultDomain is the apex domain used to identify production domains (e.g., "unkey.app"). + DefaultDomain string +} + +// New creates a new routing service instance. 
+func New(cfg Config) *Service { + return &Service{ + db: cfg.DB, + partitionDB: cfg.PartitionDB, + logger: cfg.Logger, + defaultDomain: cfg.DefaultDomain, + } +} diff --git a/go/apps/ctrl/workflows/routing/switch_domains_handler.go b/go/apps/ctrl/workflows/routing/switch_domains_handler.go new file mode 100644 index 0000000000..eadb8807ff --- /dev/null +++ b/go/apps/ctrl/workflows/routing/switch_domains_handler.go @@ -0,0 +1,119 @@ +package routing + +import ( + "database/sql" + "fmt" + "time" + + restate "github.com/restatedev/sdk-go" + hydrav1 "github.com/unkeyed/unkey/go/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/go/pkg/db" + partitiondb "github.com/unkeyed/unkey/go/pkg/partition/db" +) + +// SwitchDomains reassigns existing domains to a different deployment. +// +// This durable workflow performs the following steps: +// 1. Fetch gateway config for the target deployment from partition DB +// 2. Fetch domain information (hostnames, workspace IDs) for given domain IDs +// 3. Upsert gateway configs first (atomic update of routing) +// 4. Reassign domains to the target deployment in main DB +// +// Gateway configs are updated BEFORE domain reassignment to ensure that when a domain +// points to a new deployment, the gateway config is already in place. This prevents a +// window where a domain might route to a deployment without proper configuration. +// +// Each step is wrapped in restate.Run for durability. If the workflow is interrupted, +// it resumes from the last completed step. +// +// This operation is used during rollback and promote workflows to atomically switch +// sticky domains between deployments. 
+func (s *Service) SwitchDomains(ctx restate.ObjectContext, req *hydrav1.SwitchDomainsRequest) (*hydrav1.SwitchDomainsResponse, error) { + s.logger.Info("switching domains", + "target_deployment_id", req.GetTargetDeploymentId(), + "domain_count", len(req.GetDomainIds()), + ) + + // Fetch target deployment's gateway config + gatewayConfig, err := restate.Run(ctx, func(stepCtx restate.RunContext) (partitiondb.FindGatewayByDeploymentIdRow, error) { + return partitiondb.Query.FindGatewayByDeploymentId(stepCtx, s.partitionDB.RO(), req.GetTargetDeploymentId()) + }, restate.WithName("fetch-gateway-config")) + if err != nil { + return nil, fmt.Errorf("failed to fetch gateway config for deployment %s: %w", req.GetTargetDeploymentId(), err) + } + + // Fetch domain info (to get hostnames and workspace_id) + domains, err := restate.Run(ctx, func(stepCtx restate.RunContext) ([]db.FindDomainsByIdsRow, error) { + return db.Query.FindDomainsByIds(stepCtx, s.db.RO(), req.GetDomainIds()) + }, restate.WithName("fetch-domains")) + if err != nil { + return nil, err + } + + // Upsert gateway configs first + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + var gatewayParams []partitiondb.UpsertGatewayParams + + for _, domain := range domains { + if isLocalHostname(domain.Domain, s.defaultDomain) { + continue + } + + gatewayParams = append(gatewayParams, partitiondb.UpsertGatewayParams{ + WorkspaceID: domain.WorkspaceID, + DeploymentID: req.GetTargetDeploymentId(), + Hostname: domain.Domain, + Config: gatewayConfig.Config, + }) + } + + if len(gatewayParams) > 0 { + if err := partitiondb.BulkQuery.UpsertGateway(stepCtx, s.partitionDB.RW(), gatewayParams); err != nil { + return restate.Void{}, fmt.Errorf("failed to upsert gateway configs: %w", err) + } + s.logger.Info("updated gateway configs", "count", len(gatewayParams)) + } + + return restate.Void{}, nil + }, restate.WithName("upsert-gateway-configs")) + + if err != nil { + return nil, err + } + + // 
Reassign domains + _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { + now := time.Now().UnixMilli() + + for _, domain := range domains { + s.logger.Info("reassigning domain", + "domain_id", domain.ID, + "domain_name", domain.Domain, + ) + + err := db.Query.ReassignDomain(stepCtx, s.db.RW(), db.ReassignDomainParams{ + ID: domain.ID, + TargetWorkspaceID: domain.WorkspaceID, + DeploymentID: sql.NullString{Valid: true, String: req.GetTargetDeploymentId()}, + UpdatedAt: sql.NullInt64{Valid: true, Int64: now}, + }) + if err != nil { + return restate.Void{}, fmt.Errorf("failed to reassign domain %s: %w", domain.Domain, err) + } + } + + s.logger.Info("reassigned domains", "count", len(domains)) + return restate.Void{}, nil + }, restate.WithName("reassign-domains")) + + if err != nil { + return nil, err + } + + s.logger.Info("domain switching completed", + "target_deployment_id", req.GetTargetDeploymentId(), + "domain_count", len(domains), + ) + + return &hydrav1.SwitchDomainsResponse{}, nil +} diff --git a/go/buf.gen.yaml b/go/buf.gen.connect.yaml similarity index 93% rename from go/buf.gen.yaml rename to go/buf.gen.connect.yaml index c025ac272f..873286f1ac 100644 --- a/go/buf.gen.yaml +++ b/go/buf.gen.connect.yaml @@ -1,4 +1,6 @@ version: v2 +managed: + enabled: true plugins: - remote: buf.build/protocolbuffers/go:v1.36.8 out: gen/proto diff --git a/go/buf.gen.restate.yaml b/go/buf.gen.restate.yaml new file mode 100644 index 0000000000..354076f6e9 --- /dev/null +++ b/go/buf.gen.restate.yaml @@ -0,0 +1,10 @@ +version: v2 +managed: + enabled: true +plugins: + - remote: buf.build/protocolbuffers/go:v1.36.8 + out: gen/proto + opt: paths=source_relative + - local: protoc-gen-go-restate + out: gen/proto + opt: paths=source_relative diff --git a/go/buf.lock b/go/buf.lock new file mode 100644 index 0000000000..9fe1ca4847 --- /dev/null +++ b/go/buf.lock @@ -0,0 +1,6 @@ +# Generated by buf. DO NOT EDIT. 
+version: v2 +deps: + - name: buf.build/restatedev/sdk-go + commit: 9ea0b54286dd4f35b0cb96ecdf09b402 + digest: b5:822b9362e943c827c36e44b0db519542259439382f94817989349d0ee590617ba70e35975840c5d96ceff278254806435e7d570db81548f9703c00b01eec398e diff --git a/go/buf.yaml b/go/buf.yaml index 55182c58c1..1086ea8efc 100644 --- a/go/buf.yaml +++ b/go/buf.yaml @@ -1,4 +1,5 @@ version: v2 + modules: - path: proto lint: @@ -13,3 +14,5 @@ breaking: - FIELD_NO_DELETE - FIELD_NO_DELETE_UNLESS_NAME_RESERVED - FIELD_NO_DELETE_UNLESS_NUMBER_RESERVED +deps: + - buf.build/restatedev/sdk-go diff --git a/go/clickhouse.test b/go/clickhouse.test deleted file mode 100755 index bbffe10e44..0000000000 Binary files a/go/clickhouse.test and /dev/null differ diff --git a/go/cmd/ctrl/main.go b/go/cmd/ctrl/main.go index 80d832fc96..b682d11945 100644 --- a/go/cmd/ctrl/main.go +++ b/go/cmd/ctrl/main.go @@ -78,6 +78,16 @@ var Cmd = &cli.Command{ cli.String("acme-cloudflare-api-token", "Cloudflare API token for Let's Encrypt", cli.EnvVar("UNKEY_ACME_CLOUDFLARE_API_TOKEN")), cli.String("default-domain", "Default domain for auto-generated hostnames", cli.Default("unkey.app"), cli.EnvVar("UNKEY_DEFAULT_DOMAIN")), + + // Restate Configuration + cli.String("restate-ingress-url", "URL of the Restate ingress endpoint for invoking workflows. Example: http://restate:8080", + cli.Default("http://restate:8080"), cli.EnvVar("UNKEY_RESTATE_INGRESS_URL")), + cli.String("restate-admin-url", "URL of the Restate admin endpoint for service registration. Example: http://restate:9070", + cli.Default("http://restate:9070"), cli.EnvVar("UNKEY_RESTATE_ADMIN_URL")), + cli.Int("restate-http-port", "Port where we listen for Restate HTTP requests. Example: 9080", + cli.Default(9080), cli.EnvVar("UNKEY_RESTATE_HTTP_PORT")), + cli.String("restate-register-as", "URL of this service for self-registration with Restate. 
Example: http://ctrl:9080", + cli.EnvVar("UNKEY_RESTATE_REGISTER_AS")), }, Action: action, } @@ -111,7 +121,6 @@ func action(ctx context.Context, cmd *cli.Command) error { // Database configuration DatabasePrimary: cmd.String("database-primary"), DatabasePartition: cmd.String("database-partition"), - DatabaseHydra: cmd.String("database-hydra"), // Observability OtelEnabled: cmd.Bool("otel"), @@ -146,6 +155,14 @@ func action(ctx context.Context, cmd *cli.Command) error { DefaultDomain: cmd.String("default-domain"), + // Restate configuration + Restate: ctrl.RestateConfig{ + IngressURL: cmd.String("restate-ingress-url"), + AdminURL: cmd.String("restate-admin-url"), + HttpPort: cmd.Int("restate-http-port"), + RegisterAs: cmd.String("restate-register-as"), + }, + // Common Clock: clock.New(), } diff --git a/go/demo_api/main.go b/go/demo_api/main.go index 0f67d6ecbd..2b068e2068 100644 --- a/go/demo_api/main.go +++ b/go/demo_api/main.go @@ -799,7 +799,7 @@ paths: required: true schema: type: string - minLength: 1 + minLength: 3 responses: '200': description: Successful response diff --git a/go/gen/proto/ctrl/v1/acme.pb.go b/go/gen/proto/ctrl/v1/acme.pb.go index 429eb1dd43..7dda49ef65 100644 --- a/go/gen/proto/ctrl/v1/acme.pb.go +++ b/go/gen/proto/ctrl/v1/acme.pb.go @@ -129,7 +129,8 @@ const file_ctrl_v1_acme_proto_rawDesc = "" + "%HandleCertificateVerificationResponse\x12\x14\n" + "\x05token\x18\x01 \x01(\tR\x05token2\x90\x01\n" + "\vAcmeService\x12\x80\x01\n" + - "\x1dHandleCertificateVerification\x12-.ctrl.v1.HandleCertificateVerificationRequest\x1a..ctrl.v1.HandleCertificateVerificationResponse\"\x00B6Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1b\x06proto3" + "\x1dHandleCertificateVerification\x12-.ctrl.v1.HandleCertificateVerificationRequest\x1a..ctrl.v1.HandleCertificateVerificationResponse\"\x00B\x8b\x01\n" + + 
"\vcom.ctrl.v1B\tAcmeProtoP\x01Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1\xa2\x02\x03CXX\xaa\x02\aCtrl.V1\xca\x02\aCtrl\\V1\xe2\x02\x13Ctrl\\V1\\GPBMetadata\xea\x02\bCtrl::V1b\x06proto3" var ( file_ctrl_v1_acme_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/ctrl/v1/build.pb.go b/go/gen/proto/ctrl/v1/build.pb.go index 21831c3dca..a84d2ec329 100644 --- a/go/gen/proto/ctrl/v1/build.pb.go +++ b/go/gen/proto/ctrl/v1/build.pb.go @@ -453,7 +453,9 @@ const file_ctrl_v1_build_proto_rawDesc = "" + "\x16BUILD_STATUS_CANCELLED\x10\x052\x9d\x01\n" + "\fBuildService\x12J\n" + "\vCreateBuild\x12\x1b.ctrl.v1.CreateBuildRequest\x1a\x1c.ctrl.v1.CreateBuildResponse\"\x00\x12A\n" + - "\bGetBuild\x12\x18.ctrl.v1.GetBuildRequest\x1a\x19.ctrl.v1.GetBuildResponse\"\x00B6Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1b\x06proto3" + "\bGetBuild\x12\x18.ctrl.v1.GetBuildRequest\x1a\x19.ctrl.v1.GetBuildResponse\"\x00B\x8c\x01\n" + + "\vcom.ctrl.v1B\n" + + "BuildProtoP\x01Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1\xa2\x02\x03CXX\xaa\x02\aCtrl.V1\xca\x02\aCtrl\\V1\xe2\x02\x13Ctrl\\V1\\GPBMetadata\xea\x02\bCtrl::V1b\x06proto3" var ( file_ctrl_v1_build_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/ctrl/v1/deployment.pb.go b/go/gen/proto/ctrl/v1/deployment.pb.go index 6640f96c5d..279c9cece6 100644 --- a/go/gen/proto/ctrl/v1/deployment.pb.go +++ b/go/gen/proto/ctrl/v1/deployment.pb.go @@ -1100,7 +1100,8 @@ const file_ctrl_v1_deployment_proto_rawDesc = "" + "\x10CreateDeployment\x12 .ctrl.v1.CreateDeploymentRequest\x1a!.ctrl.v1.CreateDeploymentResponse\"\x00\x12P\n" + "\rGetDeployment\x12\x1d.ctrl.v1.GetDeploymentRequest\x1a\x1e.ctrl.v1.GetDeploymentResponse\"\x00\x12A\n" + "\bRollback\x12\x18.ctrl.v1.RollbackRequest\x1a\x19.ctrl.v1.RollbackResponse\"\x00\x12>\n" + - "\aPromote\x12\x17.ctrl.v1.PromoteRequest\x1a\x18.ctrl.v1.PromoteResponse\"\x00B6Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1b\x06proto3" + 
"\aPromote\x12\x17.ctrl.v1.PromoteRequest\x1a\x18.ctrl.v1.PromoteResponse\"\x00B\x91\x01\n" + + "\vcom.ctrl.v1B\x0fDeploymentProtoP\x01Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1\xa2\x02\x03CXX\xaa\x02\aCtrl.V1\xca\x02\aCtrl\\V1\xe2\x02\x13Ctrl\\V1\\GPBMetadata\xea\x02\bCtrl::V1b\x06proto3" var ( file_ctrl_v1_deployment_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/ctrl/v1/openapi.pb.go b/go/gen/proto/ctrl/v1/openapi.pb.go index 361408c30f..4680aef88f 100644 --- a/go/gen/proto/ctrl/v1/openapi.pb.go +++ b/go/gen/proto/ctrl/v1/openapi.pb.go @@ -422,7 +422,8 @@ const file_ctrl_v1_openapi_proto_rawDesc = "" + "\x14has_breaking_changes\x18\x02 \x01(\bR\x12hasBreakingChanges\x121\n" + "\achanges\x18\x03 \x03(\v2\x17.ctrl.v1.ChangelogEntryR\achanges2e\n" + "\x0eOpenApiService\x12S\n" + - "\x0eGetOpenApiDiff\x12\x1e.ctrl.v1.GetOpenApiDiffRequest\x1a\x1f.ctrl.v1.GetOpenApiDiffResponse\"\x00B6Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1b\x06proto3" + "\x0eGetOpenApiDiff\x12\x1e.ctrl.v1.GetOpenApiDiffRequest\x1a\x1f.ctrl.v1.GetOpenApiDiffResponse\"\x00B\x8e\x01\n" + + "\vcom.ctrl.v1B\fOpenapiProtoP\x01Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1\xa2\x02\x03CXX\xaa\x02\aCtrl.V1\xca\x02\aCtrl\\V1\xe2\x02\x13Ctrl\\V1\\GPBMetadata\xea\x02\bCtrl::V1b\x06proto3" var ( file_ctrl_v1_openapi_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/ctrl/v1/service.pb.go b/go/gen/proto/ctrl/v1/service.pb.go index ac99627a34..c6680081e6 100644 --- a/go/gen/proto/ctrl/v1/service.pb.go +++ b/go/gen/proto/ctrl/v1/service.pb.go @@ -129,7 +129,8 @@ const file_ctrl_v1_service_proto_rawDesc = "" + "\vinstance_id\x18\x03 \x01(\tR\n" + "instanceId2P\n" + "\vCtrlService\x12A\n" + - "\bLiveness\x12\x18.ctrl.v1.LivenessRequest\x1a\x19.ctrl.v1.LivenessResponse\"\x00B6Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1b\x06proto3" + "\bLiveness\x12\x18.ctrl.v1.LivenessRequest\x1a\x19.ctrl.v1.LivenessResponse\"\x00B\x8e\x01\n" + + 
"\vcom.ctrl.v1B\fServiceProtoP\x01Z4github.com/unkeyed/unkey/go/gen/proto/ctrl/v1;ctrlv1\xa2\x02\x03CXX\xaa\x02\aCtrl.V1\xca\x02\aCtrl\\V1\xe2\x02\x13Ctrl\\V1\\GPBMetadata\xea\x02\bCtrl::V1b\x06proto3" var ( file_ctrl_v1_service_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/hydra/v1/certificate.pb.go b/go/gen/proto/hydra/v1/certificate.pb.go new file mode 100644 index 0000000000..49c0a3a2d3 --- /dev/null +++ b/go/gen/proto/hydra/v1/certificate.pb.go @@ -0,0 +1,193 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.8 +// protoc (unknown) +// source: hydra/v1/certificate.proto + +package hydrav1 + +import ( + _ "github.com/restatedev/sdk-go/generated/dev/restate/sdk" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ProcessChallengeRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + WorkspaceId string `protobuf:"bytes,1,opt,name=workspace_id,json=workspaceId,proto3" json:"workspace_id,omitempty"` + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ProcessChallengeRequest) Reset() { + *x = ProcessChallengeRequest{} + mi := &file_hydra_v1_certificate_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ProcessChallengeRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessChallengeRequest) ProtoMessage() {} + +func (x *ProcessChallengeRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_certificate_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessChallengeRequest.ProtoReflect.Descriptor instead. 
+func (*ProcessChallengeRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_certificate_proto_rawDescGZIP(), []int{0} +} + +func (x *ProcessChallengeRequest) GetWorkspaceId() string { + if x != nil { + return x.WorkspaceId + } + return "" +} + +func (x *ProcessChallengeRequest) GetDomain() string { + if x != nil { + return x.Domain + } + return "" +} + +type ProcessChallengeResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + CertificateId string `protobuf:"bytes,1,opt,name=certificate_id,json=certificateId,proto3" json:"certificate_id,omitempty"` + Status string `protobuf:"bytes,2,opt,name=status,proto3" json:"status,omitempty"` // "success", "failed", "pending" + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ProcessChallengeResponse) Reset() { + *x = ProcessChallengeResponse{} + mi := &file_hydra_v1_certificate_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ProcessChallengeResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ProcessChallengeResponse) ProtoMessage() {} + +func (x *ProcessChallengeResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_certificate_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ProcessChallengeResponse.ProtoReflect.Descriptor instead. 
+func (*ProcessChallengeResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_certificate_proto_rawDescGZIP(), []int{1} +} + +func (x *ProcessChallengeResponse) GetCertificateId() string { + if x != nil { + return x.CertificateId + } + return "" +} + +func (x *ProcessChallengeResponse) GetStatus() string { + if x != nil { + return x.Status + } + return "" +} + +var File_hydra_v1_certificate_proto protoreflect.FileDescriptor + +const file_hydra_v1_certificate_proto_rawDesc = "" + + "\n" + + "\x1ahydra/v1/certificate.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"T\n" + + "\x17ProcessChallengeRequest\x12!\n" + + "\fworkspace_id\x18\x01 \x01(\tR\vworkspaceId\x12\x16\n" + + "\x06domain\x18\x02 \x01(\tR\x06domain\"Y\n" + + "\x18ProcessChallengeResponse\x12%\n" + + "\x0ecertificate_id\x18\x01 \x01(\tR\rcertificateId\x12\x16\n" + + "\x06status\x18\x02 \x01(\tR\x06status2w\n" + + "\x12CertificateService\x12[\n" + + "\x10ProcessChallenge\x12!.hydra.v1.ProcessChallengeRequest\x1a\".hydra.v1.ProcessChallengeResponse\"\x00\x1a\x04\x98\x80\x01\x01B\x99\x01\n" + + "\fcom.hydra.v1B\x10CertificateProtoP\x01Z6github.com/unkeyed/unkey/go/gen/proto/hydra/v1;hydrav1\xa2\x02\x03HXX\xaa\x02\bHydra.V1\xca\x02\bHydra\\V1\xe2\x02\x14Hydra\\V1\\GPBMetadata\xea\x02\tHydra::V1b\x06proto3" + +var ( + file_hydra_v1_certificate_proto_rawDescOnce sync.Once + file_hydra_v1_certificate_proto_rawDescData []byte +) + +func file_hydra_v1_certificate_proto_rawDescGZIP() []byte { + file_hydra_v1_certificate_proto_rawDescOnce.Do(func() { + file_hydra_v1_certificate_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_hydra_v1_certificate_proto_rawDesc), len(file_hydra_v1_certificate_proto_rawDesc))) + }) + return file_hydra_v1_certificate_proto_rawDescData +} + +var file_hydra_v1_certificate_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_hydra_v1_certificate_proto_goTypes = []any{ + (*ProcessChallengeRequest)(nil), // 0: 
hydra.v1.ProcessChallengeRequest + (*ProcessChallengeResponse)(nil), // 1: hydra.v1.ProcessChallengeResponse +} +var file_hydra_v1_certificate_proto_depIdxs = []int32{ + 0, // 0: hydra.v1.CertificateService.ProcessChallenge:input_type -> hydra.v1.ProcessChallengeRequest + 1, // 1: hydra.v1.CertificateService.ProcessChallenge:output_type -> hydra.v1.ProcessChallengeResponse + 1, // [1:2] is the sub-list for method output_type + 0, // [0:1] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_hydra_v1_certificate_proto_init() } +func file_hydra_v1_certificate_proto_init() { + if File_hydra_v1_certificate_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_hydra_v1_certificate_proto_rawDesc), len(file_hydra_v1_certificate_proto_rawDesc)), + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_hydra_v1_certificate_proto_goTypes, + DependencyIndexes: file_hydra_v1_certificate_proto_depIdxs, + MessageInfos: file_hydra_v1_certificate_proto_msgTypes, + }.Build() + File_hydra_v1_certificate_proto = out.File + file_hydra_v1_certificate_proto_goTypes = nil + file_hydra_v1_certificate_proto_depIdxs = nil +} diff --git a/go/gen/proto/hydra/v1/certificate_restate.pb.go b/go/gen/proto/hydra/v1/certificate_restate.pb.go new file mode 100644 index 0000000000..b17513c944 --- /dev/null +++ b/go/gen/proto/hydra/v1/certificate_restate.pb.go @@ -0,0 +1,87 @@ +// Code generated by protoc-gen-go-restate. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-restate v0.1 +// - protoc (unknown) +// source: hydra/v1/certificate.proto + +package hydrav1 + +import ( + fmt "fmt" + sdk_go "github.com/restatedev/sdk-go" +) + +// CertificateServiceClient is the client API for hydra.v1.CertificateService service. +// +// CertificateService manages ACME certificate challenges and issuance +type CertificateServiceClient interface { + // ProcessChallenge handles the complete ACME certificate challenge flow + // Key: domain name (ensures only one challenge per domain at a time) + ProcessChallenge(opts ...sdk_go.ClientOption) sdk_go.Client[*ProcessChallengeRequest, *ProcessChallengeResponse] +} + +type certificateServiceClient struct { + ctx sdk_go.Context + key string + options []sdk_go.ClientOption +} + +func NewCertificateServiceClient(ctx sdk_go.Context, key string, opts ...sdk_go.ClientOption) CertificateServiceClient { + cOpts := append([]sdk_go.ClientOption{sdk_go.WithProtoJSON}, opts...) + return &certificateServiceClient{ + ctx, + key, + cOpts, + } +} +func (c *certificateServiceClient) ProcessChallenge(opts ...sdk_go.ClientOption) sdk_go.Client[*ProcessChallengeRequest, *ProcessChallengeResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*ProcessChallengeRequest](sdk_go.Object[*ProcessChallengeResponse](c.ctx, "hydra.v1.CertificateService", c.key, "ProcessChallenge", cOpts...)) +} + +// CertificateServiceServer is the server API for hydra.v1.CertificateService service. +// All implementations should embed UnimplementedCertificateServiceServer +// for forward compatibility. 
+// +// CertificateService manages ACME certificate challenges and issuance +type CertificateServiceServer interface { + // ProcessChallenge handles the complete ACME certificate challenge flow + // Key: domain name (ensures only one challenge per domain at a time) + ProcessChallenge(ctx sdk_go.ObjectContext, req *ProcessChallengeRequest) (*ProcessChallengeResponse, error) +} + +// UnimplementedCertificateServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedCertificateServiceServer struct{} + +func (UnimplementedCertificateServiceServer) ProcessChallenge(ctx sdk_go.ObjectContext, req *ProcessChallengeRequest) (*ProcessChallengeResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method ProcessChallenge not implemented"), 501) +} +func (UnimplementedCertificateServiceServer) testEmbeddedByValue() {} + +// UnsafeCertificateServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to CertificateServiceServer will +// result in compilation errors. +type UnsafeCertificateServiceServer interface { + mustEmbedUnimplementedCertificateServiceServer() +} + +func NewCertificateServiceServer(srv CertificateServiceServer, opts ...sdk_go.ServiceDefinitionOption) sdk_go.ServiceDefinition { + // If the following call panics, it indicates UnimplementedCertificateServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + sOpts := append([]sdk_go.ServiceDefinitionOption{sdk_go.WithProtoJSON}, opts...) 
+ router := sdk_go.NewObject("hydra.v1.CertificateService", sOpts...) + router = router.Handler("ProcessChallenge", sdk_go.NewObjectHandler(srv.ProcessChallenge)) + return router +} diff --git a/go/gen/proto/hydra/v1/deployment.pb.go b/go/gen/proto/hydra/v1/deployment.pb.go new file mode 100644 index 0000000000..cd07d2c5b7 --- /dev/null +++ b/go/gen/proto/hydra/v1/deployment.pb.go @@ -0,0 +1,371 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.8 +// protoc (unknown) +// source: hydra/v1/deployment.proto + +package hydrav1 + +import ( + _ "github.com/restatedev/sdk-go/generated/dev/restate/sdk" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. 
+ _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DeployRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentId string `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"deployment_id,omitempty"` + DockerImage string `protobuf:"bytes,2,opt,name=docker_image,json=dockerImage,proto3" json:"docker_image,omitempty"` + KeyAuthId *string `protobuf:"bytes,3,opt,name=key_auth_id,json=keyAuthId,proto3,oneof" json:"key_auth_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeployRequest) Reset() { + *x = DeployRequest{} + mi := &file_hydra_v1_deployment_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeployRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeployRequest) ProtoMessage() {} + +func (x *DeployRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeployRequest.ProtoReflect.Descriptor instead. 
+func (*DeployRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{0} +} + +func (x *DeployRequest) GetDeploymentId() string { + if x != nil { + return x.DeploymentId + } + return "" +} + +func (x *DeployRequest) GetDockerImage() string { + if x != nil { + return x.DockerImage + } + return "" +} + +func (x *DeployRequest) GetKeyAuthId() string { + if x != nil && x.KeyAuthId != nil { + return *x.KeyAuthId + } + return "" +} + +type DeployResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeployResponse) Reset() { + *x = DeployResponse{} + mi := &file_hydra_v1_deployment_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeployResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeployResponse) ProtoMessage() {} + +func (x *DeployResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeployResponse.ProtoReflect.Descriptor instead. 
+func (*DeployResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{1} +} + +type RollbackRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + SourceDeploymentId string `protobuf:"bytes,1,opt,name=source_deployment_id,json=sourceDeploymentId,proto3" json:"source_deployment_id,omitempty"` + TargetDeploymentId string `protobuf:"bytes,2,opt,name=target_deployment_id,json=targetDeploymentId,proto3" json:"target_deployment_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RollbackRequest) Reset() { + *x = RollbackRequest{} + mi := &file_hydra_v1_deployment_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RollbackRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RollbackRequest) ProtoMessage() {} + +func (x *RollbackRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead. 
+func (*RollbackRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{2} +} + +func (x *RollbackRequest) GetSourceDeploymentId() string { + if x != nil { + return x.SourceDeploymentId + } + return "" +} + +func (x *RollbackRequest) GetTargetDeploymentId() string { + if x != nil { + return x.TargetDeploymentId + } + return "" +} + +type RollbackResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RollbackResponse) Reset() { + *x = RollbackResponse{} + mi := &file_hydra_v1_deployment_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RollbackResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RollbackResponse) ProtoMessage() {} + +func (x *RollbackResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RollbackResponse.ProtoReflect.Descriptor instead. 
+func (*RollbackResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{3} +} + +type PromoteRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TargetDeploymentId string `protobuf:"bytes,1,opt,name=target_deployment_id,json=targetDeploymentId,proto3" json:"target_deployment_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PromoteRequest) Reset() { + *x = PromoteRequest{} + mi := &file_hydra_v1_deployment_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PromoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PromoteRequest) ProtoMessage() {} + +func (x *PromoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PromoteRequest.ProtoReflect.Descriptor instead. 
+func (*PromoteRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{4} +} + +func (x *PromoteRequest) GetTargetDeploymentId() string { + if x != nil { + return x.TargetDeploymentId + } + return "" +} + +type PromoteResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PromoteResponse) Reset() { + *x = PromoteResponse{} + mi := &file_hydra_v1_deployment_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PromoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PromoteResponse) ProtoMessage() {} + +func (x *PromoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[5] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PromoteResponse.ProtoReflect.Descriptor instead. 
+func (*PromoteResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{5} +} + +var File_hydra_v1_deployment_proto protoreflect.FileDescriptor + +const file_hydra_v1_deployment_proto_rawDesc = "" + + "\n" + + "\x19hydra/v1/deployment.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"\x8c\x01\n" + + "\rDeployRequest\x12#\n" + + "\rdeployment_id\x18\x01 \x01(\tR\fdeploymentId\x12!\n" + + "\fdocker_image\x18\x02 \x01(\tR\vdockerImage\x12#\n" + + "\vkey_auth_id\x18\x03 \x01(\tH\x00R\tkeyAuthId\x88\x01\x01B\x0e\n" + + "\f_key_auth_id\"\x10\n" + + "\x0eDeployResponse\"u\n" + + "\x0fRollbackRequest\x120\n" + + "\x14source_deployment_id\x18\x01 \x01(\tR\x12sourceDeploymentId\x120\n" + + "\x14target_deployment_id\x18\x02 \x01(\tR\x12targetDeploymentId\"\x12\n" + + "\x10RollbackResponse\"B\n" + + "\x0ePromoteRequest\x120\n" + + "\x14target_deployment_id\x18\x01 \x01(\tR\x12targetDeploymentId\"\x11\n" + + "\x0fPromoteResponse2\xdf\x01\n" + + "\x11DeploymentService\x12=\n" + + "\x06Deploy\x12\x17.hydra.v1.DeployRequest\x1a\x18.hydra.v1.DeployResponse\"\x00\x12C\n" + + "\bRollback\x12\x19.hydra.v1.RollbackRequest\x1a\x1a.hydra.v1.RollbackResponse\"\x00\x12@\n" + + "\aPromote\x12\x18.hydra.v1.PromoteRequest\x1a\x19.hydra.v1.PromoteResponse\"\x00\x1a\x04\x98\x80\x01\x01B\x98\x01\n" + + "\fcom.hydra.v1B\x0fDeploymentProtoP\x01Z6github.com/unkeyed/unkey/go/gen/proto/hydra/v1;hydrav1\xa2\x02\x03HXX\xaa\x02\bHydra.V1\xca\x02\bHydra\\V1\xe2\x02\x14Hydra\\V1\\GPBMetadata\xea\x02\tHydra::V1b\x06proto3" + +var ( + file_hydra_v1_deployment_proto_rawDescOnce sync.Once + file_hydra_v1_deployment_proto_rawDescData []byte +) + +func file_hydra_v1_deployment_proto_rawDescGZIP() []byte { + file_hydra_v1_deployment_proto_rawDescOnce.Do(func() { + file_hydra_v1_deployment_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_hydra_v1_deployment_proto_rawDesc), len(file_hydra_v1_deployment_proto_rawDesc))) + }) + return 
file_hydra_v1_deployment_proto_rawDescData +} + +var file_hydra_v1_deployment_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_hydra_v1_deployment_proto_goTypes = []any{ + (*DeployRequest)(nil), // 0: hydra.v1.DeployRequest + (*DeployResponse)(nil), // 1: hydra.v1.DeployResponse + (*RollbackRequest)(nil), // 2: hydra.v1.RollbackRequest + (*RollbackResponse)(nil), // 3: hydra.v1.RollbackResponse + (*PromoteRequest)(nil), // 4: hydra.v1.PromoteRequest + (*PromoteResponse)(nil), // 5: hydra.v1.PromoteResponse +} +var file_hydra_v1_deployment_proto_depIdxs = []int32{ + 0, // 0: hydra.v1.DeploymentService.Deploy:input_type -> hydra.v1.DeployRequest + 2, // 1: hydra.v1.DeploymentService.Rollback:input_type -> hydra.v1.RollbackRequest + 4, // 2: hydra.v1.DeploymentService.Promote:input_type -> hydra.v1.PromoteRequest + 1, // 3: hydra.v1.DeploymentService.Deploy:output_type -> hydra.v1.DeployResponse + 3, // 4: hydra.v1.DeploymentService.Rollback:output_type -> hydra.v1.RollbackResponse + 5, // 5: hydra.v1.DeploymentService.Promote:output_type -> hydra.v1.PromoteResponse + 3, // [3:6] is the sub-list for method output_type + 0, // [0:3] is the sub-list for method input_type + 0, // [0:0] is the sub-list for extension type_name + 0, // [0:0] is the sub-list for extension extendee + 0, // [0:0] is the sub-list for field type_name +} + +func init() { file_hydra_v1_deployment_proto_init() } +func file_hydra_v1_deployment_proto_init() { + if File_hydra_v1_deployment_proto != nil { + return + } + file_hydra_v1_deployment_proto_msgTypes[0].OneofWrappers = []any{} + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_hydra_v1_deployment_proto_rawDesc), len(file_hydra_v1_deployment_proto_rawDesc)), + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_hydra_v1_deployment_proto_goTypes, + DependencyIndexes: 
file_hydra_v1_deployment_proto_depIdxs, + MessageInfos: file_hydra_v1_deployment_proto_msgTypes, + }.Build() + File_hydra_v1_deployment_proto = out.File + file_hydra_v1_deployment_proto_goTypes = nil + file_hydra_v1_deployment_proto_depIdxs = nil +} diff --git a/go/gen/proto/hydra/v1/deployment_restate.pb.go b/go/gen/proto/hydra/v1/deployment_restate.pb.go new file mode 100644 index 0000000000..537aa70737 --- /dev/null +++ b/go/gen/proto/hydra/v1/deployment_restate.pb.go @@ -0,0 +1,107 @@ +// Code generated by protoc-gen-go-restate. DO NOT EDIT. +// versions: +// - protoc-gen-go-restate v0.1 +// - protoc (unknown) +// source: hydra/v1/deployment.proto + +package hydrav1 + +import ( + fmt "fmt" + sdk_go "github.com/restatedev/sdk-go" +) + +// DeploymentServiceClient is the client API for hydra.v1.DeploymentService service. +type DeploymentServiceClient interface { + Deploy(opts ...sdk_go.ClientOption) sdk_go.Client[*DeployRequest, *DeployResponse] + Rollback(opts ...sdk_go.ClientOption) sdk_go.Client[*RollbackRequest, *RollbackResponse] + Promote(opts ...sdk_go.ClientOption) sdk_go.Client[*PromoteRequest, *PromoteResponse] +} + +type deploymentServiceClient struct { + ctx sdk_go.Context + key string + options []sdk_go.ClientOption +} + +func NewDeploymentServiceClient(ctx sdk_go.Context, key string, opts ...sdk_go.ClientOption) DeploymentServiceClient { + cOpts := append([]sdk_go.ClientOption{sdk_go.WithProtoJSON}, opts...) + return &deploymentServiceClient{ + ctx, + key, + cOpts, + } +} +func (c *deploymentServiceClient) Deploy(opts ...sdk_go.ClientOption) sdk_go.Client[*DeployRequest, *DeployResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) 
+ } + return sdk_go.WithRequestType[*DeployRequest](sdk_go.Object[*DeployResponse](c.ctx, "hydra.v1.DeploymentService", c.key, "Deploy", cOpts...)) +} + +func (c *deploymentServiceClient) Rollback(opts ...sdk_go.ClientOption) sdk_go.Client[*RollbackRequest, *RollbackResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*RollbackRequest](sdk_go.Object[*RollbackResponse](c.ctx, "hydra.v1.DeploymentService", c.key, "Rollback", cOpts...)) +} + +func (c *deploymentServiceClient) Promote(opts ...sdk_go.ClientOption) sdk_go.Client[*PromoteRequest, *PromoteResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*PromoteRequest](sdk_go.Object[*PromoteResponse](c.ctx, "hydra.v1.DeploymentService", c.key, "Promote", cOpts...)) +} + +// DeploymentServiceServer is the server API for hydra.v1.DeploymentService service. +// All implementations should embed UnimplementedDeploymentServiceServer +// for forward compatibility. +type DeploymentServiceServer interface { + Deploy(ctx sdk_go.ObjectContext, req *DeployRequest) (*DeployResponse, error) + Rollback(ctx sdk_go.ObjectContext, req *RollbackRequest) (*RollbackResponse, error) + Promote(ctx sdk_go.ObjectContext, req *PromoteRequest) (*PromoteResponse, error) +} + +// UnimplementedDeploymentServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedDeploymentServiceServer struct{} + +func (UnimplementedDeploymentServiceServer) Deploy(ctx sdk_go.ObjectContext, req *DeployRequest) (*DeployResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method Deploy not implemented"), 501) +} +func (UnimplementedDeploymentServiceServer) Rollback(ctx sdk_go.ObjectContext, req *RollbackRequest) (*RollbackResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method Rollback not implemented"), 501) +} +func (UnimplementedDeploymentServiceServer) Promote(ctx sdk_go.ObjectContext, req *PromoteRequest) (*PromoteResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method Promote not implemented"), 501) +} +func (UnimplementedDeploymentServiceServer) testEmbeddedByValue() {} + +// UnsafeDeploymentServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DeploymentServiceServer will +// result in compilation errors. +type UnsafeDeploymentServiceServer interface { + mustEmbedUnimplementedDeploymentServiceServer() +} + +func NewDeploymentServiceServer(srv DeploymentServiceServer, opts ...sdk_go.ServiceDefinitionOption) sdk_go.ServiceDefinition { + // If the following call panics, it indicates UnimplementedDeploymentServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + sOpts := append([]sdk_go.ServiceDefinitionOption{sdk_go.WithProtoJSON}, opts...) + router := sdk_go.NewObject("hydra.v1.DeploymentService", sOpts...) 
+ router = router.Handler("Deploy", sdk_go.NewObjectHandler(srv.Deploy)) + router = router.Handler("Rollback", sdk_go.NewObjectHandler(srv.Rollback)) + router = router.Handler("Promote", sdk_go.NewObjectHandler(srv.Promote)) + return router +} diff --git a/go/gen/proto/hydra/v1/routing.pb.go b/go/gen/proto/hydra/v1/routing.pb.go new file mode 100644 index 0000000000..ebe1927408 --- /dev/null +++ b/go/gen/proto/hydra/v1/routing.pb.go @@ -0,0 +1,456 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.8 +// protoc (unknown) +// source: hydra/v1/routing.proto + +package hydrav1 + +import ( + _ "github.com/restatedev/sdk-go/generated/dev/restate/sdk" + v1 "github.com/unkeyed/unkey/go/gen/proto/partition/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type DomainSticky int32 + +const ( + DomainSticky_DOMAIN_STICKY_UNSPECIFIED DomainSticky = 0 // Not sticky (per-commit domain) + DomainSticky_DOMAIN_STICKY_BRANCH DomainSticky = 1 // Sticky to branch + DomainSticky_DOMAIN_STICKY_ENVIRONMENT DomainSticky = 2 // Sticky to environment + DomainSticky_DOMAIN_STICKY_LIVE DomainSticky = 3 // Sticky to live deployment +) + +// Enum value maps for DomainSticky. 
+var ( + DomainSticky_name = map[int32]string{ + 0: "DOMAIN_STICKY_UNSPECIFIED", + 1: "DOMAIN_STICKY_BRANCH", + 2: "DOMAIN_STICKY_ENVIRONMENT", + 3: "DOMAIN_STICKY_LIVE", + } + DomainSticky_value = map[string]int32{ + "DOMAIN_STICKY_UNSPECIFIED": 0, + "DOMAIN_STICKY_BRANCH": 1, + "DOMAIN_STICKY_ENVIRONMENT": 2, + "DOMAIN_STICKY_LIVE": 3, + } +) + +func (x DomainSticky) Enum() *DomainSticky { + p := new(DomainSticky) + *p = x + return p +} + +func (x DomainSticky) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (DomainSticky) Descriptor() protoreflect.EnumDescriptor { + return file_hydra_v1_routing_proto_enumTypes[0].Descriptor() +} + +func (DomainSticky) Type() protoreflect.EnumType { + return &file_hydra_v1_routing_proto_enumTypes[0] +} + +func (x DomainSticky) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use DomainSticky.Descriptor instead. +func (DomainSticky) EnumDescriptor() ([]byte, []int) { + return file_hydra_v1_routing_proto_rawDescGZIP(), []int{0} +} + +// AssignDomainsRequest is used when deploying - creates/updates domains and gateway configs +type AssignDomainsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + WorkspaceId string `protobuf:"bytes,1,opt,name=workspace_id,json=workspaceId,proto3" json:"workspace_id,omitempty"` + ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` + EnvironmentId string `protobuf:"bytes,3,opt,name=environment_id,json=environmentId,proto3" json:"environment_id,omitempty"` + DeploymentId string `protobuf:"bytes,4,opt,name=deployment_id,json=deploymentId,proto3" json:"deployment_id,omitempty"` + // Domains to create/assign (by name) + Domains []*DomainToAssign `protobuf:"bytes,5,rep,name=domains,proto3" json:"domains,omitempty"` + // Gateway configuration + GatewayConfig *v1.GatewayConfig 
`protobuf:"bytes,6,opt,name=gateway_config,json=gatewayConfig,proto3" json:"gateway_config,omitempty"` + // Whether the project is currently rolled back (skip domain assignment if true) + IsRolledBack bool `protobuf:"varint,7,opt,name=is_rolled_back,json=isRolledBack,proto3" json:"is_rolled_back,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssignDomainsRequest) Reset() { + *x = AssignDomainsRequest{} + mi := &file_hydra_v1_routing_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssignDomainsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignDomainsRequest) ProtoMessage() {} + +func (x *AssignDomainsRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_routing_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignDomainsRequest.ProtoReflect.Descriptor instead. 
+func (*AssignDomainsRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_routing_proto_rawDescGZIP(), []int{0} +} + +func (x *AssignDomainsRequest) GetWorkspaceId() string { + if x != nil { + return x.WorkspaceId + } + return "" +} + +func (x *AssignDomainsRequest) GetProjectId() string { + if x != nil { + return x.ProjectId + } + return "" +} + +func (x *AssignDomainsRequest) GetEnvironmentId() string { + if x != nil { + return x.EnvironmentId + } + return "" +} + +func (x *AssignDomainsRequest) GetDeploymentId() string { + if x != nil { + return x.DeploymentId + } + return "" +} + +func (x *AssignDomainsRequest) GetDomains() []*DomainToAssign { + if x != nil { + return x.Domains + } + return nil +} + +func (x *AssignDomainsRequest) GetGatewayConfig() *v1.GatewayConfig { + if x != nil { + return x.GatewayConfig + } + return nil +} + +func (x *AssignDomainsRequest) GetIsRolledBack() bool { + if x != nil { + return x.IsRolledBack + } + return false +} + +type DomainToAssign struct { + state protoimpl.MessageState `protogen:"open.v1"` + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Sticky DomainSticky `protobuf:"varint,2,opt,name=sticky,proto3,enum=hydra.v1.DomainSticky" json:"sticky,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DomainToAssign) Reset() { + *x = DomainToAssign{} + mi := &file_hydra_v1_routing_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DomainToAssign) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DomainToAssign) ProtoMessage() {} + +func (x *DomainToAssign) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_routing_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use 
DomainToAssign.ProtoReflect.Descriptor instead. +func (*DomainToAssign) Descriptor() ([]byte, []int) { + return file_hydra_v1_routing_proto_rawDescGZIP(), []int{1} +} + +func (x *DomainToAssign) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DomainToAssign) GetSticky() DomainSticky { + if x != nil { + return x.Sticky + } + return DomainSticky_DOMAIN_STICKY_UNSPECIFIED +} + +type AssignDomainsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Domain names that were actually changed (created or reassigned) + ChangedDomainNames []string `protobuf:"bytes,1,rep,name=changed_domain_names,json=changedDomainNames,proto3" json:"changed_domain_names,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *AssignDomainsResponse) Reset() { + *x = AssignDomainsResponse{} + mi := &file_hydra_v1_routing_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *AssignDomainsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AssignDomainsResponse) ProtoMessage() {} + +func (x *AssignDomainsResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_routing_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AssignDomainsResponse.ProtoReflect.Descriptor instead. 
+func (*AssignDomainsResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_routing_proto_rawDescGZIP(), []int{2} +} + +func (x *AssignDomainsResponse) GetChangedDomainNames() []string { + if x != nil { + return x.ChangedDomainNames + } + return nil +} + +// SwitchDomainsRequest is used for rollback/promote - switches existing domains to a different deployment +type SwitchDomainsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TargetDeploymentId string `protobuf:"bytes,1,opt,name=target_deployment_id,json=targetDeploymentId,proto3" json:"target_deployment_id,omitempty"` + // Domain IDs to switch (must already exist in database) + DomainIds []string `protobuf:"bytes,2,rep,name=domain_ids,json=domainIds,proto3" json:"domain_ids,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SwitchDomainsRequest) Reset() { + *x = SwitchDomainsRequest{} + mi := &file_hydra_v1_routing_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SwitchDomainsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SwitchDomainsRequest) ProtoMessage() {} + +func (x *SwitchDomainsRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_routing_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SwitchDomainsRequest.ProtoReflect.Descriptor instead. 
+func (*SwitchDomainsRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_routing_proto_rawDescGZIP(), []int{3} +} + +func (x *SwitchDomainsRequest) GetTargetDeploymentId() string { + if x != nil { + return x.TargetDeploymentId + } + return "" +} + +func (x *SwitchDomainsRequest) GetDomainIds() []string { + if x != nil { + return x.DomainIds + } + return nil +} + +type SwitchDomainsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *SwitchDomainsResponse) Reset() { + *x = SwitchDomainsResponse{} + mi := &file_hydra_v1_routing_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *SwitchDomainsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SwitchDomainsResponse) ProtoMessage() {} + +func (x *SwitchDomainsResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_routing_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SwitchDomainsResponse.ProtoReflect.Descriptor instead. 
+func (*SwitchDomainsResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_routing_proto_rawDescGZIP(), []int{4} +} + +var File_hydra_v1_routing_proto protoreflect.FileDescriptor + +const file_hydra_v1_routing_proto_rawDesc = "" + + "\n" + + "\x16hydra/v1/routing.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\x1a\x1apartition/v1/gateway.proto\"\xc2\x02\n" + + "\x14AssignDomainsRequest\x12!\n" + + "\fworkspace_id\x18\x01 \x01(\tR\vworkspaceId\x12\x1d\n" + + "\n" + + "project_id\x18\x02 \x01(\tR\tprojectId\x12%\n" + + "\x0eenvironment_id\x18\x03 \x01(\tR\renvironmentId\x12#\n" + + "\rdeployment_id\x18\x04 \x01(\tR\fdeploymentId\x122\n" + + "\adomains\x18\x05 \x03(\v2\x18.hydra.v1.DomainToAssignR\adomains\x12B\n" + + "\x0egateway_config\x18\x06 \x01(\v2\x1b.partition.v1.GatewayConfigR\rgatewayConfig\x12$\n" + + "\x0eis_rolled_back\x18\a \x01(\bR\fisRolledBack\"T\n" + + "\x0eDomainToAssign\x12\x12\n" + + "\x04name\x18\x01 \x01(\tR\x04name\x12.\n" + + "\x06sticky\x18\x02 \x01(\x0e2\x16.hydra.v1.DomainStickyR\x06sticky\"I\n" + + "\x15AssignDomainsResponse\x120\n" + + "\x14changed_domain_names\x18\x01 \x03(\tR\x12changedDomainNames\"g\n" + + "\x14SwitchDomainsRequest\x120\n" + + "\x14target_deployment_id\x18\x01 \x01(\tR\x12targetDeploymentId\x12\x1d\n" + + "\n" + + "domain_ids\x18\x02 \x03(\tR\tdomainIds\"\x17\n" + + "\x15SwitchDomainsResponse*~\n" + + "\fDomainSticky\x12\x1d\n" + + "\x19DOMAIN_STICKY_UNSPECIFIED\x10\x00\x12\x18\n" + + "\x14DOMAIN_STICKY_BRANCH\x10\x01\x12\x1d\n" + + "\x19DOMAIN_STICKY_ENVIRONMENT\x10\x02\x12\x16\n" + + "\x12DOMAIN_STICKY_LIVE\x10\x032\xbe\x01\n" + + "\x0eRoutingService\x12R\n" + + "\rAssignDomains\x12\x1e.hydra.v1.AssignDomainsRequest\x1a\x1f.hydra.v1.AssignDomainsResponse\"\x00\x12R\n" + + "\rSwitchDomains\x12\x1e.hydra.v1.SwitchDomainsRequest\x1a\x1f.hydra.v1.SwitchDomainsResponse\"\x00\x1a\x04\x98\x80\x01\x01B\x95\x01\n" + + 
"\fcom.hydra.v1B\fRoutingProtoP\x01Z6github.com/unkeyed/unkey/go/gen/proto/hydra/v1;hydrav1\xa2\x02\x03HXX\xaa\x02\bHydra.V1\xca\x02\bHydra\\V1\xe2\x02\x14Hydra\\V1\\GPBMetadata\xea\x02\tHydra::V1b\x06proto3" + +var ( + file_hydra_v1_routing_proto_rawDescOnce sync.Once + file_hydra_v1_routing_proto_rawDescData []byte +) + +func file_hydra_v1_routing_proto_rawDescGZIP() []byte { + file_hydra_v1_routing_proto_rawDescOnce.Do(func() { + file_hydra_v1_routing_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_hydra_v1_routing_proto_rawDesc), len(file_hydra_v1_routing_proto_rawDesc))) + }) + return file_hydra_v1_routing_proto_rawDescData +} + +var file_hydra_v1_routing_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_hydra_v1_routing_proto_msgTypes = make([]protoimpl.MessageInfo, 5) +var file_hydra_v1_routing_proto_goTypes = []any{ + (DomainSticky)(0), // 0: hydra.v1.DomainSticky + (*AssignDomainsRequest)(nil), // 1: hydra.v1.AssignDomainsRequest + (*DomainToAssign)(nil), // 2: hydra.v1.DomainToAssign + (*AssignDomainsResponse)(nil), // 3: hydra.v1.AssignDomainsResponse + (*SwitchDomainsRequest)(nil), // 4: hydra.v1.SwitchDomainsRequest + (*SwitchDomainsResponse)(nil), // 5: hydra.v1.SwitchDomainsResponse + (*v1.GatewayConfig)(nil), // 6: partition.v1.GatewayConfig +} +var file_hydra_v1_routing_proto_depIdxs = []int32{ + 2, // 0: hydra.v1.AssignDomainsRequest.domains:type_name -> hydra.v1.DomainToAssign + 6, // 1: hydra.v1.AssignDomainsRequest.gateway_config:type_name -> partition.v1.GatewayConfig + 0, // 2: hydra.v1.DomainToAssign.sticky:type_name -> hydra.v1.DomainSticky + 1, // 3: hydra.v1.RoutingService.AssignDomains:input_type -> hydra.v1.AssignDomainsRequest + 4, // 4: hydra.v1.RoutingService.SwitchDomains:input_type -> hydra.v1.SwitchDomainsRequest + 3, // 5: hydra.v1.RoutingService.AssignDomains:output_type -> hydra.v1.AssignDomainsResponse + 5, // 6: hydra.v1.RoutingService.SwitchDomains:output_type -> 
hydra.v1.SwitchDomainsResponse + 5, // [5:7] is the sub-list for method output_type + 3, // [3:5] is the sub-list for method input_type + 3, // [3:3] is the sub-list for extension type_name + 3, // [3:3] is the sub-list for extension extendee + 0, // [0:3] is the sub-list for field type_name +} + +func init() { file_hydra_v1_routing_proto_init() } +func file_hydra_v1_routing_proto_init() { + if File_hydra_v1_routing_proto != nil { + return + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_hydra_v1_routing_proto_rawDesc), len(file_hydra_v1_routing_proto_rawDesc)), + NumEnums: 1, + NumMessages: 5, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_hydra_v1_routing_proto_goTypes, + DependencyIndexes: file_hydra_v1_routing_proto_depIdxs, + EnumInfos: file_hydra_v1_routing_proto_enumTypes, + MessageInfos: file_hydra_v1_routing_proto_msgTypes, + }.Build() + File_hydra_v1_routing_proto = out.File + file_hydra_v1_routing_proto_goTypes = nil + file_hydra_v1_routing_proto_depIdxs = nil +} diff --git a/go/gen/proto/hydra/v1/routing_restate.pb.go b/go/gen/proto/hydra/v1/routing_restate.pb.go new file mode 100644 index 0000000000..1e24248b9d --- /dev/null +++ b/go/gen/proto/hydra/v1/routing_restate.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-restate. DO NOT EDIT. +// versions: +// - protoc-gen-go-restate v0.1 +// - protoc (unknown) +// source: hydra/v1/routing.proto + +package hydrav1 + +import ( + fmt "fmt" + sdk_go "github.com/restatedev/sdk-go" +) + +// RoutingServiceClient is the client API for hydra.v1.RoutingService service. 
+type RoutingServiceClient interface { + // AssignDomains creates or reassigns domains to a deployment and creates gateway configs + // Used during initial deployment + AssignDomains(opts ...sdk_go.ClientOption) sdk_go.Client[*AssignDomainsRequest, *AssignDomainsResponse] + // SwitchDomains reassigns existing domains to a different deployment and updates gateway configs + // Used during rollback/promote operations + SwitchDomains(opts ...sdk_go.ClientOption) sdk_go.Client[*SwitchDomainsRequest, *SwitchDomainsResponse] +} + +type routingServiceClient struct { + ctx sdk_go.Context + key string + options []sdk_go.ClientOption +} + +func NewRoutingServiceClient(ctx sdk_go.Context, key string, opts ...sdk_go.ClientOption) RoutingServiceClient { + cOpts := append([]sdk_go.ClientOption{sdk_go.WithProtoJSON}, opts...) + return &routingServiceClient{ + ctx, + key, + cOpts, + } +} +func (c *routingServiceClient) AssignDomains(opts ...sdk_go.ClientOption) sdk_go.Client[*AssignDomainsRequest, *AssignDomainsResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*AssignDomainsRequest](sdk_go.Object[*AssignDomainsResponse](c.ctx, "hydra.v1.RoutingService", c.key, "AssignDomains", cOpts...)) +} + +func (c *routingServiceClient) SwitchDomains(opts ...sdk_go.ClientOption) sdk_go.Client[*SwitchDomainsRequest, *SwitchDomainsResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*SwitchDomainsRequest](sdk_go.Object[*SwitchDomainsResponse](c.ctx, "hydra.v1.RoutingService", c.key, "SwitchDomains", cOpts...)) +} + +// RoutingServiceServer is the server API for hydra.v1.RoutingService service. +// All implementations should embed UnimplementedRoutingServiceServer +// for forward compatibility. 
+type RoutingServiceServer interface { + // AssignDomains creates or reassigns domains to a deployment and creates gateway configs + // Used during initial deployment + AssignDomains(ctx sdk_go.ObjectContext, req *AssignDomainsRequest) (*AssignDomainsResponse, error) + // SwitchDomains reassigns existing domains to a different deployment and updates gateway configs + // Used during rollback/promote operations + SwitchDomains(ctx sdk_go.ObjectContext, req *SwitchDomainsRequest) (*SwitchDomainsResponse, error) +} + +// UnimplementedRoutingServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. +type UnimplementedRoutingServiceServer struct{} + +func (UnimplementedRoutingServiceServer) AssignDomains(ctx sdk_go.ObjectContext, req *AssignDomainsRequest) (*AssignDomainsResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method AssignDomains not implemented"), 501) +} +func (UnimplementedRoutingServiceServer) SwitchDomains(ctx sdk_go.ObjectContext, req *SwitchDomainsRequest) (*SwitchDomainsResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method SwitchDomains not implemented"), 501) +} +func (UnimplementedRoutingServiceServer) testEmbeddedByValue() {} + +// UnsafeRoutingServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to RoutingServiceServer will +// result in compilation errors. +type UnsafeRoutingServiceServer interface { + mustEmbedUnimplementedRoutingServiceServer() +} + +func NewRoutingServiceServer(srv RoutingServiceServer, opts ...sdk_go.ServiceDefinitionOption) sdk_go.ServiceDefinition { + // If the following call panics, it indicates UnimplementedRoutingServiceServer was + // embedded by pointer and is nil. 
This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. + if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + sOpts := append([]sdk_go.ServiceDefinitionOption{sdk_go.WithProtoJSON}, opts...) + router := sdk_go.NewObject("hydra.v1.RoutingService", sOpts...) + router = router.Handler("AssignDomains", sdk_go.NewObjectHandler(srv.AssignDomains)) + router = router.Handler("SwitchDomains", sdk_go.NewObjectHandler(srv.SwitchDomains)) + return router +} diff --git a/go/gen/proto/krane/v1/deployment.pb.go b/go/gen/proto/krane/v1/deployment.pb.go index 32577a5f5e..b50ddc814d 100644 --- a/go/gen/proto/krane/v1/deployment.pb.go +++ b/go/gen/proto/krane/v1/deployment.pb.go @@ -622,7 +622,8 @@ const file_krane_v1_deployment_proto_rawDesc = "" + "\x11DeploymentService\x12Y\n" + "\x10CreateDeployment\x12!.krane.v1.CreateDeploymentRequest\x1a\".krane.v1.CreateDeploymentResponse\x12P\n" + "\rGetDeployment\x12\x1e.krane.v1.GetDeploymentRequest\x1a\x1f.krane.v1.GetDeploymentResponse\x12Y\n" + - "\x10DeleteDeployment\x12!.krane.v1.DeleteDeploymentRequest\x1a\".krane.v1.DeleteDeploymentResponseB8Z6github.com/unkeyed/unkey/go/gen/proto/krane/v1;kranev1b\x06proto3" + "\x10DeleteDeployment\x12!.krane.v1.DeleteDeploymentRequest\x1a\".krane.v1.DeleteDeploymentResponseB\x98\x01\n" + + "\fcom.krane.v1B\x0fDeploymentProtoP\x01Z6github.com/unkeyed/unkey/go/gen/proto/krane/v1;kranev1\xa2\x02\x03KXX\xaa\x02\bKrane.V1\xca\x02\bKrane\\V1\xe2\x02\x14Krane\\V1\\GPBMetadata\xea\x02\tKrane::V1b\x06proto3" var ( file_krane_v1_deployment_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/partition/v1/gateway.pb.go b/go/gen/proto/partition/v1/gateway.pb.go index ff9f602092..a70af8e98d 100644 --- a/go/gen/proto/partition/v1/gateway.pb.go +++ b/go/gen/proto/partition/v1/gateway.pb.go @@ -371,7 +371,8 @@ const file_partition_v1_gateway_proto_rawDesc = 
"" + "AuthConfig\x12\x1e\n" + "\vkey_auth_id\x18\x01 \x01(\tR\tkeyAuthId\"5\n" + "\x10ValidationConfig\x12!\n" + - "\fopenapi_spec\x18\x01 \x01(\tR\vopenapiSpecB@Z>github.com/unkeyed/unkey/go/gen/proto/partition/v1;partitionv1b\x06proto3" + "\fopenapi_spec\x18\x01 \x01(\tR\vopenapiSpecB\xb1\x01\n" + + "\x10com.partition.v1B\fGatewayProtoP\x01Z>github.com/unkeyed/unkey/go/gen/proto/partition/v1;partitionv1\xa2\x02\x03PXX\xaa\x02\fPartition.V1\xca\x02\fPartition\\V1\xe2\x02\x18Partition\\V1\\GPBMetadata\xea\x02\rPartition::V1b\x06proto3" var ( file_partition_v1_gateway_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/vault/v1/object.pb.go b/go/gen/proto/vault/v1/object.pb.go index 9cf5fb6e99..70871b0a3a 100644 --- a/go/gen/proto/vault/v1/object.pb.go +++ b/go/gen/proto/vault/v1/object.pb.go @@ -357,7 +357,8 @@ const file_vault_v1_object_proto_rawDesc = "" + "\x11encryption_key_id\x18\x04 \x01(\tR\x0fencryptionKeyId\x12\x12\n" + "\x04time\x18\x05 \x01(\x03R\x04time*\x1c\n" + "\tAlgorithm\x12\x0f\n" + - "\vAES_256_GCM\x10\x00B8Z6github.com/unkeyed/unkey/go/gen/proto/vault/v1;vaultv1b\x06proto3" + "\vAES_256_GCM\x10\x00B\x94\x01\n" + + "\fcom.vault.v1B\vObjectProtoP\x01Z6github.com/unkeyed/unkey/go/gen/proto/vault/v1;vaultv1\xa2\x02\x03VXX\xaa\x02\bVault.V1\xca\x02\bVault\\V1\xe2\x02\x14Vault\\V1\\GPBMetadata\xea\x02\tVault::V1b\x06proto3" var ( file_vault_v1_object_proto_rawDescOnce sync.Once diff --git a/go/gen/proto/vault/v1/service.pb.go b/go/gen/proto/vault/v1/service.pb.go index ef0914d69f..5688e87498 100644 --- a/go/gen/proto/vault/v1/service.pb.go +++ b/go/gen/proto/vault/v1/service.pb.go @@ -715,7 +715,8 @@ const file_vault_v1_service_proto_rawDesc = "" + "\vEncryptBulk\x12\x1c.vault.v1.EncryptBulkRequest\x1a\x1d.vault.v1.EncryptBulkResponse\"\x00\x12@\n" + "\aDecrypt\x12\x18.vault.v1.DecryptRequest\x1a\x19.vault.v1.DecryptResponse\"\x00\x12F\n" + "\tReEncrypt\x12\x1a.vault.v1.ReEncryptRequest\x1a\x1b.vault.v1.ReEncryptResponse\"\x00\x12R\n" + - 
"\rReEncryptDEKs\x12\x1e.vault.v1.ReEncryptDEKsRequest\x1a\x1f.vault.v1.ReEncryptDEKsResponse\"\x00B8Z6github.com/unkeyed/unkey/go/gen/proto/vault/v1;vaultv1b\x06proto3" + "\rReEncryptDEKs\x12\x1e.vault.v1.ReEncryptDEKsRequest\x1a\x1f.vault.v1.ReEncryptDEKsResponse\"\x00B\x95\x01\n" + + "\fcom.vault.v1B\fServiceProtoP\x01Z6github.com/unkeyed/unkey/go/gen/proto/vault/v1;vaultv1\xa2\x02\x03VXX\xaa\x02\bVault.V1\xca\x02\bVault\\V1\xe2\x02\x14Vault\\V1\\GPBMetadata\xea\x02\tVault::V1b\x06proto3" var ( file_vault_v1_service_proto_rawDescOnce sync.Once diff --git a/go/go.mod b/go/go.mod index cf3e8b2538..3e9f3f9f56 100644 --- a/go/go.mod +++ b/go/go.mod @@ -202,6 +202,7 @@ require ( github.com/gofrs/flock v0.12.1 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang-jwt/jwt/v4 v4.5.2 // indirect + github.com/golang-jwt/jwt/v5 v5.2.3 // indirect github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect github.com/golangci/dupl v0.0.0-20250308024227-f665c8d69b32 // indirect @@ -229,6 +230,7 @@ require ( github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect github.com/hexops/gotextdiff v1.0.3 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect + github.com/invopop/jsonschema v0.13.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect github.com/jackc/pgx/v5 v5.7.5 // indirect @@ -282,6 +284,7 @@ require ( github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 // indirect github.com/moricho/tparallel v0.3.2 // indirect github.com/morikuni/aec v1.0.0 // indirect + github.com/mr-tron/base58 v1.2.0 // indirect github.com/muesli/termenv v0.16.0 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nakabonne/nestif v0.3.1 // indirect @@ -319,6 +322,7 @@ require ( github.com/quic-go/quic-go v0.54.0 // indirect github.com/raeperd/recvcheck v0.2.0 // 
indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/restatedev/sdk-go v0.20.0 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/riza-io/grpc-go v0.2.0 // indirect github.com/rogpeppe/go-internal v1.14.1 // indirect diff --git a/go/go.sum b/go/go.sum index 213268846e..1b048f1c6b 100644 --- a/go/go.sum +++ b/go/go.sum @@ -406,6 +406,8 @@ github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8= github.com/golang-jwt/jwt/v5 v5.2.2/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= +github.com/golang-jwt/jwt/v5 v5.2.3 h1:kkGXqQOBSDDWRhWNXTFpqGSCMyh/PLnqUvMGJPDJDs0= +github.com/golang-jwt/jwt/v5 v5.2.3/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= @@ -507,6 +509,8 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= +github.com/invopop/jsonschema v0.13.0 h1:KvpoAJWEjR3uD9Kbm2HWJmqsEaHt8lBUpd0qHcIi21E= +github.com/invopop/jsonschema v0.13.0/go.mod h1:ffZ5Km5SWWRAIN6wbDXItl95euhFz2uON45H2qjYt+0= github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= github.com/jackc/pgpassfile v1.0.0/go.mod 
h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= @@ -647,6 +651,8 @@ github.com/moricho/tparallel v0.3.2 h1:odr8aZVFA3NZrNybggMkYO3rgPRcqjeQUlBBFVxKH github.com/moricho/tparallel v0.3.2/go.mod h1:OQ+K3b4Ln3l2TZveGCywybl68glfLEwFGqvnjok8b+U= github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc= github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= @@ -775,6 +781,8 @@ github.com/rekby/fixenv v0.6.1 h1:jUFiSPpajT4WY2cYuc++7Y1zWrnCxnovGCIX72PZniM= github.com/rekby/fixenv v0.6.1/go.mod h1:/b5LRc06BYJtslRtHKxsPWFT/ySpHV+rWvzTg+XWk4c= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/restatedev/sdk-go v0.20.0 h1:BjV1CqKoJwvVKxR5Z4e1Ofx2H+k9oeD0AbRGkS0ihZs= +github.com/restatedev/sdk-go v0.20.0/go.mod h1:T3G/P3VBSRTvdverfEiCVVcsNSymzO5ebIyUU6uRqk8= github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= diff --git a/go/k8s/manifests/ctrl.yaml b/go/k8s/manifests/ctrl.yaml index a90833ed2a..2a41d5e536 100644 --- a/go/k8s/manifests/ctrl.yaml +++ b/go/k8s/manifests/ctrl.yaml @@ 
-23,6 +23,7 @@ spec: imagePullPolicy: Never # Use local images ports: - containerPort: 7091 + - containerPort: 9080 env: # Server Configuration - name: UNKEY_HTTP_PORT @@ -72,6 +73,18 @@ spec: - name: UNKEY_DEFAULT_DOMAIN value: "unkey.local" + + # Restate Configuration + - name: UNKEY_RESTATE_INGRESS_URL + value: "http://restate:8080" + - name: UNKEY_RESTATE_ADMIN_URL + value: "http://restate:9070" + - name: UNKEY_RESTATE_HTTP_PORT + value: "9080" + - name: UNKEY_RESTATE_REGISTER_AS + value: "http://ctrl:9080" + + # Additional Configuration - name: UNKEY_API_KEY value: "your-local-dev-key" @@ -83,7 +96,7 @@ spec: [ "sh", "-c", - "until nc -z mysql 3306 && nc -z s3 3902; do echo waiting for dependencies; sleep 2; done;", + "until nc -z mysql 3306 && nc -z s3 3902 && nc -z restate 8080; do echo waiting for dependencies; sleep 2; done;", ] --- @@ -102,4 +115,8 @@ spec: port: 7091 targetPort: 7091 protocol: TCP + - name: restate + port: 9080 + targetPort: 9080 + protocol: TCP type: LoadBalancer diff --git a/go/k8s/manifests/restate.yaml b/go/k8s/manifests/restate.yaml new file mode 100644 index 0000000000..0518bd735e --- /dev/null +++ b/go/k8s/manifests/restate.yaml @@ -0,0 +1,84 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: restate-pvc + namespace: unkey +spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 10Gi + +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: restate + namespace: unkey + labels: + app: restate +spec: + serviceName: "restate" + replicas: 1 # Restate requires single-replica for development + selector: + matchLabels: + app: restate + template: + metadata: + labels: + app: restate + spec: + containers: + - name: restate + image: docker.io/restatedev/restate:1.5.1 + ports: + - containerPort: 8080 + name: ingress + - containerPort: 9070 + name: admin + volumeMounts: + - name: restate-storage + mountPath: /restate-data + env: + - name: RESTATE_WORKER__STORAGE__DATA_DIR + value: 
"/restate-data" + readinessProbe: + httpGet: + path: /health + port: 9070 + initialDelaySeconds: 10 + periodSeconds: 5 + livenessProbe: + httpGet: + path: /health + port: 9070 + initialDelaySeconds: 30 + periodSeconds: 10 + volumes: + - name: restate-storage + persistentVolumeClaim: + claimName: restate-pvc + +--- +apiVersion: v1 +kind: Service +metadata: + name: restate + namespace: unkey + labels: + app: restate +spec: + selector: + app: restate + ports: + - name: ingress + port: 8080 + targetPort: 8080 + protocol: TCP + - name: admin + port: 9070 + targetPort: 9070 + protocol: TCP + type: LoadBalancer diff --git a/go/openapi-test b/go/openapi-test deleted file mode 100755 index 2cb1989b7a..0000000000 Binary files a/go/openapi-test and /dev/null differ diff --git a/go/pkg/db/domain_find_by_ids.sql_generated.go b/go/pkg/db/domain_find_by_ids.sql_generated.go new file mode 100644 index 0000000000..6f62cac862 --- /dev/null +++ b/go/pkg/db/domain_find_by_ids.sql_generated.go @@ -0,0 +1,100 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.29.0 +// source: domain_find_by_ids.sql + +package db + +import ( + "context" + "database/sql" + "strings" +) + +const findDomainsByIds = `-- name: FindDomainsByIds :many +SELECT + id, + workspace_id, + project_id, + environment_id, + domain, + deployment_id, + sticky, + type, + created_at, + updated_at +FROM domains +WHERE id IN (/*SLICE:ids*/?) 
+` + +type FindDomainsByIdsRow struct { + ID string `db:"id"` + WorkspaceID string `db:"workspace_id"` + ProjectID sql.NullString `db:"project_id"` + EnvironmentID sql.NullString `db:"environment_id"` + Domain string `db:"domain"` + DeploymentID sql.NullString `db:"deployment_id"` + Sticky NullDomainsSticky `db:"sticky"` + Type DomainsType `db:"type"` + CreatedAt int64 `db:"created_at"` + UpdatedAt sql.NullInt64 `db:"updated_at"` +} + +// FindDomainsByIds +// +// SELECT +// id, +// workspace_id, +// project_id, +// environment_id, +// domain, +// deployment_id, +// sticky, +// type, +// created_at, +// updated_at +// FROM domains +// WHERE id IN (/*SLICE:ids*/?) +func (q *Queries) FindDomainsByIds(ctx context.Context, db DBTX, ids []string) ([]FindDomainsByIdsRow, error) { + query := findDomainsByIds + var queryParams []interface{} + if len(ids) > 0 { + for _, v := range ids { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:ids*/?", strings.Repeat(",?", len(ids))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:ids*/?", "NULL", 1) + } + rows, err := db.QueryContext(ctx, query, queryParams...) + if err != nil { + return nil, err + } + defer rows.Close() + var items []FindDomainsByIdsRow + for rows.Next() { + var i FindDomainsByIdsRow + if err := rows.Scan( + &i.ID, + &i.WorkspaceID, + &i.ProjectID, + &i.EnvironmentID, + &i.Domain, + &i.DeploymentID, + &i.Sticky, + &i.Type, + &i.CreatedAt, + &i.UpdatedAt, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/go/pkg/db/querier_generated.go b/go/pkg/db/querier_generated.go index f166bc877b..1b0f601f7d 100644 --- a/go/pkg/db/querier_generated.go +++ b/go/pkg/db/querier_generated.go @@ -189,6 +189,22 @@ type Querier interface { // WHERE deployment_id = ? 
// ORDER BY created_at ASC FindDomainsByDeploymentId(ctx context.Context, db DBTX, deploymentID sql.NullString) ([]FindDomainsByDeploymentIdRow, error) + //FindDomainsByIds + // + // SELECT + // id, + // workspace_id, + // project_id, + // environment_id, + // domain, + // deployment_id, + // sticky, + // type, + // created_at, + // updated_at + // FROM domains + // WHERE id IN (/*SLICE:ids*/?) + FindDomainsByIds(ctx context.Context, db DBTX, ids []string) ([]FindDomainsByIdsRow, error) //FindDomainsForPromotion // // SELECT diff --git a/go/pkg/db/queries/domain_find_by_ids.sql b/go/pkg/db/queries/domain_find_by_ids.sql new file mode 100644 index 0000000000..60d615c2fa --- /dev/null +++ b/go/pkg/db/queries/domain_find_by_ids.sql @@ -0,0 +1,14 @@ +-- name: FindDomainsByIds :many +SELECT + id, + workspace_id, + project_id, + environment_id, + domain, + deployment_id, + sticky, + type, + created_at, + updated_at +FROM domains +WHERE id IN (sqlc.slice(ids)); diff --git a/go/proto/hydra/v1/certificate.proto b/go/proto/hydra/v1/certificate.proto new file mode 100644 index 0000000000..727ec20f03 --- /dev/null +++ b/go/proto/hydra/v1/certificate.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package hydra.v1; + +import "dev/restate/sdk/go.proto"; + +option go_package = "github.com/unkeyed/unkey/go/gen/proto/hydra/v1;hydrav1"; + +// CertificateService manages ACME certificate challenges and issuance +service CertificateService { + option (dev.restate.sdk.go.service_type) = VIRTUAL_OBJECT; + + // ProcessChallenge handles the complete ACME certificate challenge flow + // Key: domain name (ensures only one challenge per domain at a time) + rpc ProcessChallenge(ProcessChallengeRequest) returns (ProcessChallengeResponse) {} +} + +message ProcessChallengeRequest { + string workspace_id = 1; + string domain = 2; +} + +message ProcessChallengeResponse { + string certificate_id = 1; + string status = 2; // "success", "failed", "pending" +} diff --git 
a/go/proto/hydra/v1/deployment.proto b/go/proto/hydra/v1/deployment.proto new file mode 100644 index 0000000000..e525e5b211 --- /dev/null +++ b/go/proto/hydra/v1/deployment.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; + +package hydra.v1; + +import "dev/restate/sdk/go.proto"; + +option go_package = "github.com/unkeyed/unkey/go/gen/proto/hydra/v1;hydrav1"; + +service DeploymentService { + option (dev.restate.sdk.go.service_type) = VIRTUAL_OBJECT; + rpc Deploy(DeployRequest) returns (DeployResponse) {} + rpc Rollback(RollbackRequest) returns (RollbackResponse) {} + rpc Promote(PromoteRequest) returns (PromoteResponse) {} +} + +message DeployRequest { + string deployment_id = 1; + string docker_image = 2; + optional string key_auth_id = 3; +} + +message DeployResponse {} + +message RollbackRequest { + string source_deployment_id = 1; + string target_deployment_id = 2; +} + +message RollbackResponse {} + +message PromoteRequest { + string target_deployment_id = 1; +} + +message PromoteResponse {} diff --git a/go/proto/hydra/v1/routing.proto b/go/proto/hydra/v1/routing.proto new file mode 100644 index 0000000000..aba8b33a7b --- /dev/null +++ b/go/proto/hydra/v1/routing.proto @@ -0,0 +1,64 @@ +syntax = "proto3"; + +package hydra.v1; + +import "dev/restate/sdk/go.proto"; +import "partition/v1/gateway.proto"; + +option go_package = "github.com/unkeyed/unkey/go/gen/proto/hydra/v1;hydrav1"; + +service RoutingService { + option (dev.restate.sdk.go.service_type) = VIRTUAL_OBJECT; + + // AssignDomains creates or reassigns domains to a deployment and creates gateway configs + // Used during initial deployment + rpc AssignDomains(AssignDomainsRequest) returns (AssignDomainsResponse) {} + + // SwitchDomains reassigns existing domains to a different deployment and updates gateway configs + // Used during rollback/promote operations + rpc SwitchDomains(SwitchDomainsRequest) returns (SwitchDomainsResponse) {} +} + +// AssignDomainsRequest is used when deploying - creates/updates 
domains and gateway configs +message AssignDomainsRequest { + string workspace_id = 1; + string project_id = 2; + string environment_id = 3; + string deployment_id = 4; + + // Domains to create/assign (by name) + repeated DomainToAssign domains = 5; + + // Gateway configuration + partition.v1.GatewayConfig gateway_config = 6; + + // Whether the project is currently rolled back (skip domain assignment if true) + bool is_rolled_back = 7; +} + +enum DomainSticky { + DOMAIN_STICKY_UNSPECIFIED = 0; // Not sticky (per-commit domain) + DOMAIN_STICKY_BRANCH = 1; // Sticky to branch + DOMAIN_STICKY_ENVIRONMENT = 2; // Sticky to environment + DOMAIN_STICKY_LIVE = 3; // Sticky to live deployment +} + +message DomainToAssign { + string name = 1; + DomainSticky sticky = 2; +} + +message AssignDomainsResponse { + // Domain names that were actually changed (created or reassigned) + repeated string changed_domain_names = 1; +} + +// SwitchDomainsRequest is used for rollback/promote - switches existing domains to a different deployment +message SwitchDomainsRequest { + string target_deployment_id = 1; + + // Domain IDs to switch (must already exist in database) + repeated string domain_ids = 2; +} + +message SwitchDomainsResponse {} diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 3c2b47e8e4..0fb53cbb49 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -492,9 +492,15 @@ importers: lucide-react: specifier: ^0.378.0 version: 0.378.0(react@18.3.1) + mermaid: + specifier: ^11.12.0 + version: 11.12.0 next: specifier: 14.2.15 version: 14.2.15(react-dom@18.3.1)(react@18.3.1) + next-themes: + specifier: ^0.4.6 + version: 0.4.6(react-dom@18.3.1)(react@18.3.1) react: specifier: ^18.3.1 version: 18.3.1 @@ -639,7 +645,7 @@ importers: devDependencies: checkly: specifier: latest - version: 4.9.0(@types/node@20.14.9)(typescript@5.5.3) + version: 6.5.0(@types/node@20.14.9)(typescript@5.5.3) ts-node: specifier: 10.9.1 version: 10.9.1(@types/node@20.14.9)(typescript@5.5.3) @@ -1336,6 +1342,17 @@ 
packages: react-dom: 18.3.1(react@18.3.1) dev: false + /@antfu/install-pkg@1.1.0: + resolution: {integrity: sha512-MGQsmw10ZyI+EJo45CdSER4zEb+p31LpDAFp2Z3gkSd1yqVZGi0Ebx++YTEMonJy4oChEMLsxZ64j8FH6sSqtQ==} + dependencies: + package-manager-detector: 1.3.0 + tinyexec: 1.0.1 + dev: false + + /@antfu/utils@9.2.1: + resolution: {integrity: sha512-TMilPqXyii1AsiEii6l6ubRzbo76p6oshUSYPaKsmXDavyMLqjzVDkcp3pHp5ELMUNJHATcEOGxKTTsX9yYhGg==} + dev: false + /@antv/adjust@0.2.5: resolution: {integrity: sha512-MfWZOkD9CqXRES6MBGRNe27Q577a72EIwyMnE29wIlPliFvJfWwsrONddpGU7lilMpVKecS3WAzOoip3RfPTRQ==} dependencies: @@ -1744,6 +1761,10 @@ packages: dev: true optional: true + /@braintree/sanitize-url@7.1.1: + resolution: {integrity: sha512-i1L7noDNxtFyL5DmZafWy1wRVhGehQmzZaz1HiN5e7iylJMSZR7ekOV7NsIqa5qBldlLrsKv4HbgFUVlQrz8Mw==} + dev: false + /@bufbuild/buf-darwin-arm64@1.47.2: resolution: {integrity: sha512-74WerFn06y+azgVfsnzhfbI5wla/OLPDnIvaNJBWHaqya/3bfascJkDylW2GVNHmwG1K/cscpmcc/RJPaO7ntQ==} engines: {node: '>=12'} @@ -2048,6 +2069,33 @@ packages: prettier: 2.8.8 dev: true + /@chevrotain/cst-dts-gen@11.0.3: + resolution: {integrity: sha512-BvIKpRLeS/8UbfxXxgC33xOumsacaeCKAjAeLyOn7Pcp95HiRbrpl14S+9vaZLolnbssPIUuiUd8IvgkRyt6NQ==} + dependencies: + '@chevrotain/gast': 11.0.3 + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.21 + dev: false + + /@chevrotain/gast@11.0.3: + resolution: {integrity: sha512-+qNfcoNk70PyS/uxmj3li5NiECO+2YKZZQMbmjTqRI3Qchu8Hig/Q9vgkHpI3alNjr7M+a2St5pw5w5F6NL5/Q==} + dependencies: + '@chevrotain/types': 11.0.3 + lodash-es: 4.17.21 + dev: false + + /@chevrotain/regexp-to-ast@11.0.3: + resolution: {integrity: sha512-1fMHaBZxLFvWI067AVbGJav1eRY7N8DDvYCTwGBiE/ytKBgP8azTdgyrKyWZ9Mfh09eHWb5PgTSO8wi7U824RA==} + dev: false + + /@chevrotain/types@11.0.3: + resolution: {integrity: sha512-gsiM3G8b58kZC2HaWR50gu6Y1440cHiJ+i3JUvcp/35JchYejb2+5MVeJK0iKThYpAa/P2PYFV4hoi44HD+aHQ==} + dev: false + + /@chevrotain/utils@11.0.3: + resolution: {integrity: 
sha512-YslZMgtJUyuMbZ+aKvfF3x1f5liK4mWNxghFRv7jqRR9C3R3fAOGTTKvxXDa2Y1s9zSbcpuO0cAxDYsc9SrXoQ==} + dev: false + /@clack/core@0.3.5: resolution: {integrity: sha512-5cfhQNH+1VQ2xLQlmzXMqUoiaH0lRBq9/CLW9lTyMbuKLC3+xEK01tHVvyut++mLOn5urSHmkm6I0Lg9MaJSTQ==} dependencies: @@ -4095,6 +4143,25 @@ packages: engines: {node: '>=18.18'} dev: false + /@iconify/types@2.0.0: + resolution: {integrity: sha512-+wluvCrRhXrhyOmRDJ3q8mux9JkKy5SJ/v8ol2tu4FVjyYvtEzkc/3pK15ET6RKg4b4w4BmTk1+gsCUhf21Ykg==} + dev: false + + /@iconify/utils@3.0.2: + resolution: {integrity: sha512-EfJS0rLfVuRuJRn4psJHtK2A9TqVnkxPpHY6lYHiB9+8eSuudsxbwMiavocG45ujOo6FJ+CIRlRnlOGinzkaGQ==} + dependencies: + '@antfu/install-pkg': 1.1.0 + '@antfu/utils': 9.2.1 + '@iconify/types': 2.0.0 + debug: 4.4.1(supports-color@8.1.1) + globals: 15.15.0 + kolorist: 1.8.0 + local-pkg: 1.1.2 + mlly: 1.8.0 + transitivePeerDependencies: + - supports-color + dev: false + /@img/sharp-darwin-arm64@0.33.5: resolution: {integrity: sha512-UT4p+iz/2H4twwAoLCqfA9UH5pI6DggwKEGuaPy7nCVQ8ZsiY5PIcrRvD1DzuY3qYL07NtIQcWnBSY/heikIFQ==} engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} @@ -4483,6 +4550,23 @@ packages: dev: false optional: true + /@inquirer/checkbox@4.2.2(@types/node@20.14.9): + resolution: {integrity: sha512-E+KExNurKcUJJdxmjglTl141EwxWyAHplvsYJQgSwXf8qiNWkTxTuCCqmhFEmbIXd4zLaGMfQFJ6WrZ7fSeV3g==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/figures': 1.0.13 + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + ansi-escapes: 4.3.2 + yoctocolors-cjs: 2.1.3 + dev: true + /@inquirer/checkbox@4.2.2(@types/node@22.14.0): resolution: {integrity: sha512-E+KExNurKcUJJdxmjglTl141EwxWyAHplvsYJQgSwXf8qiNWkTxTuCCqmhFEmbIXd4zLaGMfQFJ6WrZ7fSeV3g==} engines: {node: '>=18'} @@ -4500,6 +4584,20 @@ packages: yoctocolors-cjs: 2.1.3 dev: true + 
/@inquirer/confirm@5.1.16(@types/node@20.14.9): + resolution: {integrity: sha512-j1a5VstaK5KQy8Mu8cHmuQvN1Zc62TbLhjJxwHvKPPKEoowSF6h/0UdOpA9DNdWZ+9Inq73+puRq1df6OJ8Sag==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + dev: true + /@inquirer/confirm@5.1.16(@types/node@22.14.0): resolution: {integrity: sha512-j1a5VstaK5KQy8Mu8cHmuQvN1Zc62TbLhjJxwHvKPPKEoowSF6h/0UdOpA9DNdWZ+9Inq73+puRq1df6OJ8Sag==} engines: {node: '>=18'} @@ -4514,6 +4612,26 @@ packages: '@types/node': 22.14.0 dev: true + /@inquirer/core@10.2.0(@types/node@20.14.9): + resolution: {integrity: sha512-NyDSjPqhSvpZEMZrLCYUquWNl+XC/moEcVFqS55IEYIYsY0a1cUCevSqk7ctOlnm/RaSBU5psFryNlxcmGrjaA==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/figures': 1.0.13 + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + ansi-escapes: 4.3.2 + cli-width: 4.1.0 + mute-stream: 2.0.0 + signal-exit: 4.1.0 + wrap-ansi: 6.2.0 + yoctocolors-cjs: 2.1.3 + dev: true + /@inquirer/core@10.2.0(@types/node@22.14.0): resolution: {integrity: sha512-NyDSjPqhSvpZEMZrLCYUquWNl+XC/moEcVFqS55IEYIYsY0a1cUCevSqk7ctOlnm/RaSBU5psFryNlxcmGrjaA==} engines: {node: '>=18'} @@ -4534,6 +4652,21 @@ packages: yoctocolors-cjs: 2.1.3 dev: true + /@inquirer/editor@4.2.18(@types/node@20.14.9): + resolution: {integrity: sha512-yeQN3AXjCm7+Hmq5L6Dm2wEDeBRdAZuyZ4I7tWSSanbxDzqM0KqzoDbKM7p4ebllAYdoQuPJS6N71/3L281i6w==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/external-editor': 1.0.1(@types/node@20.14.9) + '@inquirer/type': 3.0.8(@types/node@20.14.9) + 
'@types/node': 20.14.9 + dev: true + /@inquirer/editor@4.2.18(@types/node@22.14.0): resolution: {integrity: sha512-yeQN3AXjCm7+Hmq5L6Dm2wEDeBRdAZuyZ4I7tWSSanbxDzqM0KqzoDbKM7p4ebllAYdoQuPJS6N71/3L281i6w==} engines: {node: '>=18'} @@ -4549,6 +4682,21 @@ packages: '@types/node': 22.14.0 dev: true + /@inquirer/expand@4.0.18(@types/node@20.14.9): + resolution: {integrity: sha512-xUjteYtavH7HwDMzq4Cn2X4Qsh5NozoDHCJTdoXg9HfZ4w3R6mxV1B9tL7DGJX2eq/zqtsFjhm0/RJIMGlh3ag==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + yoctocolors-cjs: 2.1.3 + dev: true + /@inquirer/expand@4.0.18(@types/node@22.14.0): resolution: {integrity: sha512-xUjteYtavH7HwDMzq4Cn2X4Qsh5NozoDHCJTdoXg9HfZ4w3R6mxV1B9tL7DGJX2eq/zqtsFjhm0/RJIMGlh3ag==} engines: {node: '>=18'} @@ -4564,6 +4712,20 @@ packages: yoctocolors-cjs: 2.1.3 dev: true + /@inquirer/external-editor@1.0.1(@types/node@20.14.9): + resolution: {integrity: sha512-Oau4yL24d2B5IL4ma4UpbQigkVhzPDXLoqy1ggK4gnHg/stmkffJE4oOXHXF3uz0UEpywG68KcyXsyYpA1Re/Q==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@types/node': 20.14.9 + chardet: 2.1.0 + iconv-lite: 0.6.3 + dev: true + /@inquirer/external-editor@1.0.1(@types/node@22.14.0): resolution: {integrity: sha512-Oau4yL24d2B5IL4ma4UpbQigkVhzPDXLoqy1ggK4gnHg/stmkffJE4oOXHXF3uz0UEpywG68KcyXsyYpA1Re/Q==} engines: {node: '>=18'} @@ -4583,6 +4745,20 @@ packages: engines: {node: '>=18'} dev: true + /@inquirer/input@4.2.2(@types/node@20.14.9): + resolution: {integrity: sha512-hqOvBZj/MhQCpHUuD3MVq18SSoDNHy7wEnQ8mtvs71K8OPZVXJinOzcvQna33dNYLYE4LkA9BlhAhK6MJcsVbw==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + 
dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + dev: true + /@inquirer/input@4.2.2(@types/node@22.14.0): resolution: {integrity: sha512-hqOvBZj/MhQCpHUuD3MVq18SSoDNHy7wEnQ8mtvs71K8OPZVXJinOzcvQna33dNYLYE4LkA9BlhAhK6MJcsVbw==} engines: {node: '>=18'} @@ -4597,6 +4773,20 @@ packages: '@types/node': 22.14.0 dev: true + /@inquirer/number@3.0.18(@types/node@20.14.9): + resolution: {integrity: sha512-7exgBm52WXZRczsydCVftozFTrrwbG5ySE0GqUd2zLNSBXyIucs2Wnm7ZKLe/aUu6NUg9dg7Q80QIHCdZJiY4A==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + dev: true + /@inquirer/number@3.0.18(@types/node@22.14.0): resolution: {integrity: sha512-7exgBm52WXZRczsydCVftozFTrrwbG5ySE0GqUd2zLNSBXyIucs2Wnm7ZKLe/aUu6NUg9dg7Q80QIHCdZJiY4A==} engines: {node: '>=18'} @@ -4611,6 +4801,21 @@ packages: '@types/node': 22.14.0 dev: true + /@inquirer/password@4.0.18(@types/node@20.14.9): + resolution: {integrity: sha512-zXvzAGxPQTNk/SbT3carAD4Iqi6A2JS2qtcqQjsL22uvD+JfQzUrDEtPjLL7PLn8zlSNyPdY02IiQjzoL9TStA==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + ansi-escapes: 4.3.2 + dev: true + /@inquirer/password@4.0.18(@types/node@22.14.0): resolution: {integrity: sha512-zXvzAGxPQTNk/SbT3carAD4Iqi6A2JS2qtcqQjsL22uvD+JfQzUrDEtPjLL7PLn8zlSNyPdY02IiQjzoL9TStA==} engines: {node: '>=18'} @@ -4626,6 +4831,28 @@ packages: ansi-escapes: 4.3.2 dev: true + /@inquirer/prompts@7.8.4(@types/node@20.14.9): + resolution: {integrity: 
sha512-MuxVZ1en1g5oGamXV3DWP89GEkdD54alcfhHd7InUW5BifAdKQEK9SLFa/5hlWbvuhMPlobF0WAx7Okq988Jxg==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/checkbox': 4.2.2(@types/node@20.14.9) + '@inquirer/confirm': 5.1.16(@types/node@20.14.9) + '@inquirer/editor': 4.2.18(@types/node@20.14.9) + '@inquirer/expand': 4.0.18(@types/node@20.14.9) + '@inquirer/input': 4.2.2(@types/node@20.14.9) + '@inquirer/number': 3.0.18(@types/node@20.14.9) + '@inquirer/password': 4.0.18(@types/node@20.14.9) + '@inquirer/rawlist': 4.1.6(@types/node@20.14.9) + '@inquirer/search': 3.1.1(@types/node@20.14.9) + '@inquirer/select': 4.3.2(@types/node@20.14.9) + '@types/node': 20.14.9 + dev: true + /@inquirer/prompts@7.8.4(@types/node@22.14.0): resolution: {integrity: sha512-MuxVZ1en1g5oGamXV3DWP89GEkdD54alcfhHd7InUW5BifAdKQEK9SLFa/5hlWbvuhMPlobF0WAx7Okq988Jxg==} engines: {node: '>=18'} @@ -4648,6 +4875,21 @@ packages: '@types/node': 22.14.0 dev: true + /@inquirer/rawlist@4.1.6(@types/node@20.14.9): + resolution: {integrity: sha512-KOZqa3QNr3f0pMnufzL7K+nweFFCCBs6LCXZzXDrVGTyssjLeudn5ySktZYv1XiSqobyHRYYK0c6QsOxJEhXKA==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + yoctocolors-cjs: 2.1.3 + dev: true + /@inquirer/rawlist@4.1.6(@types/node@22.14.0): resolution: {integrity: sha512-KOZqa3QNr3f0pMnufzL7K+nweFFCCBs6LCXZzXDrVGTyssjLeudn5ySktZYv1XiSqobyHRYYK0c6QsOxJEhXKA==} engines: {node: '>=18'} @@ -4663,6 +4905,22 @@ packages: yoctocolors-cjs: 2.1.3 dev: true + /@inquirer/search@3.1.1(@types/node@20.14.9): + resolution: {integrity: sha512-TkMUY+A2p2EYVY3GCTItYGvqT6LiLzHBnqsU1rJbrpXUijFfM6zvUx0R4civofVwFCmJZcKqOVwwWAjplKkhxA==} + engines: {node: '>=18'} + peerDependencies: + 
'@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/figures': 1.0.13 + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + yoctocolors-cjs: 2.1.3 + dev: true + /@inquirer/search@3.1.1(@types/node@22.14.0): resolution: {integrity: sha512-TkMUY+A2p2EYVY3GCTItYGvqT6LiLzHBnqsU1rJbrpXUijFfM6zvUx0R4civofVwFCmJZcKqOVwwWAjplKkhxA==} engines: {node: '>=18'} @@ -4679,6 +4937,23 @@ packages: yoctocolors-cjs: 2.1.3 dev: true + /@inquirer/select@4.3.2(@types/node@20.14.9): + resolution: {integrity: sha512-nwous24r31M+WyDEHV+qckXkepvihxhnyIaod2MG7eCE6G0Zm/HUF6jgN8GXgf4U7AU6SLseKdanY195cwvU6w==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@inquirer/core': 10.2.0(@types/node@20.14.9) + '@inquirer/figures': 1.0.13 + '@inquirer/type': 3.0.8(@types/node@20.14.9) + '@types/node': 20.14.9 + ansi-escapes: 4.3.2 + yoctocolors-cjs: 2.1.3 + dev: true + /@inquirer/select@4.3.2(@types/node@22.14.0): resolution: {integrity: sha512-nwous24r31M+WyDEHV+qckXkepvihxhnyIaod2MG7eCE6G0Zm/HUF6jgN8GXgf4U7AU6SLseKdanY195cwvU6w==} engines: {node: '>=18'} @@ -4696,6 +4971,18 @@ packages: yoctocolors-cjs: 2.1.3 dev: true + /@inquirer/type@3.0.8(@types/node@20.14.9): + resolution: {integrity: sha512-lg9Whz8onIHRthWaN1Q9EGLa/0LFJjyM8mEUbL1eTi6yMGvBf8gvyDLtxSXztQsxMvhxxNpJYrwa1YHdq+w4Jw==} + engines: {node: '>=18'} + peerDependencies: + '@types/node': '>=18' + peerDependenciesMeta: + '@types/node': + optional: true + dependencies: + '@types/node': 20.14.9 + dev: true + /@inquirer/type@3.0.8(@types/node@22.14.0): resolution: {integrity: sha512-lg9Whz8onIHRthWaN1Q9EGLa/0LFJjyM8mEUbL1eTi6yMGvBf8gvyDLtxSXztQsxMvhxxNpJYrwa1YHdq+w4Jw==} engines: {node: '>=18'} @@ -5015,6 +5302,12 @@ packages: - ws dev: false + /@mermaid-js/parser@0.6.2: + resolution: {integrity: 
sha512-+PO02uGF6L6Cs0Bw8RpGhikVvMWEysfAyl27qTlroUB8jSWr1lL0Sf6zi78ZxlSnmgSY2AMMKVgghnN9jTtwkQ==} + dependencies: + langium: 3.3.1 + dev: false + /@mintlify/cli@4.0.635(@types/node@22.14.0)(@types/react@18.3.11)(react-dom@18.3.1)(typescript@5.7.3): resolution: {integrity: sha512-jvTcEz3Zt31AQG9K+VBzJujssYKL6ED5NHeajsoysH2aJzLM8Xp7h+JPseVZmeDUU7jp+yCF/fNeGmmlUQXyqQ==} engines: {node: '>=18.0.0'} @@ -5665,146 +5958,78 @@ packages: fastq: 1.19.1 dev: true - /@oclif/color@1.0.13: - resolution: {integrity: sha512-/2WZxKCNjeHlQogCs1VBtJWlPXjwWke/9gMrwsVsrUt00g2V6LUBvwgwrxhrXepjOmq4IZ5QeNbpDMEOUlx/JA==} - engines: {node: '>=12.0.0'} - dependencies: - ansi-styles: 4.3.0 - chalk: 4.1.2 - strip-ansi: 6.0.1 - supports-color: 8.1.1 - tslib: 2.8.1 - dev: true - - /@oclif/core@1.26.2: - resolution: {integrity: sha512-6jYuZgXvHfOIc9GIaS4T3CIKGTjPmfAxuMcbCbMRKJJl4aq/4xeRlEz0E8/hz8HxvxZBGvN2GwAUHlrGWQVrVw==} - engines: {node: '>=14.0.0'} + /@oclif/core@4.5.3: + resolution: {integrity: sha512-ISoFlfmsuxJvNKXhabCO4/KqNXDQdLHchZdTPfZbtqAsQbqTw5IKitLVZq9Sz1LWizN37HILp4u0350B8scBjg==} + engines: {node: '>=18.0.0'} dependencies: - '@oclif/linewrap': 1.0.0 - '@oclif/screen': 3.0.8 ansi-escapes: 4.3.2 - ansi-styles: 4.3.0 - cardinal: 2.1.1 - chalk: 4.1.2 + ansis: 3.17.0 clean-stack: 3.0.1 - cli-progress: 3.12.0 + cli-spinners: 2.9.2 debug: 4.4.1(supports-color@8.1.1) ejs: 3.1.10 - fs-extra: 9.1.0 get-package-type: 0.1.0 - globby: 11.1.0 - hyperlinker: 1.0.0 indent-string: 4.0.0 is-wsl: 2.2.0 - js-yaml: 3.14.1 - natural-orderby: 2.0.3 - object-treeify: 1.1.33 - password-prompt: 1.1.3 + lilconfig: 3.1.3 + minimatch: 9.0.5 semver: 7.7.2 string-width: 4.2.3 - strip-ansi: 6.0.1 supports-color: 8.1.1 - supports-hyperlinks: 2.3.0 - tslib: 2.8.1 + tinyglobby: 0.2.15 widest-line: 3.1.0 + wordwrap: 1.0.0 wrap-ansi: 7.0.0 dev: true - /@oclif/core@2.8.11(@types/node@20.14.9)(typescript@5.5.3): - resolution: {integrity: 
sha512-9wYW6KRSWfB/D+tqeyl/jxmEz/xPXkFJGVWfKaptqHz6FPWNJREjAM945MuJL2Y8NRhMe+ScRlZ3WpdToX5aVQ==} - engines: {node: '>=14.0.0'} + /@oclif/core@4.5.4: + resolution: {integrity: sha512-78YYJls8+KG96tReyUsesKKIKqC0qbFSY1peUSrt0P2uGsrgAuU9axQ0iBQdhAlIwZDcTyaj+XXVQkz2kl/O0w==} + engines: {node: '>=18.0.0'} dependencies: - '@types/cli-progress': 3.11.6 ansi-escapes: 4.3.2 - ansi-styles: 4.3.0 - cardinal: 2.1.1 - chalk: 4.1.2 + ansis: 3.17.0 clean-stack: 3.0.1 - cli-progress: 3.12.0 - debug: 4.4.1(supports-color@8.1.1) + cli-spinners: 2.9.2 + debug: 4.4.3(supports-color@8.1.1) ejs: 3.1.10 - fs-extra: 9.1.0 get-package-type: 0.1.0 - globby: 11.1.0 - hyperlinker: 1.0.0 indent-string: 4.0.0 is-wsl: 2.2.0 - js-yaml: 3.14.1 - natural-orderby: 2.0.3 - object-treeify: 1.1.33 - password-prompt: 1.1.3 + lilconfig: 3.1.3 + minimatch: 9.0.5 semver: 7.7.2 string-width: 4.2.3 - strip-ansi: 6.0.1 supports-color: 8.1.1 - supports-hyperlinks: 2.3.0 - ts-node: 10.9.1(@types/node@20.14.9)(typescript@5.5.3) - tslib: 2.8.1 + tinyglobby: 0.2.15 widest-line: 3.1.0 wordwrap: 1.0.0 wrap-ansi: 7.0.0 - transitivePeerDependencies: - - '@swc/core' - - '@swc/wasm' - - '@types/node' - - typescript dev: true - /@oclif/core@4.5.3: - resolution: {integrity: sha512-ISoFlfmsuxJvNKXhabCO4/KqNXDQdLHchZdTPfZbtqAsQbqTw5IKitLVZq9Sz1LWizN37HILp4u0350B8scBjg==} + /@oclif/plugin-help@6.2.33: + resolution: {integrity: sha512-9L07S61R0tuXrURdLcVtjF79Nbyv3qGplJ88DVskJBxShbROZl3hBG7W/CNltAK3cnMPlXV8K3kKh+C0N0p4xw==} engines: {node: '>=18.0.0'} dependencies: - ansi-escapes: 4.3.2 - ansis: 3.17.0 - clean-stack: 3.0.1 - cli-spinners: 2.9.2 - debug: 4.4.1(supports-color@8.1.1) - ejs: 3.1.10 - get-package-type: 0.1.0 - indent-string: 4.0.0 - is-wsl: 2.2.0 - lilconfig: 3.1.3 - minimatch: 9.0.5 - semver: 7.7.2 - string-width: 4.2.3 - supports-color: 8.1.1 - tinyglobby: 0.2.15 - widest-line: 3.1.0 - wordwrap: 1.0.0 - wrap-ansi: 7.0.0 - dev: true - - /@oclif/linewrap@1.0.0: - resolution: {integrity: 
sha512-Ups2dShK52xXa8w6iBWLgcjPJWjais6KPJQq3gQ/88AY6BXoTX+MIGFPrWQO1KLMiQfoTpcLnUwloN4brrVUHw==} - dev: true - - /@oclif/plugin-help@5.1.20: - resolution: {integrity: sha512-N8xRxE/isFcdBDI8cobixEZA5toxIK5jbxpwALNTr4s8KNAtBA3ORQrSiY0fWGkcv0sCGMwZw7rJ0Izh18JPsw==} - engines: {node: '>=12.0.0'} - dependencies: - '@oclif/core': 1.26.2 + '@oclif/core': 4.5.3 dev: true - /@oclif/plugin-not-found@2.3.23(@types/node@20.14.9)(typescript@5.5.3): - resolution: {integrity: sha512-UZM8aolxXvqwH8WcmJxRNASDWgMoSQm/pgCdkc1AGCRevYc8+LBSO+U6nLWq+Dx8H/dn9RyIv5oiUIOGkKDlZA==} - engines: {node: '>=12.0.0'} + /@oclif/plugin-not-found@3.2.68(@types/node@20.14.9): + resolution: {integrity: sha512-Uv0AiXESEwrIbfN1IA68lcw4/7/L+Z3nFHMHG03jjDXHTVOfpTZDaKyPx/6rf2AL/CIhQQxQF3foDvs6psS3tA==} + engines: {node: '>=18.0.0'} dependencies: - '@oclif/color': 1.0.13 - '@oclif/core': 2.8.11(@types/node@20.14.9)(typescript@5.5.3) + '@inquirer/prompts': 7.8.4(@types/node@20.14.9) + '@oclif/core': 4.5.3 + ansis: 3.17.0 fast-levenshtein: 3.0.0 - lodash: 4.17.21 transitivePeerDependencies: - - '@swc/core' - - '@swc/wasm' - '@types/node' - - typescript dev: true - /@oclif/plugin-plugins@5.4.4: - resolution: {integrity: sha512-p30fo3JPtbOqTJOX9A/8qKV/14XWt8xFgG/goVfIkuKBAO+cdY78ag8pYatlpzsYzJhO27X1MFn0WkkPWo36Ww==} + /@oclif/plugin-plugins@5.4.48: + resolution: {integrity: sha512-He579UlYewPKiqPq4ufq+AhO8NLXChyN7+mQtEs2ExwigIJuKeAEBTNukTwI1B4c5ctMj3a7cAoiY3tUgZTSHg==} engines: {node: '>=18.0.0'} dependencies: - '@oclif/core': 4.5.3 + '@oclif/core': 4.5.4 ansis: 3.17.0 debug: 4.4.1(supports-color@8.1.1) npm: 10.9.3 @@ -5819,29 +6044,18 @@ packages: - supports-color dev: true - /@oclif/plugin-warn-if-update-available@2.0.24(@types/node@20.14.9)(typescript@5.5.3): - resolution: {integrity: sha512-Rq8/EZ8wQawvPWS6W59Zhf/zSz/umLc3q75I1ybi7pul6YMNwf/E1eDVHytSUEQ6yQV+p3cCs034IItz4CVdjw==} - engines: {node: '>=12.0.0'} + /@oclif/plugin-warn-if-update-available@3.1.48: + resolution: {integrity: 
sha512-jZESAAHqJuGcvnyLX0/2WAVDu/WAk1iMth5/o8oviDPzS3a4Ajsd5slxwFb/tg4hbswY9aFoob9wYP4tnP6d8w==} + engines: {node: '>=18.0.0'} dependencies: - '@oclif/core': 2.8.11(@types/node@20.14.9)(typescript@5.5.3) - chalk: 4.1.2 - debug: 4.4.1(supports-color@8.1.1) - fs-extra: 9.1.0 + '@oclif/core': 4.5.3 + ansis: 3.17.0 + debug: 4.4.3(supports-color@8.1.1) http-call: 5.3.0 lodash: 4.17.21 - semver: 7.7.2 + registry-auth-token: 5.1.0 transitivePeerDependencies: - - '@swc/core' - - '@swc/wasm' - - '@types/node' - supports-color - - typescript - dev: true - - /@oclif/screen@3.0.8: - resolution: {integrity: sha512-yx6KAqlt3TAHBduS2fMQtJDL2ufIHnDRArrJEOoTTuizxqmjLT+psGYOHpmMl3gvQpFJ11Hs76guUUktzAF9Bg==} - engines: {node: '>=12.0.0'} - deprecated: Package no longer supported. Contact Support at https://www.npmjs.com/support for more info. dev: true /@octokit/auth-token@2.5.0: @@ -6468,6 +6682,27 @@ packages: engines: {node: '>=16'} dev: false + /@pnpm/config.env-replace@1.1.0: + resolution: {integrity: sha512-htyl8TWnKL7K/ESFa1oW2UB5lVDxuF5DpM7tBi6Hu2LNL3mWkIzNLG6N4zoCUP1lCKNxWy/3iu8mS8MvToGd6w==} + engines: {node: '>=12.22.0'} + dev: true + + /@pnpm/network.ca-file@1.0.2: + resolution: {integrity: sha512-YcPQ8a0jwYU9bTdJDpXjMi7Brhkr1mXsXrUJvjqM2mQDgkRiz8jFaQGOdaLxgjtUfQgZhKy/O3cG/YwmgKaxLA==} + engines: {node: '>=12.22.0'} + dependencies: + graceful-fs: 4.2.10 + dev: true + + /@pnpm/npm-conf@2.3.1: + resolution: {integrity: sha512-c83qWb22rNRuB0UaVCI0uRPNRr8Z0FWnEIvT47jiHAmOIUHbBOg5XvV7pM5x+rKn9HRpjxquDbXYSXr3fAKFcw==} + engines: {node: '>=12'} + dependencies: + '@pnpm/config.env-replace': 1.1.0 + '@pnpm/network.ca-file': 1.0.2 + config-chain: 1.1.13 + dev: true + /@polka/url@1.0.0-next.29: resolution: {integrity: sha512-wwQAWhWSuHaag8c4q/KN/vCoeOJYshAIvMQwD4GpSb3OiZklFfvAgmj0VCBBImRpuF/aFgIRzllXlVX93Jevww==} dev: true @@ -10146,9 +10381,15 @@ packages: /@types/accepts@1.3.7: resolution: {integrity: 
sha512-Pay9fq2lM2wXPWbteBsRAGiWH2hig4ZE2asK+mm7kUzlxRTfL961rj89I6zV/E3PcIkDqyuBEcMxFT7rccugeQ==} dependencies: - '@types/node': 22.14.0 + '@types/node': 20.14.9 dev: false + /@types/archiver@6.0.3: + resolution: {integrity: sha512-a6wUll6k3zX6qs5KlxIggs1P1JcYJaTCx2gnlr+f0S1yd2DoaEwoIK10HmBaLnZwWneBz+JBm0dwcZu0zECBcQ==} + dependencies: + '@types/readdir-glob': 1.1.5 + dev: true + /@types/aria-query@5.0.4: resolution: {integrity: sha512-rfT93uj5s0PRL7EzccGMs3brplhcrghnDoV26NqKhCAS1hVo+WdNsPvE/yb6ilfr5hi2MEk6d5EWJTKdxg8jVw==} dev: true @@ -10157,7 +10398,7 @@ packages: resolution: {integrity: sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==} dependencies: '@types/connect': 3.4.38 - '@types/node': 22.14.0 + '@types/node': 20.14.9 dev: false /@types/chai@5.2.2: @@ -10166,16 +10407,10 @@ packages: '@types/deep-eql': 4.0.2 dev: true - /@types/cli-progress@3.11.6: - resolution: {integrity: sha512-cE3+jb9WRlu+uOSAugewNpITJDt1VF8dHOopPO4IABFc3SXYL5WE/+PTz/FCdZRRfIujiWW3n3aMbv1eIGVRWA==} - dependencies: - '@types/node': 20.14.9 - dev: true - /@types/connect@3.4.38: resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} dependencies: - '@types/node': 22.14.0 + '@types/node': 20.14.9 dev: false /@types/content-disposition@0.5.9: @@ -10203,7 +10438,7 @@ packages: '@types/connect': 3.4.38 '@types/express': 4.17.23 '@types/keygrip': 1.0.6 - '@types/node': 22.14.0 + '@types/node': 20.14.9 dev: false /@types/cors@2.8.19: @@ -10215,14 +10450,79 @@ packages: resolution: {integrity: sha512-Y2Jn2idRrLzUfAKV2LyRImR+y4oa2AntrgID95SHJxuMUrkNXmanDSed71sRNZysveJVt1hLLemQZIady0FpEg==} dev: false + /@types/d3-axis@3.0.6: + resolution: {integrity: sha512-pYeijfZuBd87T0hGn0FO1vQ/cgLk6E1ALJjfkC0oJ8cbwkZl3TpgS8bVBLZN+2jjGgg38epgxb2zmoGtSfvgMw==} + dependencies: + '@types/d3-selection': 3.0.11 + dev: false + + /@types/d3-brush@3.0.6: + resolution: {integrity: 
sha512-nH60IZNNxEcrh6L1ZSMNA28rj27ut/2ZmI3r96Zd+1jrZD++zD3LsMIjWlvg4AYrHn/Pqz4CF3veCxGjtbqt7A==} + dependencies: + '@types/d3-selection': 3.0.11 + dev: false + + /@types/d3-chord@3.0.6: + resolution: {integrity: sha512-LFYWWd8nwfwEmTZG9PfQxd17HbNPksHBiJHaKuY1XeqscXacsS2tyoo6OdRsjf+NQYeB6XrNL3a25E3gH69lcg==} + dev: false + /@types/d3-color@3.1.3: resolution: {integrity: sha512-iO90scth9WAbmgv7ogoq57O9YpKmFBbmoEoCHDB2xMBY0+/KVrqAaCDyCE16dUspeOvIxFFRI+0sEtqDqy2b4A==} dev: false + /@types/d3-contour@3.0.6: + resolution: {integrity: sha512-BjzLgXGnCWjUSYGfH1cpdo41/hgdWETu4YxpezoztawmqsvCeep+8QGfiY6YbDvfgHz/DkjeIkkZVJavB4a3rg==} + dependencies: + '@types/d3-array': 3.2.1 + '@types/geojson': 7946.0.16 + dev: false + + /@types/d3-delaunay@6.0.4: + resolution: {integrity: sha512-ZMaSKu4THYCU6sV64Lhg6qjf1orxBthaC161plr5KuPHo3CNm8DTHiLw/5Eq2b6TsNP0W0iJrUOFscY6Q450Hw==} + dev: false + + /@types/d3-dispatch@3.0.7: + resolution: {integrity: sha512-5o9OIAdKkhN1QItV2oqaE5KMIiXAvDWBDPrD85e58Qlz1c1kI/J0NcqbEG88CoTwJrYe7ntUCVfeUl2UJKbWgA==} + dev: false + + /@types/d3-drag@3.0.7: + resolution: {integrity: sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==} + dependencies: + '@types/d3-selection': 3.0.11 + dev: false + + /@types/d3-dsv@3.0.7: + resolution: {integrity: sha512-n6QBF9/+XASqcKK6waudgL0pf/S5XHPPI8APyMLLUHd8NqouBGLsU8MgtO7NINGtPBtk9Kko/W4ea0oAspwh9g==} + dev: false + /@types/d3-ease@3.0.2: resolution: {integrity: sha512-NcV1JjO5oDzoK26oMzbILE6HW7uVXOHLQvHshBUW4UMdZGfiY6v5BeQwh9a9tCzv+CeefZQHJt5SRgK154RtiA==} dev: false + /@types/d3-fetch@3.0.7: + resolution: {integrity: sha512-fTAfNmxSb9SOWNB9IoG5c8Hg6R+AzUHDRlsXsDZsNp6sxAEOP0tkP3gKkNSO/qmHPoBFTxNrjDprVHDQDvo5aA==} + dependencies: + '@types/d3-dsv': 3.0.7 + dev: false + + /@types/d3-force@3.0.10: + resolution: {integrity: sha512-ZYeSaCF3p73RdOKcjj+swRlZfnYpK1EbaDiYICEEp5Q6sUiqFaFQ9qgoshp5CzIyyb/yD09kD9o2zEltCexlgw==} + dev: false + + /@types/d3-format@3.0.4: + resolution: 
{integrity: sha512-fALi2aI6shfg7vM5KiR1wNJnZ7r6UuggVqtDA+xiEdPZQwy/trcQaHnwShLuLdta2rTymCNpxYTiMZX/e09F4g==} + dev: false + + /@types/d3-geo@3.1.0: + resolution: {integrity: sha512-856sckF0oP/diXtS4jNsiQw/UuK5fQG8l/a9VVLeSouf1/PPbBE1i1W852zVwKwYCBkFJJB7nCFTbk6UMEXBOQ==} + dependencies: + '@types/geojson': 7946.0.16 + dev: false + + /@types/d3-hierarchy@3.1.7: + resolution: {integrity: sha512-tJFtNoYBtRtkNysX1Xq4sxtjK8YgoWUNpIiUee0/jHGRwqvzYxkq0hGVbbOGSz+JgFxxRu4K8nb3YpG3CMARtg==} + dev: false + /@types/d3-interpolate@3.0.4: resolution: {integrity: sha512-mgLPETlrpVV1YRJIglr4Ez47g7Yxjl1lj7YKsiMCb27VJH9W8NVM6Bb9d8kkpG/uAQS5AmbA48q2IAolKKo1MA==} dependencies: @@ -10233,18 +10533,42 @@ packages: resolution: {integrity: sha512-VMZBYyQvbGmWyWVea0EHs/BwLgxc+MKi1zLDCONksozI4YJMcTt8ZEuIR4Sb1MMTE8MMW49v0IwI5+b7RmfWlg==} dev: false + /@types/d3-polygon@3.0.2: + resolution: {integrity: sha512-ZuWOtMaHCkN9xoeEMr1ubW2nGWsp4nIql+OPQRstu4ypeZ+zk3YKqQT0CXVe/PYqrKpZAi+J9mTs05TKwjXSRA==} + dev: false + + /@types/d3-quadtree@3.0.6: + resolution: {integrity: sha512-oUzyO1/Zm6rsxKRHA1vH0NEDG58HrT5icx/azi9MF1TWdtttWl0UIUsjEQBBh+SIkrpd21ZjEv7ptxWys1ncsg==} + dev: false + + /@types/d3-random@3.0.3: + resolution: {integrity: sha512-Imagg1vJ3y76Y2ea0871wpabqp613+8/r0mCLEBfdtqC7xMSfj9idOnmBYyMoULfHePJyxMAw3nWhJxzc+LFwQ==} + dev: false + + /@types/d3-scale-chromatic@3.1.0: + resolution: {integrity: sha512-iWMJgwkK7yTRmWqRB5plb1kadXyQ5Sj8V/zYlFGMUBbIPKQScw+Dku9cAAMgJG+z5GYDoMjWGLVOvjghDEFnKQ==} + dev: false + /@types/d3-scale@4.0.9: resolution: {integrity: sha512-dLmtwB8zkAeO/juAMfnV+sItKjlsw2lKdZVVy6LRr0cBmegxSABiLEpGVmSJJ8O08i4+sGR6qQtb6WtuwJdvVw==} dependencies: '@types/d3-time': 3.0.4 dev: false + /@types/d3-selection@3.0.11: + resolution: {integrity: sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==} + dev: false + /@types/d3-shape@3.1.7: resolution: {integrity: 
sha512-VLvUQ33C+3J+8p+Daf+nYSOsjB4GXp19/S/aGo60m9h1v6XaxjiT82lKVWJCfzhtuZ3yD7i/TPeC/fuKLLOSmg==} dependencies: '@types/d3-path': 3.1.1 dev: false + /@types/d3-time-format@4.0.3: + resolution: {integrity: sha512-5xg9rC+wWL8kdDj153qZcsJ0FWiFt0J5RB6LYUNZjwSnesfblqrI/bJ1wBdJ8OQfncgbJG5+2F+qfqnqyzYxyg==} + dev: false + /@types/d3-time@3.0.4: resolution: {integrity: sha512-yuzZug1nkAAaBlBBikKZTgzCeA+k1uy4ZFwWANOfKw5z5LRhV0gNA7gNkKm7HoK+HRN0wX3EkxGk0fpbWhmB7g==} dev: false @@ -10257,6 +10581,54 @@ packages: resolution: {integrity: sha512-Ps3T8E8dZDam6fUyNiMkekK3XUsaUEik+idO9/YjPtfj2qruF8tFBXS7XhtE4iIXBLxhmLjP3SXpLhVf21I9Lw==} dev: false + /@types/d3-transition@3.0.9: + resolution: {integrity: sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==} + dependencies: + '@types/d3-selection': 3.0.11 + dev: false + + /@types/d3-zoom@3.0.8: + resolution: {integrity: sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==} + dependencies: + '@types/d3-interpolate': 3.0.4 + '@types/d3-selection': 3.0.11 + dev: false + + /@types/d3@7.4.3: + resolution: {integrity: sha512-lZXZ9ckh5R8uiFVt8ogUNf+pIrK4EsWrx2Np75WvF/eTpJ0FMHNhjXk8CKEx/+gpHbNQyJWehbFaTvqmHWB3ww==} + dependencies: + '@types/d3-array': 3.2.1 + '@types/d3-axis': 3.0.6 + '@types/d3-brush': 3.0.6 + '@types/d3-chord': 3.0.6 + '@types/d3-color': 3.1.3 + '@types/d3-contour': 3.0.6 + '@types/d3-delaunay': 6.0.4 + '@types/d3-dispatch': 3.0.7 + '@types/d3-drag': 3.0.7 + '@types/d3-dsv': 3.0.7 + '@types/d3-ease': 3.0.2 + '@types/d3-fetch': 3.0.7 + '@types/d3-force': 3.0.10 + '@types/d3-format': 3.0.4 + '@types/d3-geo': 3.1.0 + '@types/d3-hierarchy': 3.1.7 + '@types/d3-interpolate': 3.0.4 + '@types/d3-path': 3.1.1 + '@types/d3-polygon': 3.0.2 + '@types/d3-quadtree': 3.0.6 + '@types/d3-random': 3.0.3 + '@types/d3-scale': 4.0.9 + '@types/d3-scale-chromatic': 3.1.0 + '@types/d3-selection': 3.0.11 + '@types/d3-shape': 3.1.7 + '@types/d3-time': 3.0.4 + 
'@types/d3-time-format': 4.0.3 + '@types/d3-timer': 3.0.2 + '@types/d3-transition': 3.0.9 + '@types/d3-zoom': 3.0.8 + dev: false + /@types/debug@4.1.12: resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} dependencies: @@ -10316,7 +10688,7 @@ packages: /@types/express-serve-static-core@4.19.6: resolution: {integrity: sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==} dependencies: - '@types/node': 22.14.0 + '@types/node': 20.14.9 '@types/qs': 6.14.0 '@types/range-parser': 1.2.7 '@types/send': 0.17.5 @@ -10331,6 +10703,10 @@ packages: '@types/serve-static': 1.15.8 dev: false + /@types/geojson@7946.0.16: + resolution: {integrity: sha512-6C8nqWur3j98U6+lXDfTUWIfgvZU+EumvpHKcYjujKH7woYyLj2sUmff0tRhrqM7BohUw7Pz3ZB1jj2gW9Fvmg==} + dev: false + /@types/hast@3.0.4: resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} dependencies: @@ -10467,18 +10843,30 @@ packages: '@types/prop-types': 15.7.15 csstype: 3.1.3 + /@types/readable-stream@4.0.21: + resolution: {integrity: sha512-19eKVv9tugr03IgfXlA9UVUVRbW6IuqRO5B92Dl4a6pT7K8uaGrNS0GkxiZD0BOk6PLuXl5FhWl//eX/pzYdTQ==} + dependencies: + '@types/node': 20.14.9 + dev: true + + /@types/readdir-glob@1.1.5: + resolution: {integrity: sha512-raiuEPUYqXu+nvtY2Pe8s8FEmZ3x5yAH4VkLdihcPdalvsHltomrRC9BzuStrJ9yk06470hS0Crw0f1pXqD+Hg==} + dependencies: + '@types/node': 20.14.9 + dev: true + /@types/send@0.17.5: resolution: {integrity: sha512-z6F2D3cOStZvuk2SaP6YrwkNO65iTZcwA2ZkSABegdkAh/lf+Aa/YQndZVfmEXT5vgAp6zv06VQ3ejSVjAny4w==} dependencies: '@types/mime': 1.3.5 - '@types/node': 22.14.0 + '@types/node': 20.14.9 dev: false /@types/serve-static@1.15.8: resolution: {integrity: sha512-roei0UY3LhpOJvjbIP6ZZFngyLKl5dskOtDhxY5THRSpO+ZI+nzJ+m5yUMzGrp89YRa7lvknKkMYjqQFGwA7Sg==} dependencies: '@types/http-errors': 2.0.5 - '@types/node': 22.14.0 + '@types/node': 20.14.9 
'@types/send': 0.17.5 dev: false @@ -10505,6 +10893,12 @@ packages: '@types/node': 18.19.124 dev: true + /@types/trusted-types@2.0.7: + resolution: {integrity: sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==} + requiresBuild: true + dev: false + optional: true + /@types/unist@2.0.11: resolution: {integrity: sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} @@ -10542,39 +10936,61 @@ packages: dev: true optional: true - /@typescript-eslint/types@6.19.0: - resolution: {integrity: sha512-lFviGV/vYhOy3m8BJ/nAKoAyNhInTdXpftonhWle66XHAtT1ouBlkjL496b5H5hb8dWXHwtypTqgtb/DEa+j5A==} - engines: {node: ^16.0.0 || >=18.0.0} + /@typescript-eslint/project-service@8.45.0(typescript@5.5.3): + resolution: {integrity: sha512-3pcVHwMG/iA8afdGLMuTibGR7pDsn9RjDev6CCB+naRsSYs2pns5QbinF4Xqw6YC/Sj3lMrm/Im0eMfaa61WUg==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' + dependencies: + '@typescript-eslint/tsconfig-utils': 8.45.0(typescript@5.5.3) + '@typescript-eslint/types': 8.45.0 + debug: 4.4.1(supports-color@8.1.1) + typescript: 5.5.3 + transitivePeerDependencies: + - supports-color dev: true - /@typescript-eslint/typescript-estree@6.19.0(typescript@5.5.3): - resolution: {integrity: sha512-o/zefXIbbLBZ8YJ51NlkSAt2BamrK6XOmuxSR3hynMIzzyMY33KuJ9vuMdFSXW+H0tVvdF9qBPTHA91HDb4BIQ==} - engines: {node: ^16.0.0 || >=18.0.0} + /@typescript-eslint/tsconfig-utils@8.45.0(typescript@5.5.3): + resolution: {integrity: sha512-aFdr+c37sc+jqNMGhH+ajxPXwjv9UtFZk79k8pLoJ6p4y0snmYpPA52GuWHgt2ZF4gRRW6odsEj41uZLojDt5w==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} peerDependencies: - typescript: '*' - peerDependenciesMeta: - typescript: - optional: true + typescript: '>=4.8.4 <6.0.0' + dependencies: + typescript: 5.5.3 + dev: true + + /@typescript-eslint/types@8.45.0: + resolution: {integrity: 
sha512-WugXLuOIq67BMgQInIxxnsSyRLFxdkJEJu8r4ngLR56q/4Q5LrbfkFRH27vMTjxEK8Pyz7QfzuZe/G15qQnVRA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + dev: true + + /@typescript-eslint/typescript-estree@8.45.0(typescript@5.5.3): + resolution: {integrity: sha512-GfE1NfVbLam6XQ0LcERKwdTTPlLvHvXXhOeUGC1OXi4eQBoyy1iVsW+uzJ/J9jtCz6/7GCQ9MtrQ0fml/jWCnA==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} + peerDependencies: + typescript: '>=4.8.4 <6.0.0' dependencies: - '@typescript-eslint/types': 6.19.0 - '@typescript-eslint/visitor-keys': 6.19.0 + '@typescript-eslint/project-service': 8.45.0(typescript@5.5.3) + '@typescript-eslint/tsconfig-utils': 8.45.0(typescript@5.5.3) + '@typescript-eslint/types': 8.45.0 + '@typescript-eslint/visitor-keys': 8.45.0 debug: 4.4.1(supports-color@8.1.1) - globby: 11.1.0 + fast-glob: 3.3.3 is-glob: 4.0.3 - minimatch: 9.0.3 + minimatch: 9.0.5 semver: 7.7.2 - ts-api-utils: 1.4.3(typescript@5.5.3) + ts-api-utils: 2.1.0(typescript@5.5.3) typescript: 5.5.3 transitivePeerDependencies: - supports-color dev: true - /@typescript-eslint/visitor-keys@6.19.0: - resolution: {integrity: sha512-hZaUCORLgubBvtGpp1JEFEazcuEdfxta9j4iUwdSAr7mEsYYAp3EAUyCZk3VEEqGj6W+AV4uWyrDGtrlawAsgQ==} - engines: {node: ^16.0.0 || >=18.0.0} + /@typescript-eslint/visitor-keys@8.45.0: + resolution: {integrity: sha512-qsaFBA3e09MIDAGFUrTk+dzqtfv1XPVz8t8d1f0ybTzrCY7BKiMC5cjrl1O/P7UmHsNyW90EYSkU/ZWpmXelag==} + engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} dependencies: - '@typescript-eslint/types': 6.19.0 - eslint-visitor-keys: 3.4.3 + '@typescript-eslint/types': 8.45.0 + eslint-visitor-keys: 4.2.1 dev: true /@typescript/vfs@1.6.1(typescript@4.5.2): @@ -11050,11 +11466,6 @@ packages: dependencies: acorn: 8.15.0 - /acorn-walk@8.2.0: - resolution: {integrity: sha512-k+iyHEuPgSw6SbuDpGQM+06HQUa04DZ3o+F6CSzXMvvI5KMvnaEqXe+YVe555R9nn6GPt404fos4wcgpw12SDA==} - engines: {node: '>=0.4.0'} - dev: true - /acorn-walk@8.3.2: resolution: {integrity: 
sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A==} engines: {node: '>=0.4.0'} @@ -11078,12 +11489,6 @@ packages: engines: {node: '>=0.4.0'} hasBin: true - /acorn@8.8.1: - resolution: {integrity: sha512-7zFpHzhnqYKrkYdUjF1HI1bzd0VygEGX8lFk4k5zVMqHEoES+P+7TKI+EvLO9WVMJ8eekdO0aDEK044xTXwPPA==} - engines: {node: '>=0.4.0'} - hasBin: true - dev: true - /address@1.2.2: resolution: {integrity: sha512-4B/qKCfeE/ODUaAUpSwfzazo5x29WD4r3vXiWsB7I2mSDAihwEqKO+g8GELZUQSSAo5e1XTYh3ZVfLyxBc12nA==} engines: {node: '>= 10.0.0'} @@ -11306,10 +11711,6 @@ packages: resolution: {integrity: sha512-4Dj6M28JB+oAH8kFkTLUo+a2jwOFkuqb3yucU0CANcRRUbxS0cP0nZYCGjcc3BNXwRIsUVmDGgzawme7zvJHvg==} engines: {node: '>=12'} - /ansicolors@0.3.2: - resolution: {integrity: sha512-QXu7BPrP29VllRxH8GwB7x5iX5qWKAAMLqKQGWTeLWVlNHNOpVMJ91dsxQAIWXpjuW5wqvxu3Jd/nRjrJ+0pqg==} - dev: true - /ansis@3.17.0: resolution: {integrity: sha512-0qWUglt9JEqLFr3w1I1pbrChn1grhaiAR2ocX1PP/flRmxgtwTzPFFFnfIlD6aMOLQZgSuCRlidD70lvx8yhzg==} engines: {node: '>=14'} @@ -11437,16 +11838,6 @@ packages: tslib: 2.8.1 dev: false - /assert@2.1.0: - resolution: {integrity: sha512-eLHpSK/Y4nhMJ07gDaAzoX/XAKS8PSaojml3M0DM4JpV1LAi5JOJ/p6H/XWrl8L+DzVEvVCW1z3vWAaB9oTsQw==} - dependencies: - call-bind: 1.0.8 - is-nan: 1.3.2 - object-is: 1.1.6 - object.assign: 4.1.7 - util: 0.12.5 - dev: true - /assertion-error@1.1.0: resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} dev: true @@ -11482,16 +11873,6 @@ packages: resolution: {integrity: sha512-Az2ZTpuytrtqENulXwO3GGv1Bztugx6TT37NIo7imr/Qo0gsYiGtSdBa2B6fsXhTpVZDNfu1Qn3pk531e3q+nQ==} dev: true - /async-mqtt@2.6.3: - resolution: {integrity: sha512-mFGTtlEpOugOoLOf9H5AJyJaZUNtOVXLGGOnPaPZDPQex6W6iIOgtV+fAgam0GQbgnLfgX+Wn/QzS6d+PYfFAQ==} - dependencies: - mqtt: 4.3.8 - transitivePeerDependencies: - - bufferutil - - supports-color - - utf-8-validate - dev: true - /async@3.2.6: resolution: 
{integrity: sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==} dev: true @@ -11586,16 +11967,6 @@ packages: transitivePeerDependencies: - debug - /axios@1.7.4: - resolution: {integrity: sha512-DukmaFRnY6AzAALSH4J2M3k6PkaC+MfaAGdEERRWcC9q3/TWQwLpHR8ZRLKTdQ3aBDL64EdluRDjJqKw+BPZEw==} - dependencies: - follow-redirects: 1.15.11 - form-data: 4.0.4 - proxy-from-env: 1.1.0 - transitivePeerDependencies: - - debug - dev: true - /axobject-query@4.1.0: resolution: {integrity: sha512-qIj0G9wZbMGNLjLmg1PT6v2mE9AH2zlnADJD/2tC6E00hgmhUOfEB6greHPAfLRSufHqROIUTkw6E+M3lH0PTQ==} engines: {node: '>= 0.4'} @@ -11728,6 +12099,15 @@ packages: inherits: 2.0.4 readable-stream: 3.6.2 + /bl@6.1.3: + resolution: {integrity: sha512-nHB8B5roHlGX5TFsWeiQJijdddZIOHuv1eL2cM2kHnG3qR91CYLsysGe+CvxQfEd23EKD0eJf4lto0frTbddKA==} + dependencies: + '@types/readable-stream': 4.0.21 + buffer: 6.0.3 + inherits: 2.0.4 + readable-stream: 4.7.0 + dev: true + /blake3-wasm@2.1.5: resolution: {integrity: sha512-F1+K8EbfOZE49dtoPtmxUQrpXaBIl3ICvasLh+nJta0xkz+9kF/7uet9fLnwKqhDrmj6g+6K3Tw9yQPUg2ka5g==} dev: true @@ -11769,6 +12149,15 @@ packages: dependencies: fill-range: 7.1.1 + /broker-factory@3.1.10: + resolution: {integrity: sha512-BzqK5GYFhvVFvO13uzPN0SCiOsOQuhMUbsGvTXDJMA2/N4GvIlFdxEuueE+60Zk841bBU5G3+fl2cqYEo0wgGg==} + dependencies: + '@babel/runtime': 7.28.4 + fast-unique-numbers: 9.0.24 + tslib: 2.8.1 + worker-factory: 7.0.46 + dev: true + /browserslist@4.25.3: resolution: {integrity: sha512-cDGv1kkDI4/0e5yON9yM5G/0A5u8sf5TnmdX5C9qHzI9PPu++sQ9zjm1k9NiOrf3riY4OkK0zSGqfvJyJsgCBQ==} engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} @@ -11921,14 +12310,6 @@ packages: resolution: {integrity: sha512-QGUGitqsc8ARjLdgAfxETDhRbJ0REsP6O3I96TAth/mVjh2cYzN2u+3AzPP3aVSm2FehEItaJw1xd+IGBXWeSw==} dev: false - /cardinal@2.1.1: - resolution: {integrity: sha512-JSr5eOgoEymtYHBjNWyjrMqet9Am2miJhlfKNdqLp6zoeAh0KN5dRAcxlecj5mAJrmQomgiOBj35xHLrFjqBpw==} - hasBin: 
true - dependencies: - ansicolors: 0.3.2 - redeyed: 2.1.1 - dev: true - /ccount@2.0.1: resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==} @@ -12031,42 +12412,51 @@ packages: engines: {node: '>= 16'} dev: true - /checkly@4.9.0(@types/node@20.14.9)(typescript@5.5.3): - resolution: {integrity: sha512-LqohEntErF7dJaJPsEpjvr/O9wUfzBRac6DOXgFDMEw+dNi19oBAcspdOqVGjPjMoCZ9/s5b5tSJI1pusY4mJQ==} - engines: {node: '>=16.0.0'} + /checkly@6.5.0(@types/node@20.14.9)(typescript@5.5.3): + resolution: {integrity: sha512-i90/P+sWL6CmTBRGOHXFAogisrDdomE928XKSaw2g4t9e2dVGcaDfQ72ZdXFCm27gVQd7siXPGrO6LNFwyEsWg==} + engines: {node: ^18.19.0 || >=20.5.0} hasBin: true + peerDependencies: + jiti: '>=2' + peerDependenciesMeta: + jiti: + optional: true dependencies: - '@oclif/core': 2.8.11(@types/node@20.14.9)(typescript@5.5.3) - '@oclif/plugin-help': 5.1.20 - '@oclif/plugin-not-found': 2.3.23(@types/node@20.14.9)(typescript@5.5.3) - '@oclif/plugin-plugins': 5.4.4 - '@oclif/plugin-warn-if-update-available': 2.0.24(@types/node@20.14.9)(typescript@5.5.3) - '@typescript-eslint/typescript-estree': 6.19.0(typescript@5.5.3) - acorn: 8.8.1 - acorn-walk: 8.2.0 - async-mqtt: 2.6.3 - axios: 1.7.4 + '@oclif/core': 4.5.3 + '@oclif/plugin-help': 6.2.33 + '@oclif/plugin-not-found': 3.2.68(@types/node@20.14.9) + '@oclif/plugin-plugins': 5.4.48 + '@oclif/plugin-warn-if-update-available': 3.1.48 + '@types/archiver': 6.0.3 + '@typescript-eslint/typescript-estree': 8.45.0(typescript@5.5.3) + acorn: 8.15.0 + acorn-walk: 8.3.4 + archiver: 7.0.1 + axios: 1.11.0 chalk: 4.1.2 - ci-info: 3.8.0 + ci-info: 4.3.0 conf: 10.2.0 - dotenv: 16.3.1 + dotenv: 16.6.1 + execa: 9.6.0 git-repo-info: 2.1.1 - glob: 10.3.1 + glob: 10.4.5 indent-string: 4.0.0 + json-stream-stringify: 3.1.6 json5: 2.2.3 jwt-decode: 3.1.2 log-symbols: 4.1.0 - luxon: 3.3.0 - open: 8.4.0 + luxon: 3.7.2 + minimatch: 9.0.5 + mqtt: 5.14.1 + open: 8.4.2 p-queue: 6.6.2 prompts: 2.4.2 
proxy-from-env: 1.1.0 - recast: 0.23.4 + recast: 0.23.11 + semver: 7.7.2 tunnel: 0.0.6 - uuid: 9.0.0 + uuid: 11.1.0 transitivePeerDependencies: - - '@swc/core' - - '@swc/wasm' - '@types/node' - bufferutil - debug @@ -12075,6 +12465,26 @@ packages: - utf-8-validate dev: true + /chevrotain-allstar@0.3.1(chevrotain@11.0.3): + resolution: {integrity: sha512-b7g+y9A0v4mxCW1qUhf3BSVPg+/NvGErk/dOkrDaHA0nQIQGAtrOjlX//9OQtRlSCy+x9rfB5N8yC71lH1nvMw==} + peerDependencies: + chevrotain: ^11.0.0 + dependencies: + chevrotain: 11.0.3 + lodash-es: 4.17.21 + dev: false + + /chevrotain@11.0.3: + resolution: {integrity: sha512-ci2iJH6LeIkvP9eJW6gpueU8cnZhv85ELY8w8WiFtNjMHA5ad6pQLaJo9mEly/9qUyCpvqX8/POVUTf18/HFdw==} + dependencies: + '@chevrotain/cst-dts-gen': 11.0.3 + '@chevrotain/gast': 11.0.3 + '@chevrotain/regexp-to-ast': 11.0.3 + '@chevrotain/types': 11.0.3 + '@chevrotain/utils': 11.0.3 + lodash-es: 4.17.21 + dev: false + /chokidar@3.5.3: resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} engines: {node: '>= 8.10.0'} @@ -12141,13 +12551,13 @@ packages: zod: 3.23.8 dev: true - /ci-info@3.8.0: - resolution: {integrity: sha512-eXTggHWSooYhq49F2opQhuHWgzucfF2YgODK4e1566GQs5BIfP30B0oenwBJHfWxAs2fyPB1s7Mg949zLf61Yw==} + /ci-info@3.9.0: + resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==} engines: {node: '>=8'} dev: true - /ci-info@3.9.0: - resolution: {integrity: sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==} + /ci-info@4.3.0: + resolution: {integrity: sha512-l+2bNRMiQgcfILUi33labAZYIWlH1kWDp+ecNo5iisRKrbm0xcRyCww71/YU0Fkw0mAFpz9bJayXPjey6vkmaQ==} engines: {node: '>=8'} dev: true @@ -12204,13 +12614,6 @@ packages: restore-cursor: 4.0.0 dev: true - /cli-progress@3.12.0: - resolution: {integrity: sha512-tRkV3HJ1ASwm19THiiLIXLO7Im7wlTuKnvkYaTkyoAPefqjNg7W7DHKUlGRxy9vxDvbyCYQkQozvptuMkGCg8A==} - 
engines: {node: '>=4'} - dependencies: - string-width: 4.2.3 - dev: true - /cli-spinners@2.9.2: resolution: {integrity: sha512-ywqV+5MmyL4E7ybXgKys4DugZbX0FC6LnwrhjuykIjnK9k8OQacQ7axGKnjDXWNhns0xot3bZI5h55H8yo9cJg==} engines: {node: '>=6'} @@ -12392,16 +12795,17 @@ packages: resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} engines: {node: '>= 6'} + /commander@7.2.0: + resolution: {integrity: sha512-QrWXB+ZQSVPmIWIhtEO9H+gwHaMGYiF5ChvoJ+K9ZGHG/sVsa6yiesAD1GC/x46sET00Xlwo1u49RVVVzvcSkw==} + engines: {node: '>= 10'} + dev: false + /commander@8.3.0: resolution: {integrity: sha512-OkTL9umf+He2DZkUq8f8J9of7yL6RJKI24dVITBmNfZBmri9zYZQrKkuXiKhyfPSu8tUhnVBB1iKXevvnlR4Ww==} engines: {node: '>= 12'} - dev: true - /commist@1.1.0: - resolution: {integrity: sha512-rraC8NXWOEjhADbZe9QBNzLAN5Q3fsTPQtBV+fEVj6xKIgDgNiEVE6ZNfHpZOqfQ21YUzfVNUXLOEZquYvQPPg==} - dependencies: - leven: 2.1.0 - minimist: 1.2.8 + /commist@3.2.0: + resolution: {integrity: sha512-4PIMoPniho+LqXmpS5d3NuGYncG6XWlkBSVGiWycL22dd42OYdUGil2CWuzklaJoNxyxUSpO4MKIBU94viWNAw==} dev: true /commitizen@4.3.1(@types/node@22.14.0)(typescript@5.7.3): @@ -12474,14 +12878,16 @@ packages: /confbox@0.1.8: resolution: {integrity: sha512-RMtmw0iFkeR4YV+fUOSucriAQNb9g8zFR52MWCtl+cCZOFRNL6zeB395vPzFhEjjn4fMxXudmELnl/KF/WrK6w==} - dev: true + + /confbox@0.2.2: + resolution: {integrity: sha512-1NB+BKqhtNipMsov4xI/NnhCKp9XG9NamYp5PVm9klAT0fsrNPjaFICsCFhNhwZJKNh7zB/3q8qXz0E9oaMNtQ==} + dev: false /config-chain@1.1.13: resolution: {integrity: sha512-qj+f8APARXHrM0hraqXYb2/bOVSV4PvJQlNZ/DVj0QrmNM2q2euizkeuVckQ57J+W0mRH6Hvi+k50M4Jul2VRQ==} dependencies: ini: 1.3.8 proto-list: 1.2.4 - dev: false /consola@3.4.2: resolution: {integrity: sha512-5IKcdX0nnYavi6G7TtOhwkYzyjfJlatbjMjuLSfE2kYT5pMDOilZ4OvMhi637CcDICTmz3wARPoyhqyX1Y+XvA==} @@ -12560,6 +12966,18 @@ packages: object-assign: 4.1.1 vary: 1.1.2 + /cose-base@1.0.3: + resolution: {integrity: 
sha512-s9whTXInMSgAp/NVXVNuVxVKzGH2qck3aQlVHxDCdAEPgtMKwc4Wq6/QKhgdEdgbLSi9rBTAcPoRa6JpiG4ksg==} + dependencies: + layout-base: 1.0.2 + dev: false + + /cose-base@2.2.0: + resolution: {integrity: sha512-AzlgcsCbUMymkADOJtQm3wO9S3ltPfYOFD5033keQn9NJzIbtnZj+UdBJe7DYml/8TdbtHJW3j58SOnKhWY/5g==} + dependencies: + layout-base: 2.0.1 + dev: false + /cosmiconfig-typescript-loader@6.1.0(@types/node@22.14.0)(cosmiconfig@9.0.0)(typescript@5.7.3): resolution: {integrity: sha512-tJ1w35ZRUiM5FeTzT7DtYWAFFv37ZLqSRkGi2oeCK1gPhvaWjkAtfXvLmvE1pRfxxp9aQo6ba/Pvg1dKj05D4g==} engines: {node: '>=v18'} @@ -12676,6 +13094,29 @@ packages: /csstype@3.1.3: resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + /cytoscape-cose-bilkent@4.1.0(cytoscape@3.33.1): + resolution: {integrity: sha512-wgQlVIUJF13Quxiv5e1gstZ08rnZj2XaLHGoFMYXz7SkNfCDOOteKBE6SYRfA9WxxI/iBc3ajfDoc6hb/MRAHQ==} + peerDependencies: + cytoscape: ^3.2.0 + dependencies: + cose-base: 1.0.3 + cytoscape: 3.33.1 + dev: false + + /cytoscape-fcose@2.2.0(cytoscape@3.33.1): + resolution: {integrity: sha512-ki1/VuRIHFCzxWNrsshHYPs6L7TvLu3DL+TyIGEsRcvVERmxokbf5Gdk7mFxZnTdiGtnA4cfSmjZJMviqSuZrQ==} + peerDependencies: + cytoscape: ^3.2.0 + dependencies: + cose-base: 2.2.0 + cytoscape: 3.33.1 + dev: false + + /cytoscape@3.33.1: + resolution: {integrity: sha512-iJc4TwyANnOGR1OmWhsS9ayRS3s+XQ185FmuHObThD+5AeJCakAAbWv8KimMTt08xCCLNgneQwFp+JRJOr9qGQ==} + engines: {node: '>=0.10'} + dev: false + /cz-conventional-changelog@3.3.0(@types/node@22.14.0)(typescript@5.7.3): resolution: {integrity: sha512-U466fIzU5U22eES5lTNiNbZ+d8dfcHcssH4o7QsdWaCcRs/feIPCxKYSWkYBNs5mny7MvEfwpTLWjvbm94hecw==} engines: {node: '>= 10'} @@ -12693,6 +13134,12 @@ packages: - typescript dev: true + /d3-array@2.12.1: + resolution: {integrity: sha512-B0ErZK/66mHtEsR1TkPEEkwdy+WDesimkM5gpZr5Dsg54BiTA5RXtYW5qTLIAcekaS9xfZrzBLF/OAkB3Qn1YQ==} + dependencies: + internmap: 1.0.1 + dev: false + /d3-array@3.2.4: 
resolution: {integrity: sha512-tdQAmyA18i4J7wprpYq8ClcxZy3SC31QMeByyCFyRt7BVHdREQZ5lpzoe5mFEYZUWe+oq8HBvk9JjpibyEV4Jg==} engines: {node: '>=12'} @@ -12700,11 +13147,71 @@ packages: internmap: 2.0.3 dev: false + /d3-axis@3.0.0: + resolution: {integrity: sha512-IH5tgjV4jE/GhHkRV0HiVYPDtvfjHQlQfJHs0usq7M30XcSBvOotpmH1IgkcXsO/5gEQZD43B//fc7SRT5S+xw==} + engines: {node: '>=12'} + dev: false + + /d3-brush@3.0.0: + resolution: {integrity: sha512-ALnjWlVYkXsVIGlOsuWH1+3udkYFI48Ljihfnh8FZPF2QS9o+PzGLBslO0PjzVoHLZ2KCVgAM8NVkXPJB2aNnQ==} + engines: {node: '>=12'} + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + dev: false + + /d3-chord@3.0.1: + resolution: {integrity: sha512-VE5S6TNa+j8msksl7HwjxMHDM2yNK3XCkusIlpX5kwauBfXuyLAtNg9jCp/iHH61tgI4sb6R/EIMWCqEIdjT/g==} + engines: {node: '>=12'} + dependencies: + d3-path: 3.1.0 + dev: false + /d3-color@3.1.0: resolution: {integrity: sha512-zg/chbXyeBtMQ1LbD/WSoW2DpC3I0mpmPdW+ynRTj/x2DAWYrIY7qeZIHidozwV24m4iavr15lNwIwLxRmOxhA==} engines: {node: '>=12'} dev: false + /d3-contour@4.0.2: + resolution: {integrity: sha512-4EzFTRIikzs47RGmdxbeUvLWtGedDUNkTcmzoeyg4sP/dvCexO47AaQL7VKy/gul85TOxw+IBgA8US2xwbToNA==} + engines: {node: '>=12'} + dependencies: + d3-array: 3.2.4 + dev: false + + /d3-delaunay@6.0.4: + resolution: {integrity: sha512-mdjtIZ1XLAM8bm/hx3WwjfHt6Sggek7qH043O8KEjDXN40xi3vx/6pYSVTwLjEgiXQTbvaouWKynLBiUZ6SK6A==} + engines: {node: '>=12'} + dependencies: + delaunator: 5.0.1 + dev: false + + /d3-dispatch@3.0.1: + resolution: {integrity: sha512-rzUyPU/S7rwUflMyLc1ETDeBj0NRuHKKAcvukozwhshr6g6c5d8zh4c2gQjY2bZ0dXeGLWc1PF174P2tVvKhfg==} + engines: {node: '>=12'} + dev: false + + /d3-drag@3.0.0: + resolution: {integrity: sha512-pWbUJLdETVA8lQNJecMxoXfH6x+mO2UQo8rSmZ+QqxcbyA3hfeprFgIT//HW2nlHChWeIIMwS2Fq+gEARkhTkg==} + engines: {node: '>=12'} + dependencies: + d3-dispatch: 3.0.1 + d3-selection: 3.0.0 + dev: false + + /d3-dsv@3.0.1: + 
resolution: {integrity: sha512-UG6OvdI5afDIFP9w4G0mNq50dSOsXHJaRE8arAS5o9ApWnIElp8GZw1Dun8vP8OyHOZ/QJUKUJwxiiCCnUwm+Q==} + engines: {node: '>=12'} + hasBin: true + dependencies: + commander: 7.2.0 + iconv-lite: 0.6.3 + rw: 1.3.3 + dev: false + /d3-ease@1.0.7: resolution: {integrity: sha512-lx14ZPYkhNx0s/2HX5sLFUI3mbasHjSSpwO/KaaNACweVwxUruKyWVcb293wMv1RqTPZyZ8kSZ2NogUZNcLOFQ==} dev: false @@ -12714,15 +13221,43 @@ packages: engines: {node: '>=12'} dev: false + /d3-fetch@3.0.1: + resolution: {integrity: sha512-kpkQIM20n3oLVBKGg6oHrUchHM3xODkTzjMoj7aWQFq5QEM+R6E4WkzT5+tojDY7yjez8KgCBRoj4aEr99Fdqw==} + engines: {node: '>=12'} + dependencies: + d3-dsv: 3.0.1 + dev: false + + /d3-force@3.0.0: + resolution: {integrity: sha512-zxV/SsA+U4yte8051P4ECydjD/S+qeYtnaIyAs9tgHCqfguma/aAQDjo85A9Z6EKhBirHRJHXIgJUlffT4wdLg==} + engines: {node: '>=12'} + dependencies: + d3-dispatch: 3.0.1 + d3-quadtree: 3.0.1 + d3-timer: 3.0.1 + dev: false + /d3-format@3.1.0: resolution: {integrity: sha512-YyUI6AEuY/Wpt8KWLgZHsIU86atmikuoOmCfommt0LYHiQSPjvX2AcFc38PX0CBpr2RCyZhjex+NS/LPOv6YqA==} engines: {node: '>=12'} dev: false + /d3-geo@3.1.1: + resolution: {integrity: sha512-637ln3gXKXOwhalDzinUgY83KzNWZRKbYubaG+fGVuc/dxO64RRljtCTnf5ecMyE1RIdtqpkVcq0IbtU2S8j2Q==} + engines: {node: '>=12'} + dependencies: + d3-array: 3.2.4 + dev: false + /d3-hierarchy@2.0.0: resolution: {integrity: sha512-SwIdqM3HxQX2214EG9GTjgmCc/mbSx4mQBn+DuEETubhOw6/U3fmnji4uCVrmzOydMHSO1nZle5gh6HB/wdOzw==} dev: false + /d3-hierarchy@3.1.2: + resolution: {integrity: sha512-FX/9frcub54beBdugHjDCdikxThEqjnR93Qt7PvQTOHxyiNCAlvMrHhclk3cD5VeAaq9fxmfRp+CnWw9rEMBuA==} + engines: {node: '>=12'} + dev: false + /d3-interpolate@3.0.1: resolution: {integrity: sha512-3bYs1rOD33uo8aqJfKP3JWPAibgw8Zm2+L9vBKEHJ2Rg+viTR7o5Mmv5mZcieN+FRYaAOWX5SJATX6k1PWz72g==} engines: {node: '>=12'} @@ -12730,15 +13265,49 @@ packages: d3-color: 3.1.0 dev: false + /d3-path@1.0.9: + resolution: {integrity: 
sha512-VLaYcn81dtHVTjEHd8B+pbe9yHWpXKZUC87PzoFmsFrJqgFwDe/qxfp5MlfsfM1V5E/iVt0MmEbWQ7FVIXh/bg==} + dev: false + /d3-path@3.1.0: resolution: {integrity: sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ==} engines: {node: '>=12'} dev: false + /d3-polygon@3.0.1: + resolution: {integrity: sha512-3vbA7vXYwfe1SYhED++fPUQlWSYTTGmFmQiany/gdbiWgU/iEyQzyymwL9SkJjFFuCS4902BSzewVGsHHmHtXg==} + engines: {node: '>=12'} + dev: false + + /d3-quadtree@3.0.1: + resolution: {integrity: sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw==} + engines: {node: '>=12'} + dev: false + + /d3-random@3.0.1: + resolution: {integrity: sha512-FXMe9GfxTxqd5D6jFsQ+DJ8BJS4E/fT5mqqdjovykEB2oFbTMDVdg1MGFxfQW+FBOGoB++k8swBrgwSHT1cUXQ==} + engines: {node: '>=12'} + dev: false + /d3-regression@1.3.10: resolution: {integrity: sha512-PF8GWEL70cHHWpx2jUQXc68r1pyPHIA+St16muk/XRokETzlegj5LriNKg7o4LR0TySug4nHYPJNNRz/W+/Niw==} dev: false + /d3-sankey@0.12.3: + resolution: {integrity: sha512-nQhsBRmM19Ax5xEIPLMY9ZmJ/cDvd1BG3UVvt5h3WRxKg5zGRbvnteTyWAbzeSvlh3tW7ZEmq4VwR5mB3tutmQ==} + dependencies: + d3-array: 2.12.1 + d3-shape: 1.3.7 + dev: false + + /d3-scale-chromatic@3.1.0: + resolution: {integrity: sha512-A3s5PWiZ9YCXFye1o246KoscMWqf8BsD9eRiJ3He7C9OBaxKhAd5TFCdEx/7VbKtxxTsu//1mMJFrEt572cEyQ==} + engines: {node: '>=12'} + dependencies: + d3-color: 3.1.0 + d3-interpolate: 3.0.1 + dev: false + /d3-scale@4.0.2: resolution: {integrity: sha512-GZW464g1SH7ag3Y7hXjf8RoUuAFIqklOAq3MRl4OaWabTFJY9PN/E1YklhXLh+OQ3fM9yS2nOkCoS+WLZ6kvxQ==} engines: {node: '>=12'} @@ -12750,6 +13319,17 @@ packages: d3-time-format: 4.1.0 dev: false + /d3-selection@3.0.0: + resolution: {integrity: sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==} + engines: {node: '>=12'} + dev: false + + /d3-shape@1.3.7: + resolution: {integrity: 
sha512-EUkvKjqPFUAZyOlhY5gzCxCeI0Aep04LwIRpsZ/mLFelJiUfnK56jo5JMDSE7yyP2kLSb6LtF+S5chMk7uqPqw==} + dependencies: + d3-path: 1.0.9 + dev: false + /d3-shape@3.2.0: resolution: {integrity: sha512-SaLBuwGm3MOViRq2ABk3eLoxwZELpH6zhl3FbAoJ7Vm1gofKx6El1Ib5z23NUEhF9AsGl7y+dzLe5Cw2AArGTA==} engines: {node: '>=12'} @@ -12780,6 +13360,74 @@ packages: engines: {node: '>=12'} dev: false + /d3-transition@3.0.1(d3-selection@3.0.0): + resolution: {integrity: sha512-ApKvfjsSR6tg06xrL434C0WydLr7JewBB3V+/39RMHsaXTOG0zmt/OAXeng5M5LBm0ojmxJrpomQVZ1aPvBL4w==} + engines: {node: '>=12'} + peerDependencies: + d3-selection: 2 - 3 + dependencies: + d3-color: 3.1.0 + d3-dispatch: 3.0.1 + d3-ease: 3.0.1 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-timer: 3.0.1 + dev: false + + /d3-zoom@3.0.0: + resolution: {integrity: sha512-b8AmV3kfQaqWAuacbPuNbL6vahnOJflOhexLzMMNLga62+/nh0JzvJ0aO/5a5MVgUFGS7Hu1P9P03o3fJkDCyw==} + engines: {node: '>=12'} + dependencies: + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-interpolate: 3.0.1 + d3-selection: 3.0.0 + d3-transition: 3.0.1(d3-selection@3.0.0) + dev: false + + /d3@7.9.0: + resolution: {integrity: sha512-e1U46jVP+w7Iut8Jt8ri1YsPOvFpg46k+K8TpCb0P+zjCkjkPnV7WzfDJzMHy1LnA+wj5pLT1wjO901gLXeEhA==} + engines: {node: '>=12'} + dependencies: + d3-array: 3.2.4 + d3-axis: 3.0.0 + d3-brush: 3.0.0 + d3-chord: 3.0.1 + d3-color: 3.1.0 + d3-contour: 4.0.2 + d3-delaunay: 6.0.4 + d3-dispatch: 3.0.1 + d3-drag: 3.0.0 + d3-dsv: 3.0.1 + d3-ease: 3.0.1 + d3-fetch: 3.0.1 + d3-force: 3.0.0 + d3-format: 3.1.0 + d3-geo: 3.1.1 + d3-hierarchy: 3.1.2 + d3-interpolate: 3.0.1 + d3-path: 3.1.0 + d3-polygon: 3.0.1 + d3-quadtree: 3.0.1 + d3-random: 3.0.1 + d3-scale: 4.0.2 + d3-scale-chromatic: 3.1.0 + d3-selection: 3.0.0 + d3-shape: 3.2.0 + d3-time: 3.1.0 + d3-time-format: 4.1.0 + d3-timer: 3.0.1 + d3-transition: 3.0.1(d3-selection@3.0.0) + d3-zoom: 3.0.0 + dev: false + + /dagre-d3-es@7.0.11: + resolution: {integrity: 
sha512-tvlJLyQf834SylNKax8Wkzco/1ias1OPw8DcUMDE7oUIoSEW25riQVuiu/0OWEFqT0cxHT3Pa9/D82Jr47IONw==} + dependencies: + d3: 7.9.0 + lodash-es: 4.17.21 + dev: false + /data-uri-to-buffer@2.0.2: resolution: {integrity: sha512-ND9qDTLc6diwj+Xe5cdAgVTbLVdXbtxTJRXRhli8Mowuaan+0EJOtdqJ0QCHNSSPyoXGx9HX2/VMnKeC34AChA==} dev: true @@ -12830,6 +13478,10 @@ packages: resolution: {integrity: sha512-fRHTG8g/Gif+kSh50gaGEdToemgfj74aRX3swtiouboip5JDLAyDE9F11nHMIcvOaXeOC6D7SpNhi7uFyB7Uww==} dev: false + /dayjs@1.11.18: + resolution: {integrity: sha512-zFBQ7WFRvVRhKcWoUh+ZA1g2HVgUbsZm9sbddh8EC5iv93sui8DVVz1Npvz+r6meo9VKfa8NyLWBsQK1VvIKPA==} + dev: false + /debounce-fn@4.0.0: resolution: {integrity: sha512-8pYCQiL9Xdcg0UPSD3d+0KMlOjp+KGU5EPwYddgzQ7DATsg4fuUDjQtsYLmWjnk2obnNHgV3vE2Y4jejSOJVBQ==} engines: {node: '>=10'} @@ -12853,8 +13505,19 @@ packages: ms: 2.0.0 dev: true - /debug@4.3.7: - resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} + /debug@4.3.7: + resolution: {integrity: sha512-Er2nc/H7RrMXZBFCEim6TCmMk02Z8vLC2Rbi1KEBggpo0fS6l0S1nnapwmIi3yW/+GOJap1Krg4w0Hg80oCqgQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.3 + + /debug@4.4.1(supports-color@8.1.1): + resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} engines: {node: '>=6.0'} peerDependencies: supports-color: '*' @@ -12863,9 +13526,10 @@ packages: optional: true dependencies: ms: 2.1.3 + supports-color: 8.1.1 - /debug@4.4.1(supports-color@8.1.1): - resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} + /debug@4.4.3(supports-color@8.1.1): + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} engines: {node: '>=6.0'} peerDependencies: 
supports-color: '*' @@ -12875,6 +13539,7 @@ packages: dependencies: ms: 2.1.3 supports-color: 8.1.1 + dev: true /decamelize@1.2.0: resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} @@ -12984,6 +13649,12 @@ packages: esprima: 4.0.1 dev: true + /delaunator@5.0.1: + resolution: {integrity: sha512-8nvh+XBe96aCESrGOqMp/84b13H9cdKbG5P2ejQCh4d4sK9RL4371qou9drQjMhvnPmhWl5hnmqbEE0fXr9Xnw==} + dependencies: + robust-predicates: 3.0.2 + dev: false + /delayed-stream@1.0.0: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} @@ -13172,6 +13843,12 @@ packages: domelementtype: 2.3.0 dev: false + /dompurify@3.2.7: + resolution: {integrity: sha512-WhL/YuveyGXJaerVlMYGWhvQswa7myDG17P7Vu65EWC05o8vfeNbvNf4d/BOvH99+ZW+LlQsc1GDKMa1vNK6dw==} + optionalDependencies: + '@types/trusted-types': 2.0.7 + dev: false + /domutils@3.2.2: resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} dependencies: @@ -13207,11 +13884,6 @@ packages: engines: {node: '>=12'} dev: false - /dotenv@16.3.1: - resolution: {integrity: sha512-IPzF4w4/Rd94bA9imS68tZBaYyBWSCE47V1RGuMrB94iyTOIEwRmVL2x/4An+6mETpLrKJ5hQkB8W4kFAadeIQ==} - engines: {node: '>=12'} - dev: true - /dotenv@16.6.1: resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} engines: {node: '>=12'} @@ -13340,15 +14012,6 @@ packages: es-errors: 1.3.0 gopd: 1.2.0 - /duplexify@4.1.3: - resolution: {integrity: sha512-M3BmBhwJRZsSx38lZyhE53Csddgzl5R7xGJNk7CVddZD6CcmwMCH8J+7AprIrQKH7TonKxaCjcv27Qmf+sQ+oA==} - dependencies: - end-of-stream: 1.4.5 - inherits: 2.0.4 - readable-stream: 3.6.2 - stream-shift: 1.0.3 - dev: true - /eastasianwidth@0.2.0: resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} 
@@ -13962,11 +14625,11 @@ packages: /eslint-visitor-keys@3.4.3: resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: false /eslint-visitor-keys@4.2.1: resolution: {integrity: sha512-Uhdk5sfqcee/9H/rCOJikYz67o0a2Tw2hGRPOG2Y1R2dg7brRe1uG0yaNQDHu+TO/uQPF/5eCapvYSmHUjt7JQ==} engines: {node: ^18.18.0 || ^20.9.0 || >=21.1.0} - dev: false /eslint@9.35.0: resolution: {integrity: sha512-QePbBFMJFjgmlE+cXAlbHZbHpdFVS2E/6vzCy7aKlebddvl1vadiC4JFV5u/wqTkNUwEV8WrQi257jf5f06hrg==} @@ -14252,7 +14915,6 @@ packages: /exsolve@1.0.7: resolution: {integrity: sha512-VO5fQUzZtI6C+vx4w/4BWJpg3s/5l+6pRQEHzFRM8WFi4XffSP1Z+4qi7GbjWbvRQEbdIco5mIMq+zX4rPuLrw==} - dev: true /extend-shallow@2.0.1: resolution: {integrity: sha512-zCnTtlxNoAiDc3gqY2aYAWFx7XWWiasuF2K8Me5WbN8otHKTUKBwjPtNpRs/rbUZm7KxWAaNj7P1a/p52GbVug==} @@ -14338,6 +15000,14 @@ packages: resolution: {integrity: sha512-n11RGP/lrWEFI/bWdygLxhI+pVeo1ZYIVwvvPkW7azl/rOy+F3HYRZ2K5zeE9mmkhQppyv9sQFx0JM9UabnpPQ==} dev: false + /fast-unique-numbers@9.0.24: + resolution: {integrity: sha512-Dv0BYn4waOWse94j16rsZ5w/0zoaCa74O3q6IZjMqaXbtT92Q+Sb6pPk+phGzD8Xh+nueQmSRI3tSCaHKidzKw==} + engines: {node: '>=18.2.0'} + dependencies: + '@babel/runtime': 7.28.4 + tslib: 2.8.1 + dev: true + /fast-uri@3.1.0: resolution: {integrity: sha512-iPeeDKJSWf4IEOasVVrknXpaBV0IApz/gp7S2bb7Z4Lljbl2MGJRqInZiUrQwV16cpzw/D3S5j5Julj/gT52AA==} @@ -15166,18 +15836,6 @@ packages: /glob-to-regexp@0.4.1: resolution: {integrity: sha512-lkX1HJXwyMcprw/5YUZc2s7DrpAiHB21/V+E1rHUrVNokkvB6bqMzT0VfV6/86ZNabt1k14YOIaT7nDvOX3Iiw==} - /glob@10.3.1: - resolution: {integrity: sha512-9BKYcEeIs7QwlCYs+Y3GBvqAMISufUS0i2ELd11zpZjxI5V9iyRj0HgzB5/cLf2NY4vcYBTYzJ7GIui7j/4DOw==} - engines: {node: '>=16 || 14 >=14.17'} - hasBin: true - dependencies: - foreground-child: 3.3.1 - jackspeak: 2.3.6 - minimatch: 9.0.5 - minipass: 6.0.2 - path-scurry: 1.11.1 - dev: true - 
/glob@10.3.4: resolution: {integrity: sha512-6LFElP3A+i/Q8XQKEvZjkEWEOTgAIALR9AO2rwT8bgPhDd1anmqDJDZ6lLddI4ehxxxR1S5RIqKe1uapMQfYaQ==} engines: {node: '>=16 || 14 >=14.17'} @@ -15245,6 +15903,11 @@ packages: engines: {node: '>=18'} dev: false + /globals@15.15.0: + resolution: {integrity: sha512-7ACyT3wmyp3I61S4fG682L0VA2RGD9otkqGJIwNUMF1SWUombIIk+af1unuDYgMm082aHYwD+mzJvv9Iu8dsgg==} + engines: {node: '>=18'} + dev: false + /globalthis@1.0.4: resolution: {integrity: sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==} engines: {node: '>= 0.4'} @@ -15302,6 +15965,10 @@ packages: responselike: 3.0.0 dev: true + /graceful-fs@4.2.10: + resolution: {integrity: sha512-9ByhssR2fPVsNZj478qUUbKfmL0+t5BDVyjShtyZZLiK7ZDAArFFfopyOTj0M05wE2tJPisA4iTnnXl2YoPvOA==} + dev: true + /graceful-fs@4.2.11: resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} @@ -15337,6 +16004,10 @@ packages: section-matter: 1.0.0 strip-bom-string: 1.0.0 + /hachure-fill@0.5.2: + resolution: {integrity: sha512-3GKBOn+m2LX9iq+JC1064cSFprJY4jL1jCXTcpnfER5HYE2l/4EfWSGzkPa/ZDBmYI0ZOEj5VHV/eKnPGkHuOg==} + dev: false + /has-ansi@2.0.0: resolution: {integrity: sha512-C8vBJ8DwUCx19vhm7urhTuUsr4/IyP6l4VzNQDv+ryHQObW3TTTp9yB68WpYgRe2bbaGuZ/se74IqFeVnMnLZg==} engines: {node: '>=0.10.0'} @@ -15587,11 +16258,8 @@ packages: space-separated-tokens: 2.0.2 dev: true - /help-me@3.0.0: - resolution: {integrity: sha512-hx73jClhyk910sidBB7ERlnhMlFsJJIBqSVMFDwPN8o2v9nmp5KgLq1Xz1Bf1fCMMZ6mPrX159iG0VLy/fPMtQ==} - dependencies: - glob: 7.2.3 - readable-stream: 3.6.2 + /help-me@5.0.0: + resolution: {integrity: sha512-7xgomUX6ADmcYzFik0HzAxh/73YlKR9bmFzf51CZwR+b6YtzU2m0u49hQCqV6SvlqIqsaxovfwdvbnsw3b/zpg==} dev: true /hex-rgb@4.3.0: @@ -15657,7 +16325,7 @@ packages: engines: {node: '>=8.0.0'} dependencies: content-type: 1.0.5 - debug: 4.4.1(supports-color@8.1.1) + debug: 4.4.3(supports-color@8.1.1) is-retry-allowed: 1.2.0 
is-stream: 2.0.1 parse-json: 4.0.0 @@ -15735,11 +16403,6 @@ packages: ms: 2.1.3 dev: false - /hyperlinker@1.0.0: - resolution: {integrity: sha512-Ty8UblRWFEcfSuIaajM34LdPXIhbs1ajEX/BBPv24J+enSVaEVY63xQ6lTO9VRYS5LAoghIG0IDJ+p+IPzKUQQ==} - engines: {node: '>=4'} - dev: true - /iconv-lite@0.4.24: resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==} engines: {node: '>=0.10.0'} @@ -15944,6 +16607,10 @@ packages: hasown: 2.0.2 side-channel: 1.1.0 + /internmap@1.0.1: + resolution: {integrity: sha512-lDB5YccMydFBtasVtxnZ3MRBHuaoE8GKsppq+EchKL2U4nK/DmEpPHNH8MZe5HkMtpSiTSOZwfN0tzYjO/lJEw==} + dev: false + /internmap@2.0.3: resolution: {integrity: sha512-5Hh7Y1wQbvY5ooGgPbDaL5iYLAPzMTUrjMulskHLH6wnv/A+1q5rgEaiuqEjB+oxGXIVZs1FF+R/KPN3ZSQYYg==} engines: {node: '>=12'} @@ -16015,6 +16682,7 @@ packages: dependencies: call-bound: 1.0.4 has-tostringtag: 1.0.2 + dev: false /is-array-buffer@3.0.5: resolution: {integrity: sha512-DDfANUiiG2wC1qawP66qlTugJeL5HyzMpfr8lLK+jMQirGzNod0B12cFB/9q838Ru27sBwfw78/rdoU7RERz6A==} @@ -16166,14 +16834,6 @@ packages: resolution: {integrity: sha512-1Qed0/Hr2m+YqxnM09CjA2d/i6YZNfF6R2oRAOj36eUdS6qIV/huPJNSEpKbupewFs+ZsJlxsjjPbc0/afW6Lw==} engines: {node: '>= 0.4'} - /is-nan@1.3.2: - resolution: {integrity: sha512-E+zBKpQ2t6MEo1VsonYmluk9NxGrbzpeeLC2xIViuO2EjU2xsXsBPwTr3Ykv9l08UYEVEdWeRZNouaZqF6RN0w==} - engines: {node: '>= 0.4'} - dependencies: - call-bind: 1.0.8 - define-properties: 1.2.1 - dev: true - /is-negative-zero@2.0.3: resolution: {integrity: sha512-5KoIu2Ngpyek75jXodFvnafB6DJgr3u8uuK0LEZJjrU19DrMD3EVERaR8sjz8CCGgpZvxPl9SuE1GMVPFHx1mw==} engines: {node: '>= 0.4'} @@ -16376,6 +17036,7 @@ packages: '@isaacs/cliui': 8.0.2 optionalDependencies: '@pkgjs/parseargs': 0.11.0 + dev: false /jackspeak@3.4.3: resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} @@ -16542,6 +17203,11 @@ packages: resolution: {integrity: 
sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} dev: false + /json-stream-stringify@3.1.6: + resolution: {integrity: sha512-x7fpwxOkbhFCaJDJ8vb1fBY3DdSa4AlITaz+HHILQJzdPMnHEFjxPwVUi1ALIbcIxDE0PNe/0i7frnY8QnBQog==} + engines: {node: '>=7.10.1'} + dev: true + /json2module@0.0.3: resolution: {integrity: sha512-qYGxqrRrt4GbB8IEOy1jJGypkNsjWoIMlZt4bAsmUScCA507Hbc2p1JOhBzqn45u3PWafUgH2OnzyNU7udO/GA==} hasBin: true @@ -16615,13 +17281,16 @@ packages: hasBin: true dependencies: commander: 8.3.0 - dev: true /keyv@4.5.4: resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} dependencies: json-buffer: 3.0.1 + /khroma@2.1.0: + resolution: {integrity: sha512-Ls993zuzfayK269Svk9hzpeGUKob/sIgZzyHYdjQoAdQetRKpOLj+k/QQQ/6Qi0Yz65mlROrfd+Ev+1+7dz9Kw==} + dev: false + /kind-of@3.2.2: resolution: {integrity: sha512-NOW9QQXMoZGg/oqnVNoNTTIFEIid1627WCffUBJEdMxYApq7mNE7CpzucIPc+ZQg25Phej7IJSmX3hO+oblOtQ==} engines: {node: '>=0.10.0'} @@ -16666,6 +17335,29 @@ packages: zod-validation-error: 3.5.3(zod@3.23.8) dev: true + /kolorist@1.8.0: + resolution: {integrity: sha512-Y+60/zizpJ3HRH8DCss+q95yr6145JXZo46OTpFvDZWLfRCE4qChOyk1b26nMaNpfHHgxagk9dXT5OP0Tfe+dQ==} + dev: false + + /langium@3.3.1: + resolution: {integrity: sha512-QJv/h939gDpvT+9SiLVlY7tZC3xB2qK57v0J04Sh9wpMb6MP1q8gB21L3WIo8T5P1MSMg3Ep14L7KkDCFG3y4w==} + engines: {node: '>=16.0.0'} + dependencies: + chevrotain: 11.0.3 + chevrotain-allstar: 0.3.1(chevrotain@11.0.3) + vscode-languageserver: 9.0.1 + vscode-languageserver-textdocument: 1.0.12 + vscode-uri: 3.0.8 + dev: false + + /layout-base@1.0.2: + resolution: {integrity: sha512-8h2oVEZNktL4BH2JCOI90iD1yXwL6iNW7KcCKT2QZgQJR2vbqDsldCTPRU9NifTCqHZci57XvQQ15YTu+sTYPg==} + dev: false + + /layout-base@2.0.1: + resolution: {integrity: sha512-dp3s92+uNI1hWIpPGH3jK2kxE2lMjdXdr+DH8ynZHpd6PUlH6x6cbuXnoMmiNumznqaNO31xu9e79F0uuZ0JFg==} + dev: false + /lazy-cache@1.0.4: resolution: 
{integrity: sha512-RE2g0b5VGZsOCFOCgP7omTRYFqydmZkBwl5oNnQ1lDYC57uyO9KqNnNVxT7COSHTxrRCWVcAVOcbjk+tvh/rgQ==} engines: {node: '>=0.10.0'} @@ -16692,11 +17384,6 @@ packages: resolution: {integrity: sha512-Y3c3QZfvKWHX60BVOQPhLCvVGmDYWyJEiINE3drOog6KCyN2AOwvuQQzlS3uJg1J85kzpILXIUwRXULWavir+w==} dev: false - /leven@2.1.0: - resolution: {integrity: sha512-nvVPLpIHUxCUoRLrFqTgSxXJ614d8AgQoWl7zPe/2VadE8+1dpU3LBhowRuBAcuwruWtOdD8oYC9jDNJjXDPyA==} - engines: {node: '>=0.10.0'} - dev: true - /leven@3.1.0: resolution: {integrity: sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==} engines: {node: '>=6'} @@ -16767,6 +17454,15 @@ packages: pkg-types: 1.3.1 dev: true + /local-pkg@1.1.2: + resolution: {integrity: sha512-arhlxbFRmoQHl33a0Zkle/YWlmNwoyt6QNZEIJcqNbdrsix5Lvc4HyyI3EnwxTYlZYc32EbYrQ8SzEZ7dqgg9A==} + engines: {node: '>=14'} + dependencies: + mlly: 1.8.0 + pkg-types: 2.3.0 + quansync: 0.2.11 + dev: false + /locate-character@3.0.0: resolution: {integrity: sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==} dev: false @@ -16890,13 +17586,6 @@ packages: /lru-cache@10.4.3: resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} - /lru-cache@6.0.0: - resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} - engines: {node: '>=10'} - dependencies: - yallist: 4.0.0 - dev: true - /lru-cache@7.18.3: resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==} engines: {node: '>=12'} @@ -16929,8 +17618,8 @@ packages: react: 18.3.1 dev: false - /luxon@3.3.0: - resolution: {integrity: sha512-An0UCfG/rSiqtAIiBPO0Y9/zAnHUZxAMiCpTd5h2smgsj7GGmcenvrvww2cqNA8/4A5ZrD1gJpHN2mIHZQF+Mg==} + /luxon@3.7.2: + resolution: {integrity: 
sha512-vtEhXh/gNjI9Yg1u4jX/0YVPMvxzHuGgCm6tC5kZyb08yjGWGnqAjGJvcXbqQR2P3MyMEFnRbpcdFS6PBcLqew==} engines: {node: '>=12'} dev: true @@ -16961,6 +17650,12 @@ packages: /markdown-table@3.0.4: resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} + /marked@16.3.0: + resolution: {integrity: sha512-K3UxuKu6l6bmA5FUwYho8CfJBlsUWAooKtdGgMcERSpF7gcBUrCGsLH7wDaaNOzwq18JzSUDyoEb/YsrqMac3w==} + engines: {node: '>= 20'} + hasBin: true + dev: false + /marked@7.0.4: resolution: {integrity: sha512-t8eP0dXRJMtMvBojtkcsA7n48BkauktUKzfkPSCq85ZMTJ0v76Rke4DYz01omYpPTUh4p/f7HePgRo3ebG8+QQ==} engines: {node: '>= 16'} @@ -17215,6 +17910,33 @@ packages: resolution: {integrity: sha512-jz+Cfrg9GWOZbQAnDQ4hlVnQky+341Yk5ru8bZSe6sIDTCIg8n9i/u7hSQGSVOF3C7lH6mGtqjkiT9G4wFLL0w==} dev: true + /mermaid@11.12.0: + resolution: {integrity: sha512-ZudVx73BwrMJfCFmSSJT84y6u5brEoV8DOItdHomNLz32uBjNrelm7mg95X7g+C6UoQH/W6mBLGDEDv73JdxBg==} + dependencies: + '@braintree/sanitize-url': 7.1.1 + '@iconify/utils': 3.0.2 + '@mermaid-js/parser': 0.6.2 + '@types/d3': 7.4.3 + cytoscape: 3.33.1 + cytoscape-cose-bilkent: 4.1.0(cytoscape@3.33.1) + cytoscape-fcose: 2.2.0(cytoscape@3.33.1) + d3: 7.9.0 + d3-sankey: 0.12.3 + dagre-d3-es: 7.0.11 + dayjs: 1.11.18 + dompurify: 3.2.7 + katex: 0.16.22 + khroma: 2.1.0 + lodash-es: 4.17.21 + marked: 16.3.0 + roughjs: 4.6.6 + stylis: 4.3.6 + ts-dedent: 2.2.0 + uuid: 11.1.0 + transitivePeerDependencies: + - supports-color + dev: false + /methods@1.1.2: resolution: {integrity: sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==} engines: {node: '>= 0.6'} @@ -17660,13 +18382,6 @@ packages: brace-expansion: 2.0.2 dev: false - /minimatch@9.0.3: - resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==} - engines: {node: '>=16 || 14 >=14.17'} - dependencies: - brace-expansion: 2.0.2 - dev: true - /minimatch@9.0.5: 
resolution: {integrity: sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} engines: {node: '>=16 || 14 >=14.17'} @@ -17692,11 +18407,6 @@ packages: engines: {node: '>=8'} dev: true - /minipass@6.0.2: - resolution: {integrity: sha512-MzWSV5nYVT7mVyWCwn2o7JH13w2TBRmmSqSRCKzTw+lmft9X4z+3wjvs06Tzijo5z4W/kahUCDpRXTF+ZrmF/w==} - engines: {node: '>=16 || 14 >=14.17'} - dev: true - /minipass@7.1.2: resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} engines: {node: '>=16 || 14 >=14.17'} @@ -17762,7 +18472,6 @@ packages: pathe: 2.0.3 pkg-types: 1.3.1 ufo: 1.6.1 - dev: true /mock-property@1.0.3: resolution: {integrity: sha512-2emPTb1reeLLYwHxyVx993iYyCHEiRRO+y8NFXFPL5kl5q14sgTK76cXyEKkeKCHeRw35SfdkUJ10Q1KfHuiIQ==} @@ -17780,38 +18489,37 @@ packages: resolution: {integrity: sha512-EGWKgxALGMgzvxYF1UyGTy0HXX/2vHLkw6+NvDKW2jypWbHpjQuj4UMcqQWXHERJhVGKikolT06G3bcKe4fi7w==} dev: true - /mqtt-packet@6.10.0: - resolution: {integrity: sha512-ja8+mFKIHdB1Tpl6vac+sktqy3gA8t9Mduom1BA75cI+R9AHnZOiaBQwpGiWnaVJLDGRdNhQmFaAqd7tkKSMGA==} + /mqtt-packet@9.0.2: + resolution: {integrity: sha512-MvIY0B8/qjq7bKxdN1eD+nrljoeaai+qjLJgfRn3TiMuz0pamsIWY2bFODPZMSNmabsLANXsLl4EMoWvlaTZWA==} dependencies: - bl: 4.1.0 + bl: 6.1.3 debug: 4.4.1(supports-color@8.1.1) process-nextick-args: 2.0.1 transitivePeerDependencies: - supports-color dev: true - /mqtt@4.3.8: - resolution: {integrity: sha512-2xT75uYa0kiPEF/PE0VPdavmEkoBzMT/UL9moid0rAvlCtV48qBwxD62m7Ld/4j8tSkIO1E/iqRl/S72SEOhOw==} - engines: {node: '>=10.0.0'} + /mqtt@5.14.1: + resolution: {integrity: sha512-NxkPxE70Uq3Ph7goefQa7ggSsVzHrayCD0OyxlJgITN/EbzlZN+JEPmaAZdxP1LsIT5FamDyILoQTF72W7Nnbw==} + engines: {node: '>=16.0.0'} hasBin: true dependencies: - commist: 1.1.0 + '@types/readable-stream': 4.0.21 + '@types/ws': 8.18.1 + commist: 3.2.0 concat-stream: 2.0.0 debug: 4.4.1(supports-color@8.1.1) - duplexify: 4.1.3 - help-me: 3.0.0 - 
inherits: 2.0.4 - lru-cache: 6.0.0 + help-me: 5.0.0 + lru-cache: 10.4.3 minimist: 1.2.8 - mqtt-packet: 6.10.0 + mqtt-packet: 9.0.2 number-allocator: 1.0.14 - pump: 3.0.3 - readable-stream: 3.6.2 - reinterval: 1.1.0 + readable-stream: 4.7.0 rfdc: 1.4.1 - split2: 3.2.2 - ws: 7.5.10 - xtend: 4.0.2 + socks: 2.8.7 + split2: 4.2.0 + worker-timers: 8.0.25 + ws: 8.18.3 transitivePeerDependencies: - bufferutil - supports-color @@ -17900,10 +18608,6 @@ packages: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} dev: false - /natural-orderby@2.0.3: - resolution: {integrity: sha512-p7KTHxU0CUrcOXe62Zfrb5Z13nLvPhSWR/so3kFulUQU0sgUll2Z0LwpsLN351eOOD+hRGu/F1g+6xDfPeD++Q==} - dev: true - /negotiator@0.6.3: resolution: {integrity: sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==} engines: {node: '>= 0.6'} @@ -18378,16 +19082,12 @@ packages: dependencies: call-bind: 1.0.8 define-properties: 1.2.1 + dev: false /object-keys@1.1.1: resolution: {integrity: sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==} engines: {node: '>= 0.4'} - /object-treeify@1.1.33: - resolution: {integrity: sha512-EFVjAYfzWqWsBMRHPMAXLCDIJnpMhdWAqR7xG6M6a2cs6PMFpl/+Z20w9zDW4vkxOFfddegBKq9Rehd0bxWE7A==} - engines: {node: '>= 10'} - dev: true - /object-treeify@4.0.1: resolution: {integrity: sha512-Y6tg5rHfsefSkfKujv2SwHulInROy/rCL5F4w0QOWxut8AnxYxf0YmNhTh95Zfyxpsudo66uqkux0ACFnyMSgQ==} engines: {node: '>= 16'} @@ -18451,15 +19151,6 @@ packages: regex: 6.0.1 regex-recursion: 6.0.2 - /open@8.4.0: - resolution: {integrity: sha512-XgFPPM+B28FtCCgSb9I+s9szOC1vZRSwgWsRUA5ylIxRTgKozqjOCrVOqGsYABPYK5qnfqClxZTFBa8PKt2v6Q==} - engines: {node: '>=12'} - dependencies: - define-lazy-prop: 2.0.0 - is-docker: 2.2.1 - is-wsl: 2.2.0 - dev: true - /open@8.4.2: resolution: {integrity: 
sha512-7x81NCL719oNbsq/3mh+hVrAWmFuEYUqrq/Iw3kUzH8ReypT9QQ0BLoJS7/G9k6N81XjW4qHWtjWwe/9eLy1EQ==} engines: {node: '>=12'} @@ -18696,6 +19387,10 @@ packages: quansync: 0.2.11 dev: true + /package-manager-detector@1.3.0: + resolution: {integrity: sha512-ZsEbbZORsyHuO00lY1kV3/t72yp6Ysay6Pd17ZAlNGuGwmWDLCJxFpRs0IzfXfj1o4icJOkUEioexFHzyPurSQ==} + dev: false + /pako@0.2.9: resolution: {integrity: sha512-NUcwaKxUxWrZLpDG+z/xZaCgQITkA/Dv4V/T6bw7VON6l1Xz/VnrBqrYjZQ12TamKHzITTfOEIYUj48y2KXImA==} dev: false @@ -18780,13 +19475,6 @@ packages: engines: {node: '>= 0.8'} dev: true - /password-prompt@1.1.3: - resolution: {integrity: sha512-HkrjG2aJlvF0t2BMH0e2LB/EHf3Lcq3fNMzy4GYHcQblAvOl+QQji1Lx7WRBMqpVK8p+KR7bCg7oqAMXtdgqyw==} - dependencies: - ansi-escapes: 4.3.2 - cross-spawn: 7.0.6 - dev: true - /patch-console@2.0.0: resolution: {integrity: sha512-0YNdUceMdaQwoKce1gatDScmMo5pu/tfABfnzEqeG0gtTmd7mh/WcwgUjtAeOU7N8nFFlbQBnFK2gXW5fGvmMA==} engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} @@ -18796,6 +19484,10 @@ packages: resolution: {integrity: sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==} dev: false + /path-data-parser@0.1.0: + resolution: {integrity: sha512-NOnmBpt5Y2RWbuv0LMzsayp3lVylAHLPUTut412ZA3l+C4uw4ZVkQbjShYCQ8TCpUMdPapr4YjUqLYD6v68j+w==} + dev: false + /path-exists@3.0.0: resolution: {integrity: sha512-bpC7GYwiDYQ4wYLe+FA8lhRjhQCMcQGuSgGGqDkg/QerRWw9CmGRT0iSOVRSZJ29NMLZgIzqaljJ63oaL4NIJQ==} engines: {node: '>=4'} @@ -18846,7 +19538,6 @@ packages: /pathe@2.0.3: resolution: {integrity: sha512-WUjGcAqP1gQacoQe+OBJsFA7Ld4DyXuUIjZ5cc75cLHvJ7dtNsTugphxIADwspS+AraAUePCKrSVtPLFj/F88w==} - dev: true /pathval@1.1.1: resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} @@ -18907,7 +19598,14 @@ packages: confbox: 0.1.8 mlly: 1.8.0 pathe: 2.0.3 - dev: true + + /pkg-types@2.3.0: + resolution: {integrity: 
sha512-SIqCzDRg0s9npO5XQ3tNZioRY1uK06lA41ynBC1YmFTmnY6FjUjVt6s4LoADmwoig1qqD0oK8h1p/8mlMx8Oig==} + dependencies: + confbox: 0.2.2 + exsolve: 1.0.7 + pathe: 2.0.3 + dev: false /pkg-up@3.1.0: resolution: {integrity: sha512-nDywThFk1i4BQK4twPQ6TA4RT8bDY96yeuCVBWL3ePARCiEKDRSrNGbFIgUJpLp+XeIR65v8ra7WuJOFUBtkMA==} @@ -18921,6 +19619,17 @@ packages: engines: {node: '>=4'} dev: false + /points-on-curve@0.2.0: + resolution: {integrity: sha512-0mYKnYYe9ZcqMCWhUjItv/oHjvgEsfKvnUTg8sAtnHr3GVy7rGkXCb6d5cSyqrWqL4k81b9CPg3urd+T7aop3A==} + dev: false + + /points-on-path@0.2.1: + resolution: {integrity: sha512-25ClnWWuw7JbWZcgqY/gJ4FQWadKxGWk+3kR/7kD0tCaDtPPMj7oHu2ToLaVhfpnHrZzYby2w6tUA0eOIuUg8g==} + dependencies: + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + dev: false + /pony-cause@1.1.1: resolution: {integrity: sha512-PxkIc/2ZpLiEzQXu5YRDOUgBlfGYBY8156HY5ZcRAwwonMk5W/MrJP2LLkG/hF7GEQzaHo2aS7ho6ZLCOvf+6g==} engines: {node: '>=12.0.0'} @@ -19202,7 +19911,6 @@ packages: /proto-list@1.2.4: resolution: {integrity: sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA==} - dev: false /protobufjs@7.5.4: resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} @@ -19332,7 +20040,6 @@ packages: /quansync@0.2.11: resolution: {integrity: sha512-AifT7QEbW9Nri4tAwR5M/uzpBuqfZf+zwaEM/QkzEjj7NBuFD2rBuy0K3dE+8wltbezDV7JMA0WfnCPYRSYbXA==} - dev: true /querystringify@2.2.0: resolution: {integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} @@ -19750,14 +20457,14 @@ packages: engines: {node: '>= 14.18.0'} dev: false - /recast@0.23.4: - resolution: {integrity: sha512-qtEDqIZGVcSZCHniWwZWbRy79Dc6Wp3kT/UmDA2RJKBPg7+7k51aQBZirHmUGn5uvHf2rg8DkjizrN26k61ATw==} + /recast@0.23.11: + resolution: {integrity: sha512-YTUo+Flmw4ZXiWfQKGcwwc11KnoRAYgzAE2E7mXKCjSviTKShtxBsN6YUUBB2gtaBzKzeKunxhUwNHQuRryhWA==} engines: {node: '>= 4'} dependencies: - 
assert: 2.1.0 ast-types: 0.16.1 esprima: 4.0.1 source-map: 0.6.1 + tiny-invariant: 1.3.3 tslib: 2.8.1 dev: true @@ -19828,12 +20535,6 @@ packages: unified: 11.0.5 vfile: 6.0.3 - /redeyed@2.1.1: - resolution: {integrity: sha512-FNpGGo1DycYAdnrKFxCMmKYgo/mILAqtRYbkdQD8Ep/Hk2PQ5+aEAEx+IU713RTDmuBaH0c8P5ZozurNu5ObRQ==} - dependencies: - esprima: 4.0.1 - dev: true - /reflect-metadata@0.2.2: resolution: {integrity: sha512-urBwgfrvVP/eAyXx4hluJivBKzuEbSQs9rKWCrCkbSxNv8mxPcUZKeuoF3Uy4mJl3Lwprp6yy5/39VWigZ4K6Q==} dev: true @@ -19888,6 +20589,13 @@ packages: gopd: 1.2.0 set-function-name: 2.0.2 + /registry-auth-token@5.1.0: + resolution: {integrity: sha512-GdekYuwLXLxMuFTwAPg5UKGLW/UXzQrZvH/Zj791BQif5T05T0RsaLfHc9q3ZOKi7n+BoprPD9mJ0O0k4xzUlw==} + engines: {node: '>=14'} + dependencies: + '@pnpm/npm-conf': 2.3.1 + dev: true + /rehype-katex@7.0.1: resolution: {integrity: sha512-OiM2wrZ/wuhKkigASodFoo8wimG3H12LWQaH8qSPVJn9apWKFSH3YOCtbKpBorTVw/eI7cuT21XBbvwEswbIOA==} dependencies: @@ -19924,10 +20632,6 @@ packages: transitivePeerDependencies: - supports-color - /reinterval@1.1.0: - resolution: {integrity: sha512-QIRet3SYrGp0HUHO88jVskiG6seqUGC5iAG7AwI/BV4ypGcuqk9Du6YQBUOUqm9c8pw1eyLoIaONifRua1lsEQ==} - dev: true - /remark-frontmatter@5.0.0: resolution: {integrity: sha512-XTFYvNASMe5iPN0719nPrdItC9aU0ssC4v14mH1BCi1u0n1gAocqcujWUrByftZTbLhRtiKRyjYTSIOcr69UVQ==} dependencies: @@ -20177,6 +20881,10 @@ packages: align-text: 0.1.4 dev: false + /robust-predicates@3.0.2: + resolution: {integrity: sha512-IXgzBWvWQwE6PrDI05OvmXUIruQTcoMDzRsOd5CDvHCVLcLHMTSYvOK5Cm46kWqlV3yAbuSpBZdJ5oP5OUoStg==} + dev: false + /rollup@0.25.8: resolution: {integrity: sha512-a2S4Bh3bgrdO4BhKr2E4nZkjTvrJ2m2bWjMTzVYtoqSCn0HnuxosXnaJUHrMEziOWr3CzL9GjilQQKcyCQpJoA==} hasBin: true @@ -20216,6 +20924,15 @@ packages: fsevents: 2.3.3 dev: true + /roughjs@4.6.6: + resolution: {integrity: sha512-ZUz/69+SYpFN/g/lUlo2FXcIjRkSu3nDarreVdGGndHEBJ6cXPdKguS8JGxwj5HA5xIbVKSmLgr5b3AWxtRfvQ==} + dependencies: + hachure-fill: 
0.5.2 + path-data-parser: 0.1.0 + points-on-curve: 0.2.0 + points-on-path: 0.2.1 + dev: false + /rrweb-cssom@0.8.0: resolution: {integrity: sha512-guoltQEx+9aMf2gDZ0s62EcV8lsXR+0w8915TC3ITdn2YueuNjdAYh/levpU9nFaoChh9RUS5ZdQMrKfVEN9tw==} dev: true @@ -20898,10 +21615,9 @@ packages: resolution: {integrity: sha512-Q5thBSxp5t8WPTTJQS59LrGqOZqOsrhDGDVm8azCqIBjSBd7nd9o2PM+mDulQQkh8h//4U6hFZnc/mul8t5pWQ==} dev: true - /split2@3.2.2: - resolution: {integrity: sha512-9NThjpgZnifTkJpzTZ7Eue85S49QwpNhZTq6GRJwObb6jnLFNGB7Qm73V5HewTROPyxD0C29xqmaI68bQtV+hg==} - dependencies: - readable-stream: 3.6.2 + /split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} dev: true /sprintf-js@1.0.3: @@ -20985,10 +21701,6 @@ packages: engines: {node: '>=4', npm: '>=6'} dev: true - /stream-shift@1.0.3: - resolution: {integrity: sha512-76ORR0DO1o1hlKwTbi/DM3EXWGf3ZJYO8cXX5RJwnul2DEg2oyoZyjLNoQM8WsvZiFKCRfC1O0J7iCvie3RZmQ==} - dev: true - /streamsearch@1.1.0: resolution: {integrity: sha512-Mcc5wHehp9aXz1ax6bZUyY5afg9u2rv5cqQI3mRrYkGC8rW2hM02jWuwjtL++LS5qinSyhj2QfLyNsuc+VsExg==} engines: {node: '>=10.0.0'} @@ -21192,6 +21904,10 @@ packages: react: 18.3.1 dev: false + /stylis@4.3.6: + resolution: {integrity: sha512-yQ3rwFWRfwNUY7H5vpU0wfdkNSnvnJinhF9830Swlaxl03zsOjCfmX0ugac+3LtK0lYSgwL/KXc8oYL3mG4YFQ==} + dev: false + /sucrase@3.35.0: resolution: {integrity: sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==} engines: {node: '>=16 || 14 >=14.17'} @@ -21240,14 +21956,6 @@ packages: dependencies: has-flag: 4.0.0 - /supports-hyperlinks@2.3.0: - resolution: {integrity: sha512-RpsAZlpWcDwOPQA22aCH4J0t7L8JmAvsCxfOSEwm7cQs3LshN36QaTkwd70DnBOXDWGssw2eUoc8CaRWT0XunA==} - engines: {node: '>=8'} - dependencies: - has-flag: 4.0.0 - supports-color: 7.2.0 - dev: true - /supports-preserve-symlinks-flag@1.0.0: resolution: {integrity: 
sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} engines: {node: '>= 0.4'} @@ -21585,7 +22293,6 @@ packages: /tiny-invariant@1.3.3: resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} - dev: false /tinybench@2.9.0: resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} @@ -21595,6 +22302,10 @@ packages: resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} dev: true + /tinyexec@1.0.1: + resolution: {integrity: sha512-5uC6DDlmeqiOwCPmK9jMSdOuZTh8bU39Ys6yidB+UTt5hfZUPGAypSgFRiEp+jbi9qH40BLDvy85jIU88wKSqw==} + dev: false + /tinyglobby@0.2.14: resolution: {integrity: sha512-tX5e7OM1HnYr2+a2C/4V0htOcSQcoSTH9KgJnVvNm5zm/cyEWKJ7j7YutsH9CxMdtOkkLFy2AHrMci9IM8IPZQ==} engines: {node: '>=12.0.0'} @@ -21744,15 +22455,20 @@ packages: - zod dev: false - /ts-api-utils@1.4.3(typescript@5.5.3): - resolution: {integrity: sha512-i3eMG77UTMD0hZhgRS562pv83RC6ukSAC2GMNWc+9dieh/+jDM5u5YG+NHX6VNDRHQcHwmsTHctP9LhbC3WxVw==} - engines: {node: '>=16'} + /ts-api-utils@2.1.0(typescript@5.5.3): + resolution: {integrity: sha512-CUgTZL1irw8u29bzrOD/nH85jqyc74D6SshFgujOIA7osm2Rz7dYH77agkx7H4FBNxDq7Cjf+IjaX/8zwFW+ZQ==} + engines: {node: '>=18.12'} peerDependencies: - typescript: '>=4.2.0' + typescript: '>=4.8.4' dependencies: typescript: 5.5.3 dev: true + /ts-dedent@2.2.0: + resolution: {integrity: sha512-q5W7tVM71e2xjHZTlgfTDoPF/SmqKG5hddq9SzR49CH2hayqRKJtQ4mtRlSxKaJlR/+9rEM+mnBHf7I2/BQcpQ==} + engines: {node: '>=6.10'} + dev: false + /ts-interface-checker@0.1.13: resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} @@ -22103,7 +22819,6 @@ packages: /ufo@1.6.1: resolution: {integrity: sha512-9a4/uxlTWJ4+a5i0ooc1rU7C7YOw3wT+UGqdeNNHWnOF9qcMBgLRS+4IYUqbczewFx4mLEig6gawh7X6mFlEkA==} - dev: 
true /uglify-js@2.8.29: resolution: {integrity: sha512-qLq/4y2pjcU3vhlhseXGGJ7VbFO4pBANu0kwl8VCa9KEI0V8VfZIx2Fy3w01iSTA/pGwKZSmu/+I4etLNDdt5w==} @@ -22390,16 +23105,6 @@ packages: /util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} - /util@0.12.5: - resolution: {integrity: sha512-kZf/K6hEIrWHI6XqOFUiiMa+79wE/D8Q+NCNAWclkyg3b4d2k7s0QGepNjiABc+aR3N1PAyHL7p6UcLY6LmrnA==} - dependencies: - inherits: 2.0.4 - is-arguments: 1.2.0 - is-generator-function: 1.1.0 - is-typed-array: 1.1.15 - which-typed-array: 1.1.19 - dev: true - /utility-types@3.11.0: resolution: {integrity: sha512-6Z7Ma2aVEWisaL6TvBCy7P8rm2LQoPv6dJ7ecIaIixHcwfbJ0x7mWdbcwlIM5IGQxPZSFYeqRCqlOOeKoJYMkw==} engines: {node: '>= 4'} @@ -22410,16 +23115,15 @@ packages: engines: {node: '>= 0.4.0'} dev: true + /uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + /uuid@8.3.2: resolution: {integrity: sha512-+NYs2QeMWy+GWFOEm9xnn6HCDp0l7QBD7ml8zLUmJ+93Q5NF0NocErnwkTkXVFNiX3/fpC6afS8Dhb/gz7R7eg==} hasBin: true dev: false - /uuid@9.0.0: - resolution: {integrity: sha512-MXcSTerfPa4uqyzStbRoTgt5XIe3x5+42+q1sDuy3R5MDk66URdLMOZe5aPX/SQd+kuYAh0FdP/pO28IkQyTeg==} - hasBin: true - dev: true - /v8-compile-cache-lib@3.0.1: resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} dev: true @@ -23077,6 +23781,33 @@ packages: - yaml dev: true + /vscode-jsonrpc@8.2.0: + resolution: {integrity: sha512-C+r0eKJUIfiDIfwJhria30+TYWPtuHJXHtI7J0YlOmKAo7ogxP20T0zxB7HZQIFhIyvoBPwWskjxrvAtfjyZfA==} + engines: {node: '>=14.0.0'} + dev: false + + /vscode-languageserver-protocol@3.17.5: + resolution: {integrity: sha512-mb1bvRJN8SVznADSGWM9u/b07H7Ecg0I3OgXDuLdn307rl/J3A9YD6/eYOssqhecL27hK1IPZAsaqh00i/Jljg==} + dependencies: + vscode-jsonrpc: 8.2.0 + vscode-languageserver-types: 3.17.5 + dev: 
false + + /vscode-languageserver-textdocument@1.0.12: + resolution: {integrity: sha512-cxWNPesCnQCcMPeenjKKsOCKQZ/L6Tv19DTRIGuLWe32lyzWhihGVJ/rcckZXJxfdKCFvRLS3fpBIsV/ZGX4zA==} + dev: false + + /vscode-languageserver-types@3.17.5: + resolution: {integrity: sha512-Ld1VelNuX9pdF39h2Hgaeb5hEZM2Z3jUrrMgWQAu82jMtZp7p3vJT3BzToKtZI7NgQssZje5o0zryOrhQvzQAg==} + dev: false + + /vscode-languageserver@9.0.1: + resolution: {integrity: sha512-woByF3PDpkHFUreUa7Hos7+pUWdeWMXRd26+ZX2A8cFx6v/JPTtd4/uN0/jB6XQHYaOlHbio03NTHCqrgG5n7g==} + hasBin: true + dependencies: + vscode-languageserver-protocol: 3.17.5 + dev: false + /vscode-oniguruma@1.7.0: resolution: {integrity: sha512-L9WMGRfrjOhgHSdOYgCt/yRMsXzLDJSL7BPrOZt73gU0iWO4mpqzqQzOz5srxqTvMBaR0XZTSrVWo4j55Rc6cA==} dev: false @@ -23085,6 +23816,10 @@ packages: resolution: {integrity: sha512-AFbieoL7a5LMqcnOF04ji+rpXadgOXnZsxQr//r83kLPr7biP7am3g9zbaZIaBGwBRWeSvoMD4mgPdX3e4NWBg==} dev: false + /vscode-uri@3.0.8: + resolution: {integrity: sha512-AyFQ0EVmsOZOlAnxoFOGOq1SQDWAB7C6aqMGS23svWAllfOaxbuFvcT8D1i8z3Gyn8fraVeZNNmN6e9bxxXkKw==} + dev: false + /vue@3.5.21(typescript@5.7.3): resolution: {integrity: sha512-xxf9rum9KtOdwdRkiApWL+9hZEMWE90FHh8yS1+KJAiWYh+iGWV1FquPjoO9VUHQ+VIhsCXNNyZ5Sf4++RVZBA==} peerDependencies: @@ -23357,6 +24092,41 @@ packages: resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} dev: true + /worker-factory@7.0.46: + resolution: {integrity: sha512-Sr1hq2FMgNa04UVhYQacsw+i58BtMimzDb4+CqYphZ97OfefRpURu0UZ+JxMr/H36VVJBfuVkxTK7MytsanC3w==} + dependencies: + '@babel/runtime': 7.28.4 + fast-unique-numbers: 9.0.24 + tslib: 2.8.1 + dev: true + + /worker-timers-broker@8.0.11: + resolution: {integrity: sha512-uwhxKru8BI9m2tsogxr2fB6POZ8LB2xH+Pu3R0mvQnAZLPgLD6K3IX4LNKPTEgTJ/j5VsuQPB+gLI1NBNKkPlg==} + dependencies: + '@babel/runtime': 7.28.4 + broker-factory: 3.1.10 + fast-unique-numbers: 9.0.24 + tslib: 2.8.1 + worker-timers-worker: 9.0.11 + dev: true + + 
/worker-timers-worker@9.0.11: + resolution: {integrity: sha512-pArb5xtgHWImYpXhjg1OFv7JFG0ubmccb73TFoXHXjG830fFj+16N57q9YeBnZX52dn+itRrMoJZ9HaZBVzDaA==} + dependencies: + '@babel/runtime': 7.28.4 + tslib: 2.8.1 + worker-factory: 7.0.46 + dev: true + + /worker-timers@8.0.25: + resolution: {integrity: sha512-X7Z5dmM6PlrEnaadtFQOyXHGD/IysPA3HZzaC2koqsU1VI+RvyGmjiiLiUBQixK8PH5R7ilkOzZupWskNRaXmA==} + dependencies: + '@babel/runtime': 7.28.4 + tslib: 2.8.1 + worker-timers-broker: 8.0.11 + worker-timers-worker: 9.0.11 + dev: true + /workerd@1.20250405.0: resolution: {integrity: sha512-6+bOTz5ErQ8Ry91cAaRdipr/2o/EhNnRJAP69OKLii4nyU1A/EWsNhaZHGjBIPGKhla6qXS1BN41WEhFXUjI2w==} engines: {node: '>=16'} @@ -23501,19 +24271,6 @@ packages: /wrappy@1.0.2: resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} - /ws@7.5.10: - resolution: {integrity: sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ==} - engines: {node: '>=8.3.0'} - peerDependencies: - bufferutil: ^4.0.1 - utf-8-validate: ^5.0.2 - peerDependenciesMeta: - bufferutil: - optional: true - utf-8-validate: - optional: true - dev: true - /ws@8.17.1: resolution: {integrity: sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==} engines: {node: '>=10.0.0'} @@ -23578,11 +24335,6 @@ packages: engines: {node: '>=0.4.0'} dev: false - /xtend@4.0.2: - resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} - engines: {node: '>=0.4'} - dev: true - /y18n@5.0.8: resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} engines: {node: '>=10'}