diff --git a/Makefile b/Makefile index fd4b3277de..7004123be9 100644 --- a/Makefile +++ b/Makefile @@ -127,7 +127,7 @@ fuzz: ## Run fuzz tests for func in $$funcs; do \ echo "Fuzzing $$func in $$file"; \ parentDir=$$(dirname $$file); \ - go test $$parentDir -run=$$func -fuzz=$$func -fuzztime=60s; \ + go test $$parentDir -run=^$$func$$ -fuzz=^$$func$$ -fuzztime=1m; \ done; \ done .PHONY: unkey diff --git a/cmd/ctrl/api.go b/cmd/ctrl/api.go index fe0bd2177d..2fe98677e7 100644 --- a/cmd/ctrl/api.go +++ b/cmd/ctrl/api.go @@ -68,29 +68,18 @@ var apiCmd = &cli.Command{ cli.String("restate-api-key", "API key for Restate ingress requests", cli.EnvVar("UNKEY_RESTATE_API_KEY")), - cli.String("clickhouse-url", "ClickHouse connection string for analytics. Recommended for production. Example: clickhouse://user:pass@host:9000/unkey", - cli.EnvVar("UNKEY_CLICKHOUSE_URL")), - - // Build S3 configuration - cli.String("build-s3-url", "S3 URL for build storage", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_URL")), - cli.String("build-s3-external-url", "External S3 URL for presigned URLs", - cli.EnvVar("UNKEY_BUILD_S3_EXTERNAL_URL")), - cli.String("build-s3-bucket", "S3 bucket for build storage", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_BUCKET")), - cli.String("build-s3-access-key-id", "S3 access key ID", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_ACCESS_KEY_ID")), - cli.String("build-s3-access-key-secret", "S3 access key secret", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_ACCESS_KEY_SECRET")), - cli.StringSlice("available-regions", "Available regions for deployment", cli.EnvVar("UNKEY_AVAILABLE_REGIONS"), cli.Default([]string{"local.dev"})), // Certificate bootstrap configuration cli.String("default-domain", "Default domain for wildcard certificate bootstrapping (e.g., unkey.app)", cli.EnvVar("UNKEY_DEFAULT_DOMAIN")), + cli.String("regional-domain", "Domain for cross-region communication. 
Per-region wildcards created as *.{region}.{domain} (e.g., unkey.cloud)", cli.EnvVar("UNKEY_REGIONAL_DOMAIN")), // Custom domain configuration cli.String("cname-domain", "Base domain for custom domain CNAME targets (e.g., unkey-dns.com)", cli.Required(), cli.EnvVar("UNKEY_CNAME_DOMAIN")), + + // GitHub webhook configuration + cli.String("github-app-webhook-secret", "Secret for verifying GitHub webhook signatures", cli.EnvVar("UNKEY_GITHUB_APP_WEBHOOK_SECRET")), }, Action: apiAction, } @@ -137,15 +126,6 @@ func apiAction(ctx context.Context, cmd *cli.Command) error { // Control Plane Specific AuthToken: cmd.String("auth-token"), - // Build configuration - BuildS3: ctrlapi.S3Config{ - URL: cmd.String("build-s3-url"), - ExternalURL: cmd.String("build-s3-external-url"), - Bucket: cmd.String("build-s3-bucket"), - AccessKeySecret: cmd.String("build-s3-access-key-secret"), - AccessKeyID: cmd.String("build-s3-access-key-id"), - }, - // Restate configuration (API is a client, only needs ingress URL) Restate: ctrlapi.RestateConfig{ URL: cmd.String("restate-url"), @@ -161,6 +141,9 @@ func apiAction(ctx context.Context, cmd *cli.Command) error { // Custom domain configuration CnameDomain: strings.TrimSuffix(strings.TrimSpace(cmd.RequireString("cname-domain")), "."), + + // GitHub webhook + GitHubWebhookSecret: cmd.String("github-app-webhook-secret"), } err := config.Validate() diff --git a/cmd/ctrl/worker.go b/cmd/ctrl/worker.go index 2ec8beaa08..e3849add04 100644 --- a/cmd/ctrl/worker.go +++ b/cmd/ctrl/worker.go @@ -46,14 +46,6 @@ var workerCmd = &cli.Command{ ), // Build Configuration - cli.String("build-s3-url", "S3 Compatible Endpoint URL for build contexts", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_URL")), - cli.String("build-s3-bucket", "S3 bucket name for build contexts", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_BUCKET")), - cli.String("build-s3-access-key-id", "S3 access key ID for build contexts", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_ACCESS_KEY_ID")), 
- cli.String("build-s3-access-key-secret", "S3 secret access key for build contexts", - cli.Required(), cli.EnvVar("UNKEY_BUILD_S3_ACCESS_KEY_SECRET")), cli.String("build-platform", "Run builds on this platform ('dynamic', 'linux/amd64', 'linux/arm64')", cli.Default("linux/amd64"), cli.EnvVar("UNKEY_BUILD_PLATFORM")), @@ -105,6 +97,10 @@ var workerCmd = &cli.Command{ cli.String("sentinel-image", "The image new sentinels get deployed with", cli.Default("ghcr.io/unkeyed/unkey:local"), cli.EnvVar("UNKEY_SENTINEL_IMAGE")), cli.StringSlice("available-regions", "Available regions for deployment", cli.EnvVar("UNKEY_AVAILABLE_REGIONS"), cli.Default([]string{"local.dev"})), + // GitHub App Configuration + cli.Int64("github-app-id", "GitHub App ID for webhook-triggered deployments", cli.EnvVar("UNKEY_GITHUB_APP_ID")), + cli.String("github-private-key-pem", "GitHub App private key in PEM format", cli.EnvVar("UNKEY_GITHUB_PRIVATE_KEY_PEM")), + // Healthcheck heartbeat URLs cli.String("cert-renewal-heartbeat-url", "Checkly heartbeat URL for certificate renewal", cli.EnvVar("UNKEY_CERT_RENEWAL_HEARTBEAT_URL")), cli.String("quota-check-heartbeat-url", "Checkly heartbeat URL for quota checks", cli.EnvVar("UNKEY_QUOTA_CHECK_HEARTBEAT_URL")), @@ -132,13 +128,6 @@ func workerAction(ctx context.Context, cmd *cli.Command) error { VaultToken: cmd.String("vault-token"), // Build configuration - BuildS3: worker.S3Config{ - URL: cmd.String("build-s3-url"), - Bucket: cmd.String("build-s3-bucket"), - AccessKeyID: cmd.String("build-s3-access-key-id"), - AccessKeySecret: cmd.String("build-s3-access-key-secret"), - ExternalURL: "", - }, BuildPlatform: cmd.String("build-platform"), // Registry configuration @@ -179,16 +168,20 @@ func workerAction(ctx context.Context, cmd *cli.Command) error { ClickhouseURL: cmd.String("clickhouse-url"), ClickhouseAdminURL: cmd.String("clickhouse-admin-url"), - // Common - Clock: clock.New(), - // Sentinel configuration SentinelImage: 
cmd.String("sentinel-image"), AvailableRegions: cmd.RequireStringSlice("available-regions"), + // GitHub configuration + GitHub: worker.GitHubConfig{ + AppID: cmd.Int64("github-app-id"), + PrivateKeyPEM: cmd.String("github-private-key-pem"), + }, // Custom domain configuration CnameDomain: strings.TrimSuffix(strings.TrimSpace(cmd.RequireString("cname-domain")), "."), + Clock: clock.New(), + // Healthcheck heartbeat URLs CertRenewalHeartbeatURL: cmd.String("cert-renewal-heartbeat-url"), QuotaCheckHeartbeatURL: cmd.String("quota-check-heartbeat-url"), diff --git a/dev/.env.github.example b/dev/.env.github.example new file mode 100644 index 0000000000..d4208e4ba3 --- /dev/null +++ b/dev/.env.github.example @@ -0,0 +1,18 @@ +# GitHub App credentials for webhook-triggered deployments +# Copy this to .env.github and fill in your values + +# GitHub App ID (from your GitHub App settings) +UNKEY_GITHUB_APP_ID=2721195 + +# GitHub App Private Key (PEM format, keep the newlines) +# Generate from GitHub App settings > Private keys > Generate a private key +UNKEY_GITHUB_PRIVATE_KEY_PEM=-----BEGIN RSA PRIVATE KEY----- +MIIEpAIBAAKCAQEA... +...your key here... 
+-----END RSA PRIVATE KEY----- + + +UNKEY_GITHUB_APP_WEBHOOK_SECRET=superescuretsecret + + +NEXT_PUBLIC_GITHUB_APP_NAME="unkey-staging" diff --git a/dev/BUILD.bazel b/dev/BUILD.bazel deleted file mode 100644 index 19e68d78ca..0000000000 --- a/dev/BUILD.bazel +++ /dev/null @@ -1,5 +0,0 @@ -# Export SQL files for use by //pkg/dockertest MySQL image -exports_files([ - "init-databases.sql", - "04-seed-workspace.sql", -]) diff --git a/dev/Tiltfile b/dev/Tiltfile index 82f4a4e2af..adc1361f98 100644 --- a/dev/Tiltfile +++ b/dev/Tiltfile @@ -229,6 +229,7 @@ local_resource( ) + # Dashboard - runs locally with HMR (env loaded from web/apps/dashboard/.env) local_resource( 'dashboard', diff --git a/dev/k8s/manifests/ctrl-api.yaml b/dev/k8s/manifests/ctrl-api.yaml index 7fb1423324..8ba92a3158 100644 --- a/dev/k8s/manifests/ctrl-api.yaml +++ b/dev/k8s/manifests/ctrl-api.yaml @@ -44,35 +44,7 @@ spec: # Observability - DISABLED for development - name: UNKEY_OTEL value: "false" - # Control Plane Specific - # Vault Configuration (required) - - name: UNKEY_VAULT_URL - value: http://vault:8060 - - name: UNKEY_VAULT_TOKEN - value: vault-test-token-123 - - name: UNKEY_BUILD_S3_URL - valueFrom: - secretKeyRef: - name: depot-credentials - key: UNKEY_BUILD_S3_URL - - name: UNKEY_BUILD_S3_EXTERNAL_URL - valueFrom: - secretKeyRef: - name: depot-credentials - key: UNKEY_BUILD_S3_URL - - name: UNKEY_BUILD_S3_BUCKET - value: "build-contexts" - - name: UNKEY_BUILD_S3_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: depot-credentials - key: UNKEY_BUILD_S3_ACCESS_KEY_ID - - name: UNKEY_BUILD_S3_ACCESS_KEY_SECRET - valueFrom: - secretKeyRef: - name: depot-credentials - key: UNKEY_BUILD_S3_ACCESS_KEY_SECRET - # Registry Configuration (used by both Docker and Depot backends) + #kubectl create secret docker-registry depot-registry \ # --docker-server=registry.depot.dev \ # --docker-username=x-token \ @@ -97,6 +69,13 @@ spec: - name: UNKEY_CNAME_DOMAIN value: "unkey.local" + # GitHub webhook + - 
name: UNKEY_GITHUB_APP_WEBHOOK_SECRET + valueFrom: + secretKeyRef: + name: github-credentials + key: UNKEY_GITHUB_APP_WEBHOOK_SECRET + initContainers: - name: wait-for-dependencies image: busybox:1.36 @@ -104,7 +83,7 @@ spec: [ "sh", "-c", - "until nc -z mysql 3306 && nc -z s3 3902 && nc -z restate 8080 && nc -z vault 8060; do echo waiting for dependencies; sleep 2; done;", + "until nc -z mysql 3306 && nc -z restate 8080 && nc -z vault 8060; do echo waiting for dependencies; sleep 2; done;", ] --- diff --git a/dev/k8s/manifests/ctrl-worker.yaml b/dev/k8s/manifests/ctrl-worker.yaml index af27ba95e7..5db66b6337 100644 --- a/dev/k8s/manifests/ctrl-worker.yaml +++ b/dev/k8s/manifests/ctrl-worker.yaml @@ -58,24 +58,6 @@ spec: # Build Configuration - name: UNKEY_BUILD_PLATFORM value: "linux/arm64" - # Build S3 Storage (from depot-credentials secret) - - name: UNKEY_BUILD_S3_URL - valueFrom: - secretKeyRef: - name: depot-credentials - key: UNKEY_BUILD_S3_URL - - name: UNKEY_BUILD_S3_BUCKET - value: "build-contexts" - - name: UNKEY_BUILD_S3_ACCESS_KEY_ID - valueFrom: - secretKeyRef: - name: depot-credentials - key: UNKEY_BUILD_S3_ACCESS_KEY_ID - - name: UNKEY_BUILD_S3_ACCESS_KEY_SECRET - valueFrom: - secretKeyRef: - name: depot-credentials - key: UNKEY_BUILD_S3_ACCESS_KEY_SECRET # Registry Configuration - name: UNKEY_REGISTRY_URL @@ -118,6 +100,18 @@ spec: - name: UNKEY_CLICKHOUSE_ADMIN_URL value: "clickhouse://unkey_user_admin:C57RqT5EPZBqCJkMxN9mEZZEzMPcw9yBlwhIizk99t7kx6uLi9rYmtWObsXzdl@clickhouse:9000?secure=false&skip_verify=true" + # GitHub App Configuration + - name: UNKEY_GITHUB_APP_ID + valueFrom: + secretKeyRef: + name: github-credentials + key: UNKEY_GITHUB_APP_ID + - name: UNKEY_GITHUB_PRIVATE_KEY_PEM + valueFrom: + secretKeyRef: + name: github-credentials + key: UNKEY_GITHUB_PRIVATE_KEY_PEM + - name: UNKEY_SENTINEL_IMAGE value: "unkey/sentinel:latest" envFrom: @@ -135,7 +129,7 @@ spec: [ "sh", "-c", - "until nc -z mysql 3306 && nc -z s3 3902 && nc -z restate 
8080 && nc -z vault 8060; do echo waiting for dependencies; sleep 2; done;", + "until nc -z mysql 3306 && nc -z restate 8080 && nc -z vault 8060; do echo waiting for dependencies; sleep 2; done;", ] --- diff --git a/dev/linters/exhaustruct/analyzer.go b/dev/linters/exhaustruct/analyzer.go index e5446baf38..ab3efb2e57 100644 --- a/dev/linters/exhaustruct/analyzer.go +++ b/dev/linters/exhaustruct/analyzer.go @@ -96,6 +96,7 @@ var excludePatterns = []string{ // Kubernetes `^k8s\.io/api/core/v1.*$`, `^k8s\.io/api/apps/v1.*$`, + `^k8s\.io/api/batch/v1.*$`, `^k8s\.io/apimachinery/pkg/apis/meta/v1.*$`, `^sigs\.k8s\.io/controller-runtime/pkg/client.*$`, diff --git a/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go b/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go index 05c7d5c17a..726d3bdfe7 100644 --- a/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go +++ b/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go @@ -33,9 +33,6 @@ const ( // reflection-formatted method names, remove the leading slash and convert the remaining slash to a // period. const ( - // DeploymentServiceCreateS3UploadURLProcedure is the fully-qualified name of the - // DeploymentService's CreateS3UploadURL RPC. - DeploymentServiceCreateS3UploadURLProcedure = "/ctrl.v1.DeploymentService/CreateS3UploadURL" // DeploymentServiceCreateDeploymentProcedure is the fully-qualified name of the DeploymentService's // CreateDeployment RPC. DeploymentServiceCreateDeploymentProcedure = "/ctrl.v1.DeploymentService/CreateDeployment" @@ -52,8 +49,7 @@ const ( // DeploymentServiceClient is a client for the ctrl.v1.DeploymentService service. 
type DeploymentServiceClient interface { - CreateS3UploadURL(context.Context, *connect.Request[v1.CreateS3UploadURLRequest]) (*connect.Response[v1.CreateS3UploadURLResponse], error) - // Create a new deployment + // Create a new deployment with a prebuilt docker image CreateDeployment(context.Context, *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) // Get deployment details GetDeployment(context.Context, *connect.Request[v1.GetDeploymentRequest]) (*connect.Response[v1.GetDeploymentResponse], error) @@ -74,12 +70,6 @@ func NewDeploymentServiceClient(httpClient connect.HTTPClient, baseURL string, o baseURL = strings.TrimRight(baseURL, "/") deploymentServiceMethods := v1.File_ctrl_v1_deployment_proto.Services().ByName("DeploymentService").Methods() return &deploymentServiceClient{ - createS3UploadURL: connect.NewClient[v1.CreateS3UploadURLRequest, v1.CreateS3UploadURLResponse]( - httpClient, - baseURL+DeploymentServiceCreateS3UploadURLProcedure, - connect.WithSchema(deploymentServiceMethods.ByName("CreateS3UploadURL")), - connect.WithClientOptions(opts...), - ), createDeployment: connect.NewClient[v1.CreateDeploymentRequest, v1.CreateDeploymentResponse]( httpClient, baseURL+DeploymentServiceCreateDeploymentProcedure, @@ -109,16 +99,10 @@ func NewDeploymentServiceClient(httpClient connect.HTTPClient, baseURL string, o // deploymentServiceClient implements DeploymentServiceClient. type deploymentServiceClient struct { - createS3UploadURL *connect.Client[v1.CreateS3UploadURLRequest, v1.CreateS3UploadURLResponse] - createDeployment *connect.Client[v1.CreateDeploymentRequest, v1.CreateDeploymentResponse] - getDeployment *connect.Client[v1.GetDeploymentRequest, v1.GetDeploymentResponse] - rollback *connect.Client[v1.RollbackRequest, v1.RollbackResponse] - promote *connect.Client[v1.PromoteRequest, v1.PromoteResponse] -} - -// CreateS3UploadURL calls ctrl.v1.DeploymentService.CreateS3UploadURL. 
-func (c *deploymentServiceClient) CreateS3UploadURL(ctx context.Context, req *connect.Request[v1.CreateS3UploadURLRequest]) (*connect.Response[v1.CreateS3UploadURLResponse], error) { - return c.createS3UploadURL.CallUnary(ctx, req) + createDeployment *connect.Client[v1.CreateDeploymentRequest, v1.CreateDeploymentResponse] + getDeployment *connect.Client[v1.GetDeploymentRequest, v1.GetDeploymentResponse] + rollback *connect.Client[v1.RollbackRequest, v1.RollbackResponse] + promote *connect.Client[v1.PromoteRequest, v1.PromoteResponse] } // CreateDeployment calls ctrl.v1.DeploymentService.CreateDeployment. @@ -143,8 +127,7 @@ func (c *deploymentServiceClient) Promote(ctx context.Context, req *connect.Requ // DeploymentServiceHandler is an implementation of the ctrl.v1.DeploymentService service. type DeploymentServiceHandler interface { - CreateS3UploadURL(context.Context, *connect.Request[v1.CreateS3UploadURLRequest]) (*connect.Response[v1.CreateS3UploadURLResponse], error) - // Create a new deployment + // Create a new deployment with a prebuilt docker image CreateDeployment(context.Context, *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) // Get deployment details GetDeployment(context.Context, *connect.Request[v1.GetDeploymentRequest]) (*connect.Response[v1.GetDeploymentResponse], error) @@ -161,12 +144,6 @@ type DeploymentServiceHandler interface { // and JSON codecs. They also support gzip compression. 
func NewDeploymentServiceHandler(svc DeploymentServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { deploymentServiceMethods := v1.File_ctrl_v1_deployment_proto.Services().ByName("DeploymentService").Methods() - deploymentServiceCreateS3UploadURLHandler := connect.NewUnaryHandler( - DeploymentServiceCreateS3UploadURLProcedure, - svc.CreateS3UploadURL, - connect.WithSchema(deploymentServiceMethods.ByName("CreateS3UploadURL")), - connect.WithHandlerOptions(opts...), - ) deploymentServiceCreateDeploymentHandler := connect.NewUnaryHandler( DeploymentServiceCreateDeploymentProcedure, svc.CreateDeployment, @@ -193,8 +170,6 @@ func NewDeploymentServiceHandler(svc DeploymentServiceHandler, opts ...connect.H ) return "/ctrl.v1.DeploymentService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { - case DeploymentServiceCreateS3UploadURLProcedure: - deploymentServiceCreateS3UploadURLHandler.ServeHTTP(w, r) case DeploymentServiceCreateDeploymentProcedure: deploymentServiceCreateDeploymentHandler.ServeHTTP(w, r) case DeploymentServiceGetDeploymentProcedure: @@ -212,10 +187,6 @@ func NewDeploymentServiceHandler(svc DeploymentServiceHandler, opts ...connect.H // UnimplementedDeploymentServiceHandler returns CodeUnimplemented from all methods. 
type UnimplementedDeploymentServiceHandler struct{} -func (UnimplementedDeploymentServiceHandler) CreateS3UploadURL(context.Context, *connect.Request[v1.CreateS3UploadURLRequest]) (*connect.Response[v1.CreateS3UploadURLResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeploymentService.CreateS3UploadURL is not implemented")) -} - func (UnimplementedDeploymentServiceHandler) CreateDeployment(context.Context, *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) { return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeploymentService.CreateDeployment is not implemented")) } diff --git a/gen/proto/ctrl/v1/deployment.pb.go b/gen/proto/ctrl/v1/deployment.pb.go index b7d8f2aced..48e04bb24f 100644 --- a/gen/proto/ctrl/v1/deployment.pb.go +++ b/gen/proto/ctrl/v1/deployment.pb.go @@ -138,13 +138,9 @@ type CreateDeploymentRequest struct { ProjectId string `protobuf:"bytes,2,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` Branch string `protobuf:"bytes,3,opt,name=branch,proto3" json:"branch,omitempty"` EnvironmentSlug string `protobuf:"bytes,4,opt,name=environment_slug,json=environmentSlug,proto3" json:"environment_slug,omitempty"` - // Build source, we can either build it from scratch or accept prebuilt image - // - // Types that are valid to be assigned to Source: - // - // *CreateDeploymentRequest_BuildContext - // *CreateDeploymentRequest_DockerImage - Source isCreateDeploymentRequest_Source `protobuf_oneof:"source"` + // Build source - currently only prebuilt docker images are supported via API + // GitHub source builds are triggered automatically via webhook + DockerImage string `protobuf:"bytes,6,opt,name=docker_image,json=dockerImage,proto3" json:"docker_image,omitempty"` // Prebuilt image reference // Git information GitCommit *GitCommitInfo `protobuf:"bytes,7,opt,name=git_commit,json=gitCommit,proto3,oneof" 
json:"git_commit,omitempty"` // Authentication @@ -207,27 +203,9 @@ func (x *CreateDeploymentRequest) GetEnvironmentSlug() string { return "" } -func (x *CreateDeploymentRequest) GetSource() isCreateDeploymentRequest_Source { - if x != nil { - return x.Source - } - return nil -} - -func (x *CreateDeploymentRequest) GetBuildContext() *BuildContext { - if x != nil { - if x, ok := x.Source.(*CreateDeploymentRequest_BuildContext); ok { - return x.BuildContext - } - } - return nil -} - func (x *CreateDeploymentRequest) GetDockerImage() string { if x != nil { - if x, ok := x.Source.(*CreateDeploymentRequest_DockerImage); ok { - return x.DockerImage - } + return x.DockerImage } return "" } @@ -253,74 +231,6 @@ func (x *CreateDeploymentRequest) GetCommand() []string { return nil } -type isCreateDeploymentRequest_Source interface { - isCreateDeploymentRequest_Source() -} - -type CreateDeploymentRequest_BuildContext struct { - BuildContext *BuildContext `protobuf:"bytes,5,opt,name=build_context,json=buildContext,proto3,oneof"` -} - -type CreateDeploymentRequest_DockerImage struct { - DockerImage string `protobuf:"bytes,6,opt,name=docker_image,json=dockerImage,proto3,oneof"` // Prebuilt image reference -} - -func (*CreateDeploymentRequest_BuildContext) isCreateDeploymentRequest_Source() {} - -func (*CreateDeploymentRequest_DockerImage) isCreateDeploymentRequest_Source() {} - -type BuildContext struct { - state protoimpl.MessageState `protogen:"open.v1"` - BuildContextPath string `protobuf:"bytes,1,opt,name=build_context_path,json=buildContextPath,proto3" json:"build_context_path,omitempty"` // S3 key for uploaded build context - DockerfilePath *string `protobuf:"bytes,2,opt,name=dockerfile_path,json=dockerfilePath,proto3,oneof" json:"dockerfile_path,omitempty"` // Path to Dockerfile within context (default: "Dockerfile") - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BuildContext) Reset() { - *x = BuildContext{} - mi := 
&file_ctrl_v1_deployment_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BuildContext) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BuildContext) ProtoMessage() {} - -func (x *BuildContext) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BuildContext.ProtoReflect.Descriptor instead. -func (*BuildContext) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{1} -} - -func (x *BuildContext) GetBuildContextPath() string { - if x != nil { - return x.BuildContextPath - } - return "" -} - -func (x *BuildContext) GetDockerfilePath() string { - if x != nil && x.DockerfilePath != nil { - return *x.DockerfilePath - } - return "" -} - type GitCommitInfo struct { state protoimpl.MessageState `protogen:"open.v1"` CommitSha string `protobuf:"bytes,1,opt,name=commit_sha,json=commitSha,proto3" json:"commit_sha,omitempty"` @@ -334,7 +244,7 @@ type GitCommitInfo struct { func (x *GitCommitInfo) Reset() { *x = GitCommitInfo{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[2] + mi := &file_ctrl_v1_deployment_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -346,7 +256,7 @@ func (x *GitCommitInfo) String() string { func (*GitCommitInfo) ProtoMessage() {} func (x *GitCommitInfo) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[2] + mi := &file_ctrl_v1_deployment_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -359,7 +269,7 @@ func (x *GitCommitInfo) ProtoReflect() protoreflect.Message { // Deprecated: Use GitCommitInfo.ProtoReflect.Descriptor instead. 
func (*GitCommitInfo) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{2} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{1} } func (x *GitCommitInfo) GetCommitSha() string { @@ -400,14 +310,14 @@ func (x *GitCommitInfo) GetTimestamp() int64 { type CreateDeploymentResponse struct { state protoimpl.MessageState `protogen:"open.v1"` DeploymentId string `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"deployment_id,omitempty"` - Status DeploymentStatus `protobuf:"varint,2,opt,name=status,proto3,enum=ctrl.v1.DeploymentStatus" json:"status,omitempty"` // Will be PENDING or BUILDING + Status DeploymentStatus `protobuf:"varint,2,opt,name=status,proto3,enum=ctrl.v1.DeploymentStatus" json:"status,omitempty"` // Will be PENDING or DEPLOYING unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *CreateDeploymentResponse) Reset() { *x = CreateDeploymentResponse{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[3] + mi := &file_ctrl_v1_deployment_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -419,7 +329,7 @@ func (x *CreateDeploymentResponse) String() string { func (*CreateDeploymentResponse) ProtoMessage() {} func (x *CreateDeploymentResponse) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[3] + mi := &file_ctrl_v1_deployment_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -432,7 +342,7 @@ func (x *CreateDeploymentResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use CreateDeploymentResponse.ProtoReflect.Descriptor instead. 
func (*CreateDeploymentResponse) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{3} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{2} } func (x *CreateDeploymentResponse) GetDeploymentId() string { @@ -458,7 +368,7 @@ type GetDeploymentRequest struct { func (x *GetDeploymentRequest) Reset() { *x = GetDeploymentRequest{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[4] + mi := &file_ctrl_v1_deployment_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -470,7 +380,7 @@ func (x *GetDeploymentRequest) String() string { func (*GetDeploymentRequest) ProtoMessage() {} func (x *GetDeploymentRequest) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[4] + mi := &file_ctrl_v1_deployment_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -483,7 +393,7 @@ func (x *GetDeploymentRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use GetDeploymentRequest.ProtoReflect.Descriptor instead. 
func (*GetDeploymentRequest) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{4} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{3} } func (x *GetDeploymentRequest) GetDeploymentId() string { @@ -502,7 +412,7 @@ type GetDeploymentResponse struct { func (x *GetDeploymentResponse) Reset() { *x = GetDeploymentResponse{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[5] + mi := &file_ctrl_v1_deployment_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -514,7 +424,7 @@ func (x *GetDeploymentResponse) String() string { func (*GetDeploymentResponse) ProtoMessage() {} func (x *GetDeploymentResponse) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[5] + mi := &file_ctrl_v1_deployment_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -527,7 +437,7 @@ func (x *GetDeploymentResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use GetDeploymentResponse.ProtoReflect.Descriptor instead. 
func (*GetDeploymentResponse) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{5} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{4} } func (x *GetDeploymentResponse) GetDeployment() *Deployment { @@ -575,7 +485,7 @@ type Deployment struct { func (x *Deployment) Reset() { *x = Deployment{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[6] + mi := &file_ctrl_v1_deployment_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -587,7 +497,7 @@ func (x *Deployment) String() string { func (*Deployment) ProtoMessage() {} func (x *Deployment) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[6] + mi := &file_ctrl_v1_deployment_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -600,7 +510,7 @@ func (x *Deployment) ProtoReflect() protoreflect.Message { // Deprecated: Use Deployment.ProtoReflect.Descriptor instead. 
func (*Deployment) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{6} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{5} } func (x *Deployment) GetId() string { @@ -755,7 +665,7 @@ type DeploymentStep struct { func (x *DeploymentStep) Reset() { *x = DeploymentStep{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[7] + mi := &file_ctrl_v1_deployment_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -767,7 +677,7 @@ func (x *DeploymentStep) String() string { func (*DeploymentStep) ProtoMessage() {} func (x *DeploymentStep) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[7] + mi := &file_ctrl_v1_deployment_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -780,7 +690,7 @@ func (x *DeploymentStep) ProtoReflect() protoreflect.Message { // Deprecated: Use DeploymentStep.ProtoReflect.Descriptor instead. 
func (*DeploymentStep) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{7} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{6} } func (x *DeploymentStep) GetStatus() string { @@ -827,7 +737,7 @@ type Topology struct { func (x *Topology) Reset() { *x = Topology{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[8] + mi := &file_ctrl_v1_deployment_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -839,7 +749,7 @@ func (x *Topology) String() string { func (*Topology) ProtoMessage() {} func (x *Topology) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[8] + mi := &file_ctrl_v1_deployment_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -852,7 +762,7 @@ func (x *Topology) ProtoReflect() protoreflect.Message { // Deprecated: Use Topology.ProtoReflect.Descriptor instead. func (*Topology) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{8} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{7} } func (x *Topology) GetCpuMillicores() int32 { @@ -908,7 +818,7 @@ type RegionalConfig struct { func (x *RegionalConfig) Reset() { *x = RegionalConfig{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[9] + mi := &file_ctrl_v1_deployment_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -920,7 +830,7 @@ func (x *RegionalConfig) String() string { func (*RegionalConfig) ProtoMessage() {} func (x *RegionalConfig) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[9] + mi := &file_ctrl_v1_deployment_proto_msgTypes[8] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -933,7 +843,7 @@ func (x *RegionalConfig) ProtoReflect() protoreflect.Message { // Deprecated: Use 
RegionalConfig.ProtoReflect.Descriptor instead. func (*RegionalConfig) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{9} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{8} } func (x *RegionalConfig) GetRegion() string { @@ -967,7 +877,7 @@ type RollbackRequest struct { func (x *RollbackRequest) Reset() { *x = RollbackRequest{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[10] + mi := &file_ctrl_v1_deployment_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -979,7 +889,7 @@ func (x *RollbackRequest) String() string { func (*RollbackRequest) ProtoMessage() {} func (x *RollbackRequest) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[10] + mi := &file_ctrl_v1_deployment_proto_msgTypes[9] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -992,7 +902,7 @@ func (x *RollbackRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead. 
func (*RollbackRequest) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{10} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{9} } func (x *RollbackRequest) GetSourceDeploymentId() string { @@ -1017,7 +927,7 @@ type RollbackResponse struct { func (x *RollbackResponse) Reset() { *x = RollbackResponse{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[11] + mi := &file_ctrl_v1_deployment_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1029,7 +939,7 @@ func (x *RollbackResponse) String() string { func (*RollbackResponse) ProtoMessage() {} func (x *RollbackResponse) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[11] + mi := &file_ctrl_v1_deployment_proto_msgTypes[10] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1042,7 +952,7 @@ func (x *RollbackResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackResponse.ProtoReflect.Descriptor instead. 
func (*RollbackResponse) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{11} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{10} } type PromoteRequest struct { @@ -1054,7 +964,7 @@ type PromoteRequest struct { func (x *PromoteRequest) Reset() { *x = PromoteRequest{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[12] + mi := &file_ctrl_v1_deployment_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1066,7 +976,7 @@ func (x *PromoteRequest) String() string { func (*PromoteRequest) ProtoMessage() {} func (x *PromoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[12] + mi := &file_ctrl_v1_deployment_proto_msgTypes[11] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1079,7 +989,7 @@ func (x *PromoteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteRequest.ProtoReflect.Descriptor instead. 
func (*PromoteRequest) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{12} + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{11} } func (x *PromoteRequest) GetTargetDeploymentId() string { @@ -1097,7 +1007,7 @@ type PromoteResponse struct { func (x *PromoteResponse) Reset() { *x = PromoteResponse{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[13] + mi := &file_ctrl_v1_deployment_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -1109,7 +1019,7 @@ func (x *PromoteResponse) String() string { func (*PromoteResponse) ProtoMessage() {} func (x *PromoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[13] + mi := &file_ctrl_v1_deployment_proto_msgTypes[12] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1122,129 +1032,27 @@ func (x *PromoteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteResponse.ProtoReflect.Descriptor instead. 
func (*PromoteResponse) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{13} -} - -type CreateS3UploadURLRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - UnkeyProjectId string `protobuf:"bytes,1,opt,name=unkey_project_id,json=unkeyProjectId,proto3" json:"unkey_project_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateS3UploadURLRequest) Reset() { - *x = CreateS3UploadURLRequest{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[14] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateS3UploadURLRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateS3UploadURLRequest) ProtoMessage() {} - -func (x *CreateS3UploadURLRequest) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[14] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateS3UploadURLRequest.ProtoReflect.Descriptor instead. 
-func (*CreateS3UploadURLRequest) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{14} -} - -func (x *CreateS3UploadURLRequest) GetUnkeyProjectId() string { - if x != nil { - return x.UnkeyProjectId - } - return "" -} - -type CreateS3UploadURLResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - UploadUrl string `protobuf:"bytes,1,opt,name=upload_url,json=uploadUrl,proto3" json:"upload_url,omitempty"` // Presigned PUT URL - BuildContextPath string `protobuf:"bytes,2,opt,name=build_context_path,json=buildContextPath,proto3" json:"build_context_path,omitempty"` // S3 key to use in CreateBuild - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *CreateS3UploadURLResponse) Reset() { - *x = CreateS3UploadURLResponse{} - mi := &file_ctrl_v1_deployment_proto_msgTypes[15] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *CreateS3UploadURLResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*CreateS3UploadURLResponse) ProtoMessage() {} - -func (x *CreateS3UploadURLResponse) ProtoReflect() protoreflect.Message { - mi := &file_ctrl_v1_deployment_proto_msgTypes[15] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use CreateS3UploadURLResponse.ProtoReflect.Descriptor instead. 
-func (*CreateS3UploadURLResponse) Descriptor() ([]byte, []int) { - return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{15} -} - -func (x *CreateS3UploadURLResponse) GetUploadUrl() string { - if x != nil { - return x.UploadUrl - } - return "" -} - -func (x *CreateS3UploadURLResponse) GetBuildContextPath() string { - if x != nil { - return x.BuildContextPath - } - return "" + return file_ctrl_v1_deployment_proto_rawDescGZIP(), []int{12} } var File_ctrl_v1_deployment_proto protoreflect.FileDescriptor const file_ctrl_v1_deployment_proto_rawDesc = "" + "\n" + - "\x18ctrl/v1/deployment.proto\x12\actrl.v1\"\x89\x03\n" + + "\x18ctrl/v1/deployment.proto\x12\actrl.v1\"\xbf\x02\n" + "\x17CreateDeploymentRequest\x12\x1d\n" + "\n" + "project_id\x18\x02 \x01(\tR\tprojectId\x12\x16\n" + "\x06branch\x18\x03 \x01(\tR\x06branch\x12)\n" + - "\x10environment_slug\x18\x04 \x01(\tR\x0fenvironmentSlug\x12<\n" + - "\rbuild_context\x18\x05 \x01(\v2\x15.ctrl.v1.BuildContextH\x00R\fbuildContext\x12#\n" + - "\fdocker_image\x18\x06 \x01(\tH\x00R\vdockerImage\x12:\n" + + "\x10environment_slug\x18\x04 \x01(\tR\x0fenvironmentSlug\x12!\n" + + "\fdocker_image\x18\x06 \x01(\tR\vdockerImage\x12:\n" + "\n" + - "git_commit\x18\a \x01(\v2\x16.ctrl.v1.GitCommitInfoH\x01R\tgitCommit\x88\x01\x01\x12$\n" + - "\vkeyspace_id\x18\b \x01(\tH\x02R\n" + + "git_commit\x18\a \x01(\v2\x16.ctrl.v1.GitCommitInfoH\x00R\tgitCommit\x88\x01\x01\x12$\n" + + "\vkeyspace_id\x18\b \x01(\tH\x01R\n" + "keyspaceId\x88\x01\x01\x12\x18\n" + - "\acommand\x18\t \x03(\tR\acommandB\b\n" + - "\x06sourceB\r\n" + + "\acommand\x18\t \x03(\tR\acommandB\r\n" + "\v_git_commitB\x0e\n" + - "\f_keyspace_idJ\x04\b\x01\x10\x02\"~\n" + - "\fBuildContext\x12,\n" + - "\x12build_context_path\x18\x01 \x01(\tR\x10buildContextPath\x12,\n" + - "\x0fdockerfile_path\x18\x02 \x01(\tH\x00R\x0edockerfilePath\x88\x01\x01B\x12\n" + - "\x10_dockerfile_path\"\xc4\x01\n" + + "\f_keyspace_idJ\x04\b\x01\x10\x02\"\xc4\x01\n" + "\rGitCommitInfo\x12\x1d\n" + 
"\n" + "commit_sha\x18\x01 \x01(\tR\tcommitSha\x12%\n" + @@ -1315,13 +1123,7 @@ const file_ctrl_v1_deployment_proto_rawDesc = "" + "\x10RollbackResponse\"B\n" + "\x0ePromoteRequest\x120\n" + "\x14target_deployment_id\x18\x01 \x01(\tR\x12targetDeploymentId\"\x11\n" + - "\x0fPromoteResponse\"D\n" + - "\x18CreateS3UploadURLRequest\x12(\n" + - "\x10unkey_project_id\x18\x01 \x01(\tR\x0eunkeyProjectId\"h\n" + - "\x19CreateS3UploadURLResponse\x12\x1d\n" + - "\n" + - "upload_url\x18\x01 \x01(\tR\tuploadUrl\x12,\n" + - "\x12build_context_path\x18\x02 \x01(\tR\x10buildContextPath*\xef\x01\n" + + "\x0fPromoteResponse*\xef\x01\n" + "\x10DeploymentStatus\x12!\n" + "\x1dDEPLOYMENT_STATUS_UNSPECIFIED\x10\x00\x12\x1d\n" + "\x19DEPLOYMENT_STATUS_PENDING\x10\x01\x12\x1e\n" + @@ -1334,9 +1136,8 @@ const file_ctrl_v1_deployment_proto_rawDesc = "" + "SourceType\x12\x1b\n" + "\x17SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\x13\n" + "\x0fSOURCE_TYPE_GIT\x10\x01\x12\x1a\n" + - "\x16SOURCE_TYPE_CLI_UPLOAD\x10\x022\xa1\x03\n" + - "\x11DeploymentService\x12\\\n" + - "\x11CreateS3UploadURL\x12!.ctrl.v1.CreateS3UploadURLRequest\x1a\".ctrl.v1.CreateS3UploadURLResponse\"\x00\x12Y\n" + + "\x16SOURCE_TYPE_CLI_UPLOAD\x10\x022\xc3\x02\n" + + "\x11DeploymentService\x12Y\n" + "\x10CreateDeployment\x12 .ctrl.v1.CreateDeploymentRequest\x1a!.ctrl.v1.CreateDeploymentResponse\"\x00\x12P\n" + "\rGetDeployment\x12\x1d.ctrl.v1.GetDeploymentRequest\x1a\x1e.ctrl.v1.GetDeploymentResponse\"\x00\x12A\n" + "\bRollback\x12\x18.ctrl.v1.RollbackRequest\x1a\x19.ctrl.v1.RollbackResponse\"\x00\x12>\n" + @@ -1356,53 +1157,47 @@ func file_ctrl_v1_deployment_proto_rawDescGZIP() []byte { } var file_ctrl_v1_deployment_proto_enumTypes = make([]protoimpl.EnumInfo, 2) -var file_ctrl_v1_deployment_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_ctrl_v1_deployment_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_ctrl_v1_deployment_proto_goTypes = []any{ - (DeploymentStatus)(0), // 0: ctrl.v1.DeploymentStatus - 
(SourceType)(0), // 1: ctrl.v1.SourceType - (*CreateDeploymentRequest)(nil), // 2: ctrl.v1.CreateDeploymentRequest - (*BuildContext)(nil), // 3: ctrl.v1.BuildContext - (*GitCommitInfo)(nil), // 4: ctrl.v1.GitCommitInfo - (*CreateDeploymentResponse)(nil), // 5: ctrl.v1.CreateDeploymentResponse - (*GetDeploymentRequest)(nil), // 6: ctrl.v1.GetDeploymentRequest - (*GetDeploymentResponse)(nil), // 7: ctrl.v1.GetDeploymentResponse - (*Deployment)(nil), // 8: ctrl.v1.Deployment - (*DeploymentStep)(nil), // 9: ctrl.v1.DeploymentStep - (*Topology)(nil), // 10: ctrl.v1.Topology - (*RegionalConfig)(nil), // 11: ctrl.v1.RegionalConfig - (*RollbackRequest)(nil), // 12: ctrl.v1.RollbackRequest - (*RollbackResponse)(nil), // 13: ctrl.v1.RollbackResponse - (*PromoteRequest)(nil), // 14: ctrl.v1.PromoteRequest - (*PromoteResponse)(nil), // 15: ctrl.v1.PromoteResponse - (*CreateS3UploadURLRequest)(nil), // 16: ctrl.v1.CreateS3UploadURLRequest - (*CreateS3UploadURLResponse)(nil), // 17: ctrl.v1.CreateS3UploadURLResponse - nil, // 18: ctrl.v1.Deployment.EnvironmentVariablesEntry + (DeploymentStatus)(0), // 0: ctrl.v1.DeploymentStatus + (SourceType)(0), // 1: ctrl.v1.SourceType + (*CreateDeploymentRequest)(nil), // 2: ctrl.v1.CreateDeploymentRequest + (*GitCommitInfo)(nil), // 3: ctrl.v1.GitCommitInfo + (*CreateDeploymentResponse)(nil), // 4: ctrl.v1.CreateDeploymentResponse + (*GetDeploymentRequest)(nil), // 5: ctrl.v1.GetDeploymentRequest + (*GetDeploymentResponse)(nil), // 6: ctrl.v1.GetDeploymentResponse + (*Deployment)(nil), // 7: ctrl.v1.Deployment + (*DeploymentStep)(nil), // 8: ctrl.v1.DeploymentStep + (*Topology)(nil), // 9: ctrl.v1.Topology + (*RegionalConfig)(nil), // 10: ctrl.v1.RegionalConfig + (*RollbackRequest)(nil), // 11: ctrl.v1.RollbackRequest + (*RollbackResponse)(nil), // 12: ctrl.v1.RollbackResponse + (*PromoteRequest)(nil), // 13: ctrl.v1.PromoteRequest + (*PromoteResponse)(nil), // 14: ctrl.v1.PromoteResponse + nil, // 15: 
ctrl.v1.Deployment.EnvironmentVariablesEntry } var file_ctrl_v1_deployment_proto_depIdxs = []int32{ - 3, // 0: ctrl.v1.CreateDeploymentRequest.build_context:type_name -> ctrl.v1.BuildContext - 4, // 1: ctrl.v1.CreateDeploymentRequest.git_commit:type_name -> ctrl.v1.GitCommitInfo - 0, // 2: ctrl.v1.CreateDeploymentResponse.status:type_name -> ctrl.v1.DeploymentStatus - 8, // 3: ctrl.v1.GetDeploymentResponse.deployment:type_name -> ctrl.v1.Deployment - 0, // 4: ctrl.v1.Deployment.status:type_name -> ctrl.v1.DeploymentStatus - 18, // 5: ctrl.v1.Deployment.environment_variables:type_name -> ctrl.v1.Deployment.EnvironmentVariablesEntry - 10, // 6: ctrl.v1.Deployment.topology:type_name -> ctrl.v1.Topology - 9, // 7: ctrl.v1.Deployment.steps:type_name -> ctrl.v1.DeploymentStep - 11, // 8: ctrl.v1.Topology.regions:type_name -> ctrl.v1.RegionalConfig - 16, // 9: ctrl.v1.DeploymentService.CreateS3UploadURL:input_type -> ctrl.v1.CreateS3UploadURLRequest - 2, // 10: ctrl.v1.DeploymentService.CreateDeployment:input_type -> ctrl.v1.CreateDeploymentRequest - 6, // 11: ctrl.v1.DeploymentService.GetDeployment:input_type -> ctrl.v1.GetDeploymentRequest - 12, // 12: ctrl.v1.DeploymentService.Rollback:input_type -> ctrl.v1.RollbackRequest - 14, // 13: ctrl.v1.DeploymentService.Promote:input_type -> ctrl.v1.PromoteRequest - 17, // 14: ctrl.v1.DeploymentService.CreateS3UploadURL:output_type -> ctrl.v1.CreateS3UploadURLResponse - 5, // 15: ctrl.v1.DeploymentService.CreateDeployment:output_type -> ctrl.v1.CreateDeploymentResponse - 7, // 16: ctrl.v1.DeploymentService.GetDeployment:output_type -> ctrl.v1.GetDeploymentResponse - 13, // 17: ctrl.v1.DeploymentService.Rollback:output_type -> ctrl.v1.RollbackResponse - 15, // 18: ctrl.v1.DeploymentService.Promote:output_type -> ctrl.v1.PromoteResponse - 14, // [14:19] is the sub-list for method output_type - 9, // [9:14] is the sub-list for method input_type - 9, // [9:9] is the sub-list for extension type_name - 9, // [9:9] is the sub-list for 
extension extendee - 0, // [0:9] is the sub-list for field type_name + 3, // 0: ctrl.v1.CreateDeploymentRequest.git_commit:type_name -> ctrl.v1.GitCommitInfo + 0, // 1: ctrl.v1.CreateDeploymentResponse.status:type_name -> ctrl.v1.DeploymentStatus + 7, // 2: ctrl.v1.GetDeploymentResponse.deployment:type_name -> ctrl.v1.Deployment + 0, // 3: ctrl.v1.Deployment.status:type_name -> ctrl.v1.DeploymentStatus + 15, // 4: ctrl.v1.Deployment.environment_variables:type_name -> ctrl.v1.Deployment.EnvironmentVariablesEntry + 9, // 5: ctrl.v1.Deployment.topology:type_name -> ctrl.v1.Topology + 8, // 6: ctrl.v1.Deployment.steps:type_name -> ctrl.v1.DeploymentStep + 10, // 7: ctrl.v1.Topology.regions:type_name -> ctrl.v1.RegionalConfig + 2, // 8: ctrl.v1.DeploymentService.CreateDeployment:input_type -> ctrl.v1.CreateDeploymentRequest + 5, // 9: ctrl.v1.DeploymentService.GetDeployment:input_type -> ctrl.v1.GetDeploymentRequest + 11, // 10: ctrl.v1.DeploymentService.Rollback:input_type -> ctrl.v1.RollbackRequest + 13, // 11: ctrl.v1.DeploymentService.Promote:input_type -> ctrl.v1.PromoteRequest + 4, // 12: ctrl.v1.DeploymentService.CreateDeployment:output_type -> ctrl.v1.CreateDeploymentResponse + 6, // 13: ctrl.v1.DeploymentService.GetDeployment:output_type -> ctrl.v1.GetDeploymentResponse + 12, // 14: ctrl.v1.DeploymentService.Rollback:output_type -> ctrl.v1.RollbackResponse + 14, // 15: ctrl.v1.DeploymentService.Promote:output_type -> ctrl.v1.PromoteResponse + 12, // [12:16] is the sub-list for method output_type + 8, // [8:12] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name } func init() { file_ctrl_v1_deployment_proto_init() } @@ -1410,18 +1205,14 @@ func file_ctrl_v1_deployment_proto_init() { if File_ctrl_v1_deployment_proto != nil { return } - file_ctrl_v1_deployment_proto_msgTypes[0].OneofWrappers = []any{ - 
(*CreateDeploymentRequest_BuildContext)(nil), - (*CreateDeploymentRequest_DockerImage)(nil), - } - file_ctrl_v1_deployment_proto_msgTypes[1].OneofWrappers = []any{} + file_ctrl_v1_deployment_proto_msgTypes[0].OneofWrappers = []any{} type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_ctrl_v1_deployment_proto_rawDesc), len(file_ctrl_v1_deployment_proto_rawDesc)), NumEnums: 2, - NumMessages: 17, + NumMessages: 14, NumExtensions: 0, NumServices: 1, }, diff --git a/gen/proto/hydra/v1/BUILD.bazel b/gen/proto/hydra/v1/BUILD.bazel index 7bd9582aed..536c24b6d5 100644 --- a/gen/proto/hydra/v1/BUILD.bazel +++ b/gen/proto/hydra/v1/BUILD.bazel @@ -3,8 +3,6 @@ load("@rules_go//go:def.bzl", "go_library") go_library( name = "hydra", srcs = [ - "build.pb.go", - "build_restate.pb.go", "certificate.pb.go", "certificate_restate.pb.go", "clickhouse_user.pb.go", diff --git a/gen/proto/hydra/v1/build.pb.go b/gen/proto/hydra/v1/build.pb.go deleted file mode 100644 index 10f181642f..0000000000 --- a/gen/proto/hydra/v1/build.pb.go +++ /dev/null @@ -1,241 +0,0 @@ -// Code generated by protoc-gen-go. DO NOT EDIT. -// versions: -// protoc-gen-go v1.36.8 -// protoc (unknown) -// source: hydra/v1/build.proto - -package hydrav1 - -import ( - _ "github.com/restatedev/sdk-go/generated/dev/restate/sdk" - protoreflect "google.golang.org/protobuf/reflect/protoreflect" - protoimpl "google.golang.org/protobuf/runtime/protoimpl" - reflect "reflect" - sync "sync" - unsafe "unsafe" -) - -const ( - // Verify that this generated code is sufficiently up-to-date. - _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) - // Verify that runtime/protoimpl is sufficiently up-to-date. 
- _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) -) - -type BuildDockerImageRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - S3Url string `protobuf:"bytes,1,opt,name=s3_url,json=s3Url,proto3" json:"s3_url,omitempty"` - BuildContextPath string `protobuf:"bytes,2,opt,name=build_context_path,json=buildContextPath,proto3" json:"build_context_path,omitempty"` - DockerfilePath string `protobuf:"bytes,3,opt,name=dockerfile_path,json=dockerfilePath,proto3" json:"dockerfile_path,omitempty"` - ProjectId string `protobuf:"bytes,4,opt,name=project_id,json=projectId,proto3" json:"project_id,omitempty"` - DeploymentId string `protobuf:"bytes,5,opt,name=deployment_id,json=deploymentId,proto3" json:"deployment_id,omitempty"` - WorkspaceId string `protobuf:"bytes,6,opt,name=workspace_id,json=workspaceId,proto3" json:"workspace_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BuildDockerImageRequest) Reset() { - *x = BuildDockerImageRequest{} - mi := &file_hydra_v1_build_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BuildDockerImageRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BuildDockerImageRequest) ProtoMessage() {} - -func (x *BuildDockerImageRequest) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_build_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BuildDockerImageRequest.ProtoReflect.Descriptor instead. 
-func (*BuildDockerImageRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_build_proto_rawDescGZIP(), []int{0} -} - -func (x *BuildDockerImageRequest) GetS3Url() string { - if x != nil { - return x.S3Url - } - return "" -} - -func (x *BuildDockerImageRequest) GetBuildContextPath() string { - if x != nil { - return x.BuildContextPath - } - return "" -} - -func (x *BuildDockerImageRequest) GetDockerfilePath() string { - if x != nil { - return x.DockerfilePath - } - return "" -} - -func (x *BuildDockerImageRequest) GetProjectId() string { - if x != nil { - return x.ProjectId - } - return "" -} - -func (x *BuildDockerImageRequest) GetDeploymentId() string { - if x != nil { - return x.DeploymentId - } - return "" -} - -func (x *BuildDockerImageRequest) GetWorkspaceId() string { - if x != nil { - return x.WorkspaceId - } - return "" -} - -type BuildDockerImageResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - DepotProjectId string `protobuf:"bytes,1,opt,name=depot_project_id,json=depotProjectId,proto3" json:"depot_project_id,omitempty"` - DepotBuildId string `protobuf:"bytes,2,opt,name=depot_build_id,json=depotBuildId,proto3" json:"depot_build_id,omitempty"` - ImageName string `protobuf:"bytes,3,opt,name=image_name,json=imageName,proto3" json:"image_name,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *BuildDockerImageResponse) Reset() { - *x = BuildDockerImageResponse{} - mi := &file_hydra_v1_build_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *BuildDockerImageResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*BuildDockerImageResponse) ProtoMessage() {} - -func (x *BuildDockerImageResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_build_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - 
ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use BuildDockerImageResponse.ProtoReflect.Descriptor instead. -func (*BuildDockerImageResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_build_proto_rawDescGZIP(), []int{1} -} - -func (x *BuildDockerImageResponse) GetDepotProjectId() string { - if x != nil { - return x.DepotProjectId - } - return "" -} - -func (x *BuildDockerImageResponse) GetDepotBuildId() string { - if x != nil { - return x.DepotBuildId - } - return "" -} - -func (x *BuildDockerImageResponse) GetImageName() string { - if x != nil { - return x.ImageName - } - return "" -} - -var File_hydra_v1_build_proto protoreflect.FileDescriptor - -const file_hydra_v1_build_proto_rawDesc = "" + - "\n" + - "\x14hydra/v1/build.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"\xee\x01\n" + - "\x17BuildDockerImageRequest\x12\x15\n" + - "\x06s3_url\x18\x01 \x01(\tR\x05s3Url\x12,\n" + - "\x12build_context_path\x18\x02 \x01(\tR\x10buildContextPath\x12'\n" + - "\x0fdockerfile_path\x18\x03 \x01(\tR\x0edockerfilePath\x12\x1d\n" + - "\n" + - "project_id\x18\x04 \x01(\tR\tprojectId\x12#\n" + - "\rdeployment_id\x18\x05 \x01(\tR\fdeploymentId\x12!\n" + - "\fworkspace_id\x18\x06 \x01(\tR\vworkspaceId\"\x89\x01\n" + - "\x18BuildDockerImageResponse\x12(\n" + - "\x10depot_project_id\x18\x01 \x01(\tR\x0edepotProjectId\x12$\n" + - "\x0edepot_build_id\x18\x02 \x01(\tR\fdepotBuildId\x12\x1d\n" + - "\n" + - "image_name\x18\x03 \x01(\tR\timageName2q\n" + - "\fBuildService\x12[\n" + - "\x10BuildDockerImage\x12!.hydra.v1.BuildDockerImageRequest\x1a\".hydra.v1.BuildDockerImageResponse\"\x00\x1a\x04\x98\x80\x01\x00B\x90\x01\n" + - "\fcom.hydra.v1B\n" + - "BuildProtoP\x01Z3github.com/unkeyed/unkey/gen/proto/hydra/v1;hydrav1\xa2\x02\x03HXX\xaa\x02\bHydra.V1\xca\x02\bHydra\\V1\xe2\x02\x14Hydra\\V1\\GPBMetadata\xea\x02\tHydra::V1b\x06proto3" - -var ( - file_hydra_v1_build_proto_rawDescOnce sync.Once - 
file_hydra_v1_build_proto_rawDescData []byte -) - -func file_hydra_v1_build_proto_rawDescGZIP() []byte { - file_hydra_v1_build_proto_rawDescOnce.Do(func() { - file_hydra_v1_build_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_hydra_v1_build_proto_rawDesc), len(file_hydra_v1_build_proto_rawDesc))) - }) - return file_hydra_v1_build_proto_rawDescData -} - -var file_hydra_v1_build_proto_msgTypes = make([]protoimpl.MessageInfo, 2) -var file_hydra_v1_build_proto_goTypes = []any{ - (*BuildDockerImageRequest)(nil), // 0: hydra.v1.BuildDockerImageRequest - (*BuildDockerImageResponse)(nil), // 1: hydra.v1.BuildDockerImageResponse -} -var file_hydra_v1_build_proto_depIdxs = []int32{ - 0, // 0: hydra.v1.BuildService.BuildDockerImage:input_type -> hydra.v1.BuildDockerImageRequest - 1, // 1: hydra.v1.BuildService.BuildDockerImage:output_type -> hydra.v1.BuildDockerImageResponse - 1, // [1:2] is the sub-list for method output_type - 0, // [0:1] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name -} - -func init() { file_hydra_v1_build_proto_init() } -func file_hydra_v1_build_proto_init() { - if File_hydra_v1_build_proto != nil { - return - } - type x struct{} - out := protoimpl.TypeBuilder{ - File: protoimpl.DescBuilder{ - GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: unsafe.Slice(unsafe.StringData(file_hydra_v1_build_proto_rawDesc), len(file_hydra_v1_build_proto_rawDesc)), - NumEnums: 0, - NumMessages: 2, - NumExtensions: 0, - NumServices: 1, - }, - GoTypes: file_hydra_v1_build_proto_goTypes, - DependencyIndexes: file_hydra_v1_build_proto_depIdxs, - MessageInfos: file_hydra_v1_build_proto_msgTypes, - }.Build() - File_hydra_v1_build_proto = out.File - file_hydra_v1_build_proto_goTypes = nil - file_hydra_v1_build_proto_depIdxs = nil -} diff --git a/gen/proto/hydra/v1/build_restate.pb.go 
b/gen/proto/hydra/v1/build_restate.pb.go deleted file mode 100644 index 56f6a2a9dc..0000000000 --- a/gen/proto/hydra/v1/build_restate.pb.go +++ /dev/null @@ -1,103 +0,0 @@ -// Code generated by protoc-gen-go-restate. DO NOT EDIT. -// versions: -// - protoc-gen-go-restate v0.1 -// - protoc (unknown) -// source: hydra/v1/build.proto - -package hydrav1 - -import ( - fmt "fmt" - sdk_go "github.com/restatedev/sdk-go" - encoding "github.com/restatedev/sdk-go/encoding" - ingress "github.com/restatedev/sdk-go/ingress" -) - -// BuildServiceClient is the client API for hydra.v1.BuildService service. -type BuildServiceClient interface { - BuildDockerImage(opts ...sdk_go.ClientOption) sdk_go.Client[*BuildDockerImageRequest, *BuildDockerImageResponse] -} - -type buildServiceClient struct { - ctx sdk_go.Context - options []sdk_go.ClientOption -} - -func NewBuildServiceClient(ctx sdk_go.Context, opts ...sdk_go.ClientOption) BuildServiceClient { - cOpts := append([]sdk_go.ClientOption{sdk_go.WithProtoJSON}, opts...) - return &buildServiceClient{ - ctx, - cOpts, - } -} -func (c *buildServiceClient) BuildDockerImage(opts ...sdk_go.ClientOption) sdk_go.Client[*BuildDockerImageRequest, *BuildDockerImageResponse] { - cOpts := c.options - if len(opts) > 0 { - cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) - } - return sdk_go.WithRequestType[*BuildDockerImageRequest](sdk_go.Service[*BuildDockerImageResponse](c.ctx, "hydra.v1.BuildService", "BuildDockerImage", cOpts...)) -} - -// BuildServiceIngressClient is the ingress client API for hydra.v1.BuildService service. -// -// This client is used to call the service from outside of a Restate context. 
-type BuildServiceIngressClient interface { - BuildDockerImage() ingress.Requester[*BuildDockerImageRequest, *BuildDockerImageResponse] -} - -type buildServiceIngressClient struct { - client *ingress.Client - serviceName string -} - -func NewBuildServiceIngressClient(client *ingress.Client) BuildServiceIngressClient { - return &buildServiceIngressClient{ - client, - "hydra.v1.BuildService", - } -} - -func (c *buildServiceIngressClient) BuildDockerImage() ingress.Requester[*BuildDockerImageRequest, *BuildDockerImageResponse] { - codec := encoding.ProtoJSONCodec - return ingress.NewRequester[*BuildDockerImageRequest, *BuildDockerImageResponse](c.client, c.serviceName, "BuildDockerImage", nil, &codec) -} - -// BuildServiceServer is the server API for hydra.v1.BuildService service. -// All implementations should embed UnimplementedBuildServiceServer -// for forward compatibility. -type BuildServiceServer interface { - BuildDockerImage(ctx sdk_go.Context, req *BuildDockerImageRequest) (*BuildDockerImageResponse, error) -} - -// UnimplementedBuildServiceServer should be embedded to have -// forward compatible implementations. -// -// NOTE: this should be embedded by value instead of pointer to avoid a nil -// pointer dereference when methods are called. -type UnimplementedBuildServiceServer struct{} - -func (UnimplementedBuildServiceServer) BuildDockerImage(ctx sdk_go.Context, req *BuildDockerImageRequest) (*BuildDockerImageResponse, error) { - return nil, sdk_go.TerminalError(fmt.Errorf("method BuildDockerImage not implemented"), 501) -} -func (UnimplementedBuildServiceServer) testEmbeddedByValue() {} - -// UnsafeBuildServiceServer may be embedded to opt out of forward compatibility for this service. -// Use of this interface is not recommended, as added methods to BuildServiceServer will -// result in compilation errors. 
-type UnsafeBuildServiceServer interface { - mustEmbedUnimplementedBuildServiceServer() -} - -func NewBuildServiceServer(srv BuildServiceServer, opts ...sdk_go.ServiceDefinitionOption) sdk_go.ServiceDefinition { - // If the following call panics, it indicates UnimplementedBuildServiceServer was - // embedded by pointer and is nil. This will cause panics if an - // unimplemented method is ever invoked, so we test this at initialization - // time to prevent it from happening at runtime later due to I/O. - if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { - t.testEmbeddedByValue() - } - sOpts := append([]sdk_go.ServiceDefinitionOption{sdk_go.WithProtoJSON}, opts...) - router := sdk_go.NewService("hydra.v1.BuildService", sOpts...) - router = router.Handler("BuildDockerImage", sdk_go.NewServiceHandler(srv.BuildDockerImage)) - return router -} diff --git a/gen/proto/hydra/v1/deployment.pb.go b/gen/proto/hydra/v1/deployment.pb.go index 6b8d5adbb1..859a66e3ac 100644 --- a/gen/proto/hydra/v1/deployment.pb.go +++ b/gen/proto/hydra/v1/deployment.pb.go @@ -22,23 +22,144 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) +type DockerImage struct { + state protoimpl.MessageState `protogen:"open.v1"` + Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DockerImage) Reset() { + *x = DockerImage{} + mi := &file_hydra_v1_deployment_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DockerImage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DockerImage) ProtoMessage() {} + +func (x *DockerImage) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return 
mi.MessageOf(x) +} + +// Deprecated: Use DockerImage.ProtoReflect.Descriptor instead. +func (*DockerImage) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{0} +} + +func (x *DockerImage) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +type GitSource struct { + state protoimpl.MessageState `protogen:"open.v1"` + InstallationId int64 `protobuf:"varint,1,opt,name=installation_id,json=installationId,proto3" json:"installation_id,omitempty"` + Repository string `protobuf:"bytes,2,opt,name=repository,proto3" json:"repository,omitempty"` + CommitSha string `protobuf:"bytes,3,opt,name=commit_sha,json=commitSha,proto3" json:"commit_sha,omitempty"` + ContextPath string `protobuf:"bytes,4,opt,name=context_path,json=contextPath,proto3" json:"context_path,omitempty"` + DockerfilePath string `protobuf:"bytes,5,opt,name=dockerfile_path,json=dockerfilePath,proto3" json:"dockerfile_path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GitSource) Reset() { + *x = GitSource{} + mi := &file_hydra_v1_deployment_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GitSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GitSource) ProtoMessage() {} + +func (x *GitSource) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GitSource.ProtoReflect.Descriptor instead. 
+func (*GitSource) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{1} +} + +func (x *GitSource) GetInstallationId() int64 { + if x != nil { + return x.InstallationId + } + return 0 +} + +func (x *GitSource) GetRepository() string { + if x != nil { + return x.Repository + } + return "" +} + +func (x *GitSource) GetCommitSha() string { + if x != nil { + return x.CommitSha + } + return "" +} + +func (x *GitSource) GetContextPath() string { + if x != nil { + return x.ContextPath + } + return "" +} + +func (x *GitSource) GetDockerfilePath() string { + if x != nil { + return x.DockerfilePath + } + return "" +} + type DeployRequest struct { state protoimpl.MessageState `protogen:"open.v1"` DeploymentId string `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"deployment_id,omitempty"` KeyAuthId *string `protobuf:"bytes,2,opt,name=key_auth_id,json=keyAuthId,proto3,oneof" json:"key_auth_id,omitempty"` - // Build source fields, exactly one of (context_key, docker_image) must be set - BuildContextPath *string `protobuf:"bytes,3,opt,name=build_context_path,json=buildContextPath,proto3,oneof" json:"build_context_path,omitempty"` - DockerfilePath *string `protobuf:"bytes,4,opt,name=dockerfile_path,json=dockerfilePath,proto3,oneof" json:"dockerfile_path,omitempty"` - DockerImage *string `protobuf:"bytes,5,opt,name=docker_image,json=dockerImage,proto3,oneof" json:"docker_image,omitempty"` + // Types that are valid to be assigned to Source: + // + // *DeployRequest_Git + // *DeployRequest_DockerImage + Source isDeployRequest_Source `protobuf_oneof:"source"` // Container command override (e.g., ["./app", "serve"]) - Command []string `protobuf:"bytes,6,rep,name=command,proto3" json:"command,omitempty"` + Command []string `protobuf:"bytes,5,rep,name=command,proto3" json:"command,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } func (x *DeployRequest) Reset() { *x = DeployRequest{} - mi 
:= &file_hydra_v1_deployment_proto_msgTypes[0] + mi := &file_hydra_v1_deployment_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -50,7 +171,7 @@ func (x *DeployRequest) String() string { func (*DeployRequest) ProtoMessage() {} func (x *DeployRequest) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[0] + mi := &file_hydra_v1_deployment_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -63,7 +184,7 @@ func (x *DeployRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use DeployRequest.ProtoReflect.Descriptor instead. func (*DeployRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{0} + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{2} } func (x *DeployRequest) GetDeploymentId() string { @@ -80,25 +201,29 @@ func (x *DeployRequest) GetKeyAuthId() string { return "" } -func (x *DeployRequest) GetBuildContextPath() string { - if x != nil && x.BuildContextPath != nil { - return *x.BuildContextPath +func (x *DeployRequest) GetSource() isDeployRequest_Source { + if x != nil { + return x.Source } - return "" + return nil } -func (x *DeployRequest) GetDockerfilePath() string { - if x != nil && x.DockerfilePath != nil { - return *x.DockerfilePath +func (x *DeployRequest) GetGit() *GitSource { + if x != nil { + if x, ok := x.Source.(*DeployRequest_Git); ok { + return x.Git + } } - return "" + return nil } -func (x *DeployRequest) GetDockerImage() string { - if x != nil && x.DockerImage != nil { - return *x.DockerImage +func (x *DeployRequest) GetDockerImage() *DockerImage { + if x != nil { + if x, ok := x.Source.(*DeployRequest_DockerImage); ok { + return x.DockerImage + } } - return "" + return nil } func (x *DeployRequest) GetCommand() []string { @@ -108,6 +233,22 @@ func (x *DeployRequest) GetCommand() []string { return nil } +type 
isDeployRequest_Source interface { + isDeployRequest_Source() +} + +type DeployRequest_Git struct { + Git *GitSource `protobuf:"bytes,3,opt,name=git,proto3,oneof"` +} + +type DeployRequest_DockerImage struct { + DockerImage *DockerImage `protobuf:"bytes,4,opt,name=docker_image,json=dockerImage,proto3,oneof"` +} + +func (*DeployRequest_Git) isDeployRequest_Source() {} + +func (*DeployRequest_DockerImage) isDeployRequest_Source() {} + type DeployResponse struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields @@ -116,7 +257,7 @@ type DeployResponse struct { func (x *DeployResponse) Reset() { *x = DeployResponse{} - mi := &file_hydra_v1_deployment_proto_msgTypes[1] + mi := &file_hydra_v1_deployment_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -128,7 +269,7 @@ func (x *DeployResponse) String() string { func (*DeployResponse) ProtoMessage() {} func (x *DeployResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[1] + mi := &file_hydra_v1_deployment_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -141,7 +282,7 @@ func (x *DeployResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use DeployResponse.ProtoReflect.Descriptor instead. 
func (*DeployResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{1} + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{3} } type RollbackRequest struct { @@ -154,7 +295,7 @@ type RollbackRequest struct { func (x *RollbackRequest) Reset() { *x = RollbackRequest{} - mi := &file_hydra_v1_deployment_proto_msgTypes[2] + mi := &file_hydra_v1_deployment_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -166,7 +307,7 @@ func (x *RollbackRequest) String() string { func (*RollbackRequest) ProtoMessage() {} func (x *RollbackRequest) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[2] + mi := &file_hydra_v1_deployment_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -179,7 +320,7 @@ func (x *RollbackRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead. 
func (*RollbackRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{2} + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{4} } func (x *RollbackRequest) GetSourceDeploymentId() string { @@ -204,7 +345,7 @@ type RollbackResponse struct { func (x *RollbackResponse) Reset() { *x = RollbackResponse{} - mi := &file_hydra_v1_deployment_proto_msgTypes[3] + mi := &file_hydra_v1_deployment_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -216,7 +357,7 @@ func (x *RollbackResponse) String() string { func (*RollbackResponse) ProtoMessage() {} func (x *RollbackResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[3] + mi := &file_hydra_v1_deployment_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -229,7 +370,7 @@ func (x *RollbackResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use RollbackResponse.ProtoReflect.Descriptor instead. 
func (*RollbackResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{3} + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{5} } type PromoteRequest struct { @@ -241,7 +382,7 @@ type PromoteRequest struct { func (x *PromoteRequest) Reset() { *x = PromoteRequest{} - mi := &file_hydra_v1_deployment_proto_msgTypes[4] + mi := &file_hydra_v1_deployment_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -253,7 +394,7 @@ func (x *PromoteRequest) String() string { func (*PromoteRequest) ProtoMessage() {} func (x *PromoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[4] + mi := &file_hydra_v1_deployment_proto_msgTypes[6] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -266,7 +407,7 @@ func (x *PromoteRequest) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteRequest.ProtoReflect.Descriptor instead. 
func (*PromoteRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{4} + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{6} } func (x *PromoteRequest) GetTargetDeploymentId() string { @@ -284,7 +425,7 @@ type PromoteResponse struct { func (x *PromoteResponse) Reset() { *x = PromoteResponse{} - mi := &file_hydra_v1_deployment_proto_msgTypes[5] + mi := &file_hydra_v1_deployment_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } @@ -296,7 +437,7 @@ func (x *PromoteResponse) String() string { func (*PromoteResponse) ProtoMessage() {} func (x *PromoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[5] + mi := &file_hydra_v1_deployment_proto_msgTypes[7] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -309,25 +450,33 @@ func (x *PromoteResponse) ProtoReflect() protoreflect.Message { // Deprecated: Use PromoteResponse.ProtoReflect.Descriptor instead. 
func (*PromoteResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{5} + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{7} } var File_hydra_v1_deployment_proto protoreflect.FileDescriptor const file_hydra_v1_deployment_proto_rawDesc = "" + "\n" + - "\x19hydra/v1/deployment.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"\xc8\x02\n" + + "\x19hydra/v1/deployment.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"#\n" + + "\vDockerImage\x12\x14\n" + + "\x05image\x18\x01 \x01(\tR\x05image\"\xbf\x01\n" + + "\tGitSource\x12'\n" + + "\x0finstallation_id\x18\x01 \x01(\x03R\x0einstallationId\x12\x1e\n" + + "\n" + + "repository\x18\x02 \x01(\tR\n" + + "repository\x12\x1d\n" + + "\n" + + "commit_sha\x18\x03 \x01(\tR\tcommitSha\x12!\n" + + "\fcontext_path\x18\x04 \x01(\tR\vcontextPath\x12'\n" + + "\x0fdockerfile_path\x18\x05 \x01(\tR\x0edockerfilePath\"\xf2\x01\n" + "\rDeployRequest\x12#\n" + "\rdeployment_id\x18\x01 \x01(\tR\fdeploymentId\x12#\n" + - "\vkey_auth_id\x18\x02 \x01(\tH\x00R\tkeyAuthId\x88\x01\x01\x121\n" + - "\x12build_context_path\x18\x03 \x01(\tH\x01R\x10buildContextPath\x88\x01\x01\x12,\n" + - "\x0fdockerfile_path\x18\x04 \x01(\tH\x02R\x0edockerfilePath\x88\x01\x01\x12&\n" + - "\fdocker_image\x18\x05 \x01(\tH\x03R\vdockerImage\x88\x01\x01\x12\x18\n" + - "\acommand\x18\x06 \x03(\tR\acommandB\x0e\n" + - "\f_key_auth_idB\x15\n" + - "\x13_build_context_pathB\x12\n" + - "\x10_dockerfile_pathB\x0f\n" + - "\r_docker_image\"\x10\n" + + "\vkey_auth_id\x18\x02 \x01(\tH\x01R\tkeyAuthId\x88\x01\x01\x12'\n" + + "\x03git\x18\x03 \x01(\v2\x13.hydra.v1.GitSourceH\x00R\x03git\x12:\n" + + "\fdocker_image\x18\x04 \x01(\v2\x15.hydra.v1.DockerImageH\x00R\vdockerImage\x12\x18\n" + + "\acommand\x18\x05 \x03(\tR\acommandB\b\n" + + "\x06sourceB\x0e\n" + + "\f_key_auth_id\"\x10\n" + "\x0eDeployResponse\"u\n" + "\x0fRollbackRequest\x120\n" + "\x14source_deployment_id\x18\x01 \x01(\tR\x12sourceDeploymentId\x120\n" + @@ 
-354,27 +503,31 @@ func file_hydra_v1_deployment_proto_rawDescGZIP() []byte { return file_hydra_v1_deployment_proto_rawDescData } -var file_hydra_v1_deployment_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_hydra_v1_deployment_proto_msgTypes = make([]protoimpl.MessageInfo, 8) var file_hydra_v1_deployment_proto_goTypes = []any{ - (*DeployRequest)(nil), // 0: hydra.v1.DeployRequest - (*DeployResponse)(nil), // 1: hydra.v1.DeployResponse - (*RollbackRequest)(nil), // 2: hydra.v1.RollbackRequest - (*RollbackResponse)(nil), // 3: hydra.v1.RollbackResponse - (*PromoteRequest)(nil), // 4: hydra.v1.PromoteRequest - (*PromoteResponse)(nil), // 5: hydra.v1.PromoteResponse + (*DockerImage)(nil), // 0: hydra.v1.DockerImage + (*GitSource)(nil), // 1: hydra.v1.GitSource + (*DeployRequest)(nil), // 2: hydra.v1.DeployRequest + (*DeployResponse)(nil), // 3: hydra.v1.DeployResponse + (*RollbackRequest)(nil), // 4: hydra.v1.RollbackRequest + (*RollbackResponse)(nil), // 5: hydra.v1.RollbackResponse + (*PromoteRequest)(nil), // 6: hydra.v1.PromoteRequest + (*PromoteResponse)(nil), // 7: hydra.v1.PromoteResponse } var file_hydra_v1_deployment_proto_depIdxs = []int32{ - 0, // 0: hydra.v1.DeploymentService.Deploy:input_type -> hydra.v1.DeployRequest - 2, // 1: hydra.v1.DeploymentService.Rollback:input_type -> hydra.v1.RollbackRequest - 4, // 2: hydra.v1.DeploymentService.Promote:input_type -> hydra.v1.PromoteRequest - 1, // 3: hydra.v1.DeploymentService.Deploy:output_type -> hydra.v1.DeployResponse - 3, // 4: hydra.v1.DeploymentService.Rollback:output_type -> hydra.v1.RollbackResponse - 5, // 5: hydra.v1.DeploymentService.Promote:output_type -> hydra.v1.PromoteResponse - 3, // [3:6] is the sub-list for method output_type - 0, // [0:3] is the sub-list for method input_type - 0, // [0:0] is the sub-list for extension type_name - 0, // [0:0] is the sub-list for extension extendee - 0, // [0:0] is the sub-list for field type_name + 1, // 0: hydra.v1.DeployRequest.git:type_name 
-> hydra.v1.GitSource + 0, // 1: hydra.v1.DeployRequest.docker_image:type_name -> hydra.v1.DockerImage + 2, // 2: hydra.v1.DeploymentService.Deploy:input_type -> hydra.v1.DeployRequest + 4, // 3: hydra.v1.DeploymentService.Rollback:input_type -> hydra.v1.RollbackRequest + 6, // 4: hydra.v1.DeploymentService.Promote:input_type -> hydra.v1.PromoteRequest + 3, // 5: hydra.v1.DeploymentService.Deploy:output_type -> hydra.v1.DeployResponse + 5, // 6: hydra.v1.DeploymentService.Rollback:output_type -> hydra.v1.RollbackResponse + 7, // 7: hydra.v1.DeploymentService.Promote:output_type -> hydra.v1.PromoteResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name } func init() { file_hydra_v1_deployment_proto_init() } @@ -382,14 +535,17 @@ func file_hydra_v1_deployment_proto_init() { if File_hydra_v1_deployment_proto != nil { return } - file_hydra_v1_deployment_proto_msgTypes[0].OneofWrappers = []any{} + file_hydra_v1_deployment_proto_msgTypes[2].OneofWrappers = []any{ + (*DeployRequest_Git)(nil), + (*DeployRequest_DockerImage)(nil), + } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_hydra_v1_deployment_proto_rawDesc), len(file_hydra_v1_deployment_proto_rawDesc)), NumEnums: 0, - NumMessages: 6, + NumMessages: 8, NumExtensions: 0, NumServices: 1, }, diff --git a/pkg/db/BUILD.bazel b/pkg/db/BUILD.bazel index 56786c24b5..befcb7c869 100644 --- a/pkg/db/BUILD.bazel +++ b/pkg/db/BUILD.bazel @@ -41,6 +41,7 @@ go_library( "bulk_deployment_topology_insert.sql_generated.go", "bulk_environment_insert.sql_generated.go", "bulk_environment_upsert.sql_generated.go", + "bulk_github_repo_connection_insert.sql_generated.go", 
"bulk_identity_insert.sql_generated.go", "bulk_identity_insert_ratelimit.sql_generated.go", "bulk_identity_upsert.sql_generated.go", @@ -109,6 +110,8 @@ go_library( "environment_variables_find_by_environment_id.sql_generated.go", "frontline_route_delete_by_fqdn.sql_generated.go", "generate.go", + "github_repo_connection_find.sql_generated.go", + "github_repo_connection_insert.sql_generated.go", "handle_err_deadlock.go", "handle_err_duplicate_key.go", "handle_err_no_rows.go", diff --git a/pkg/db/bulk_github_repo_connection_insert.sql_generated.go b/pkg/db/bulk_github_repo_connection_insert.sql_generated.go new file mode 100644 index 0000000000..9c2dcf5b35 --- /dev/null +++ b/pkg/db/bulk_github_repo_connection_insert.sql_generated.go @@ -0,0 +1,43 @@ +// Code generated by sqlc bulk insert plugin. DO NOT EDIT. + +package db + +import ( + "context" + "fmt" + "strings" +) + +// bulkInsertGithubRepoConnection is the base query for bulk insert +const bulkInsertGithubRepoConnection = `INSERT INTO github_repo_connections ( project_id, installation_id, repository_id, repository_full_name, created_at, updated_at ) VALUES %s` + +// InsertGithubRepoConnections performs bulk insert in a single query +func (q *BulkQueries) InsertGithubRepoConnections(ctx context.Context, db DBTX, args []InsertGithubRepoConnectionParams) error { + + if len(args) == 0 { + return nil + } + + // Build the bulk insert query + valueClauses := make([]string, len(args)) + for i := range args { + valueClauses[i] = "( ?, ?, ?, ?, ?, ? 
)" + } + + bulkQuery := fmt.Sprintf(bulkInsertGithubRepoConnection, strings.Join(valueClauses, ", ")) + + // Collect all arguments + var allArgs []any + for _, arg := range args { + allArgs = append(allArgs, arg.ProjectID) + allArgs = append(allArgs, arg.InstallationID) + allArgs = append(allArgs, arg.RepositoryID) + allArgs = append(allArgs, arg.RepositoryFullName) + allArgs = append(allArgs, arg.CreatedAt) + allArgs = append(allArgs, arg.UpdatedAt) + } + + // Execute the bulk insert + _, err := db.ExecContext(ctx, bulkQuery, allArgs...) + return err +} diff --git a/pkg/db/github_repo_connection_find.sql_generated.go b/pkg/db/github_repo_connection_find.sql_generated.go new file mode 100644 index 0000000000..4693b107bc --- /dev/null +++ b/pkg/db/github_repo_connection_find.sql_generated.go @@ -0,0 +1,57 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 +// source: github_repo_connection_find.sql + +package db + +import ( + "context" +) + +const findGithubRepoConnection = `-- name: FindGithubRepoConnection :one +SELECT + pk, + project_id, + installation_id, + repository_id, + repository_full_name, + created_at, + updated_at +FROM github_repo_connections +WHERE installation_id = ? + AND repository_id = ? +` + +type FindGithubRepoConnectionParams struct { + InstallationID int64 `db:"installation_id"` + RepositoryID int64 `db:"repository_id"` +} + +// FindGithubRepoConnection +// +// SELECT +// pk, +// project_id, +// installation_id, +// repository_id, +// repository_full_name, +// created_at, +// updated_at +// FROM github_repo_connections +// WHERE installation_id = ? +// AND repository_id = ? 
+func (q *Queries) FindGithubRepoConnection(ctx context.Context, db DBTX, arg FindGithubRepoConnectionParams) (GithubRepoConnection, error) { + row := db.QueryRowContext(ctx, findGithubRepoConnection, arg.InstallationID, arg.RepositoryID) + var i GithubRepoConnection + err := row.Scan( + &i.Pk, + &i.ProjectID, + &i.InstallationID, + &i.RepositoryID, + &i.RepositoryFullName, + &i.CreatedAt, + &i.UpdatedAt, + ) + return i, err +} diff --git a/pkg/db/github_repo_connection_insert.sql_generated.go b/pkg/db/github_repo_connection_insert.sql_generated.go new file mode 100644 index 0000000000..cb761b44e9 --- /dev/null +++ b/pkg/db/github_repo_connection_insert.sql_generated.go @@ -0,0 +1,69 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 +// source: github_repo_connection_insert.sql + +package db + +import ( + "context" + "database/sql" +) + +const insertGithubRepoConnection = `-- name: InsertGithubRepoConnection :exec +INSERT INTO github_repo_connections ( + project_id, + installation_id, + repository_id, + repository_full_name, + created_at, + updated_at +) +VALUES ( + ?, + ?, + ?, + ?, + ?, + ? +) +` + +type InsertGithubRepoConnectionParams struct { + ProjectID string `db:"project_id"` + InstallationID int64 `db:"installation_id"` + RepositoryID int64 `db:"repository_id"` + RepositoryFullName string `db:"repository_full_name"` + CreatedAt int64 `db:"created_at"` + UpdatedAt sql.NullInt64 `db:"updated_at"` +} + +// InsertGithubRepoConnection +// +// INSERT INTO github_repo_connections ( +// project_id, +// installation_id, +// repository_id, +// repository_full_name, +// created_at, +// updated_at +// ) +// VALUES ( +// ?, +// ?, +// ?, +// ?, +// ?, +// ? 
+// ) +func (q *Queries) InsertGithubRepoConnection(ctx context.Context, db DBTX, arg InsertGithubRepoConnectionParams) error { + _, err := db.ExecContext(ctx, insertGithubRepoConnection, + arg.ProjectID, + arg.InstallationID, + arg.RepositoryID, + arg.RepositoryFullName, + arg.CreatedAt, + arg.UpdatedAt, + ) + return err +} diff --git a/pkg/db/querier_bulk_generated.go b/pkg/db/querier_bulk_generated.go index 49b9ab8ed5..23a49382f3 100644 --- a/pkg/db/querier_bulk_generated.go +++ b/pkg/db/querier_bulk_generated.go @@ -19,6 +19,7 @@ type BulkQuerier interface { InsertDeploymentTopologies(ctx context.Context, db DBTX, args []InsertDeploymentTopologyParams) error InsertEnvironments(ctx context.Context, db DBTX, args []InsertEnvironmentParams) error UpsertEnvironment(ctx context.Context, db DBTX, args []UpsertEnvironmentParams) error + InsertGithubRepoConnections(ctx context.Context, db DBTX, args []InsertGithubRepoConnectionParams) error InsertIdentities(ctx context.Context, db DBTX, args []InsertIdentityParams) error InsertIdentityRatelimits(ctx context.Context, db DBTX, args []InsertIdentityRatelimitParams) error UpsertIdentity(ctx context.Context, db DBTX, args []UpsertIdentityParams) error diff --git a/pkg/db/querier_generated.go b/pkg/db/querier_generated.go index 245427b778..54afde69ec 100644 --- a/pkg/db/querier_generated.go +++ b/pkg/db/querier_generated.go @@ -316,6 +316,20 @@ type Querier interface { // AND sticky IN (/*SLICE:sticky*/?) // ORDER BY created_at ASC FindFrontlineRoutesForRollback(ctx context.Context, db DBTX, arg FindFrontlineRoutesForRollbackParams) ([]FindFrontlineRoutesForRollbackRow, error) + //FindGithubRepoConnection + // + // SELECT + // pk, + // project_id, + // installation_id, + // repository_id, + // repository_full_name, + // created_at, + // updated_at + // FROM github_repo_connections + // WHERE installation_id = ? + // AND repository_id = ? 
+ FindGithubRepoConnection(ctx context.Context, db DBTX, arg FindGithubRepoConnectionParams) (GithubRepoConnection, error) //FindIdentities // // SELECT pk, id, external_id, workspace_id, environment, meta, deleted, created_at, updated_at @@ -1260,6 +1274,25 @@ type Querier interface { // ? // ) InsertFrontlineRoute(ctx context.Context, db DBTX, arg InsertFrontlineRouteParams) error + //InsertGithubRepoConnection + // + // INSERT INTO github_repo_connections ( + // project_id, + // installation_id, + // repository_id, + // repository_full_name, + // created_at, + // updated_at + // ) + // VALUES ( + // ?, + // ?, + // ?, + // ?, + // ?, + // ? + // ) + InsertGithubRepoConnection(ctx context.Context, db DBTX, arg InsertGithubRepoConnectionParams) error //InsertIdentity // // INSERT INTO `identities` ( diff --git a/pkg/db/queries/github_repo_connection_find.sql b/pkg/db/queries/github_repo_connection_find.sql new file mode 100644 index 0000000000..7a45e5eeab --- /dev/null +++ b/pkg/db/queries/github_repo_connection_find.sql @@ -0,0 +1,12 @@ +-- name: FindGithubRepoConnection :one +SELECT + pk, + project_id, + installation_id, + repository_id, + repository_full_name, + created_at, + updated_at +FROM github_repo_connections +WHERE installation_id = sqlc.arg(installation_id) + AND repository_id = sqlc.arg(repository_id); diff --git a/pkg/db/queries/github_repo_connection_insert.sql b/pkg/db/queries/github_repo_connection_insert.sql new file mode 100644 index 0000000000..fb94f4b20a --- /dev/null +++ b/pkg/db/queries/github_repo_connection_insert.sql @@ -0,0 +1,17 @@ +-- name: InsertGithubRepoConnection :exec +INSERT INTO github_repo_connections ( + project_id, + installation_id, + repository_id, + repository_full_name, + created_at, + updated_at +) +VALUES ( + sqlc.arg(project_id), + sqlc.arg(installation_id), + sqlc.arg(repository_id), + sqlc.arg(repository_full_name), + sqlc.arg(created_at), + sqlc.arg(updated_at) +); diff --git a/pkg/db/schema.sql b/pkg/db/schema.sql 
index 6b06dfc44b..47ef3f425f 100644 --- a/pkg/db/schema.sql +++ b/pkg/db/schema.sql @@ -644,4 +644,5 @@ CREATE INDEX `idx_deployment_id` ON `instances` (`deployment_id`); CREATE INDEX `idx_region` ON `instances` (`region`); CREATE INDEX `environment_id_idx` ON `frontline_routes` (`environment_id`); CREATE INDEX `deployment_id_idx` ON `frontline_routes` (`deployment_id`); +CREATE INDEX `installation_id_idx` ON `github_repo_connections` (`installation_id`); diff --git a/pkg/dockertest/BUILD.bazel b/pkg/dockertest/BUILD.bazel index 94cc066faa..f4707fb427 100644 --- a/pkg/dockertest/BUILD.bazel +++ b/pkg/dockertest/BUILD.bazel @@ -5,10 +5,13 @@ go_library( srcs = [ "doc.go", "docker.go", + "mysql.go", "redis.go", + "restate.go", "s3.go", "wait.go", ], + data = ["//pkg/db:schema.sql"], importpath = "github.com/unkeyed/unkey/pkg/dockertest", visibility = ["//visibility:public"], deps = [ @@ -16,6 +19,7 @@ go_library( "@com_github_docker_docker//api/types/image", "@com_github_docker_docker//client", "@com_github_docker_go_connections//nat", + "@com_github_go_sql_driver_mysql//:mysql", "@com_github_stretchr_testify//require", ], ) @@ -25,6 +29,7 @@ go_test( size = "large", srcs = [ "redis_test.go", + "restate_test.go", "s3_test.go", ], deps = [ diff --git a/pkg/dockertest/doc.go b/pkg/dockertest/doc.go index 08a7a39bae..c818834857 100644 --- a/pkg/dockertest/doc.go +++ b/pkg/dockertest/doc.go @@ -41,6 +41,8 @@ // # Available Services // // Currently supported: +// - [MySQL]: MySQL with dev schema preloaded // - [Redis]: Redis 8.0 container // - [S3]: MinIO S3-compatible object storage +// - [Restate]: Restate server (ingress + admin) package dockertest diff --git a/pkg/dockertest/docker.go b/pkg/dockertest/docker.go index 1970e7db23..73fc03978e 100644 --- a/pkg/dockertest/docker.go +++ b/pkg/dockertest/docker.go @@ -37,6 +37,16 @@ type Container struct { Ports map[string]string } +// HostURL returns a URL for the container using the provided scheme and port. 
+// The containerPort should be in the format "port/protocol" (e.g., "8080/tcp"). +func (c *Container) HostURL(scheme, containerPort string) string { + port := c.Port(containerPort) + if port == "" { + return "" + } + return fmt.Sprintf("%s://%s:%s", scheme, c.Host, port) +} + // Port returns the mapped host port for a given container port. // The containerPort should be in the format "port/protocol" (e.g., "6379/tcp"). // Returns an empty string if the port is not mapped. diff --git a/pkg/dockertest/mysql.go b/pkg/dockertest/mysql.go new file mode 100644 index 0000000000..4a1c938390 --- /dev/null +++ b/pkg/dockertest/mysql.go @@ -0,0 +1,118 @@ +package dockertest + +import ( + "context" + "database/sql" + "fmt" + "os" + "path/filepath" + "runtime" + "testing" + "time" + + mysql "github.com/go-sql-driver/mysql" + "github.com/stretchr/testify/require" +) + +const ( + mysqlImage = "mysql:9.4.0" + mysqlPort = "3306/tcp" + mysqlUser = "unkey" + mysqlPassword = "password" + mysqlDatabase = "unkey" +) + +// MySQLConfig holds connection information for a MySQL test container. +type MySQLConfig struct { + // DSN is the host DSN for connecting from the test runner. + DSN string + // DockerDSN is the DSN for connecting from containers on the docker network. + DockerDSN string +} + +// MySQL starts the local MySQL test container and returns DSNs. +// +// The container is based on the local dev image with preloaded schema. +// This function blocks until the MySQL port is accepting TCP connections +// (up to 60s). Fails the test if Docker is unavailable or the container fails to start. 
+func MySQL(t *testing.T) MySQLConfig { + t.Helper() + + ctr := startContainer(t, containerConfig{ + Image: mysqlImage, + ExposedPorts: []string{mysqlPort}, + WaitStrategy: NewTCPWait(mysqlPort), + WaitTimeout: 60 * time.Second, + Env: map[string]string{ + "MYSQL_ROOT_PASSWORD": mysqlPassword, + "MYSQL_DATABASE": mysqlDatabase, + "MYSQL_USER": mysqlUser, + "MYSQL_PASSWORD": mysqlPassword, + }, + Cmd: []string{}, + }) + + port := ctr.Port(mysqlPort) + addr := fmt.Sprintf("%s:%s", ctr.Host, port) + + hostCfg := mysql.NewConfig() + hostCfg.User = mysqlUser + hostCfg.Passwd = mysqlPassword + hostCfg.Net = "tcp" + hostCfg.Addr = addr + hostCfg.DBName = mysqlDatabase + hostCfg.ParseTime = true + hostCfg.MultiStatements = true + hostCfg.Logger = &mysql.NopLogger{} + + dockerCfg := mysql.NewConfig() + dockerCfg.User = mysqlUser + dockerCfg.Passwd = mysqlPassword + dockerCfg.Net = "tcp" + dockerCfg.Addr = "mysql:3306" + dockerCfg.DBName = mysqlDatabase + dockerCfg.ParseTime = true + dockerCfg.Logger = &mysql.NopLogger{} + + hostDB, err := sql.Open("mysql", hostCfg.FormatDSN()) + require.NoError(t, err) + defer func() { require.NoError(t, hostDB.Close()) }() + require.Eventually(t, func() bool { + pingErr := hostDB.PingContext(context.Background()) + return pingErr == nil + }, 60*time.Second, 500*time.Millisecond) + + schemaPath := schemaSQLPath() + schemaBytes, err := os.ReadFile(schemaPath) + require.NoError(t, err) + _, err = hostDB.ExecContext(context.Background(), string(schemaBytes)) + require.NoError(t, err) + + return MySQLConfig{ + DSN: hostCfg.FormatDSN(), + DockerDSN: dockerCfg.FormatDSN(), + } +} + +func schemaSQLPath() string { + if runfiles := os.Getenv("TEST_SRCDIR"); runfiles != "" { + workspace := os.Getenv("TEST_WORKSPACE") + if workspace != "" { + candidate := filepath.Join(runfiles, workspace, "pkg", "db", "schema.sql") + if _, err := os.Stat(candidate); err == nil { + return candidate + } + } + candidate := filepath.Join(runfiles, "_main", "pkg", "db", 
"schema.sql") + if _, err := os.Stat(candidate); err == nil { + return candidate + } + } + + _, currentFile, _, ok := runtime.Caller(0) + if !ok { + return "" + } + root := filepath.Dir(filepath.Dir(currentFile)) + return filepath.Join(root, "db", "schema.sql") +} diff --git a/pkg/dockertest/restate.go b/pkg/dockertest/restate.go new file mode 100644 index 0000000000..f6b2dce191 --- /dev/null +++ b/pkg/dockertest/restate.go @@ -0,0 +1,43 @@ +package dockertest + +import ( + "testing" + "time" +) + +const ( + restateImage = "restatedev/restate:1.5.3" + restatePort = "8080/tcp" + restateAdminPort = "9070/tcp" +) + +// RestateConfig holds connection information for a Restate container. +type RestateConfig struct { + // IngressURL is the Restate ingress endpoint URL. + IngressURL string + // AdminURL is the Restate admin endpoint URL. + AdminURL string +} + +// Restate starts a Restate container and returns ingress/admin URLs. +// +// The container is automatically removed when the test completes via t.Cleanup. +// This function blocks until the admin health endpoint responds (up to 30s). +// Fails the test if Docker is unavailable or the container fails to start. 
+func Restate(t *testing.T) RestateConfig { + t.Helper() + + ctr := startContainer(t, containerConfig{ + Image: restateImage, + ExposedPorts: []string{restatePort, restateAdminPort}, + WaitStrategy: NewHTTPWait(restateAdminPort, "/health"), + WaitTimeout: 30 * time.Second, + Env: map[string]string{}, + Cmd: []string{}, + }) + + return RestateConfig{ + IngressURL: ctr.HostURL("http", restatePort), + AdminURL: ctr.HostURL("http", restateAdminPort), + } +} diff --git a/pkg/dockertest/restate_test.go b/pkg/dockertest/restate_test.go new file mode 100644 index 0000000000..a30080d91e --- /dev/null +++ b/pkg/dockertest/restate_test.go @@ -0,0 +1,20 @@ +package dockertest_test + +import ( + "net/http" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/pkg/dockertest" +) + +func TestRestate(t *testing.T) { + config := dockertest.Restate(t) + + client := &http.Client{Timeout: 5 * time.Second} + resp, err := client.Get(config.AdminURL + "/health") + require.NoError(t, err) + require.NoError(t, resp.Body.Close()) + require.Equal(t, http.StatusOK, resp.StatusCode) +} diff --git a/svc/api/BUILD.bazel b/svc/api/BUILD.bazel index 459350a9ec..4d36d8f5bd 100644 --- a/svc/api/BUILD.bazel +++ b/svc/api/BUILD.bazel @@ -48,7 +48,6 @@ go_test( deps = [ ":api", "//pkg/dockertest", - "//pkg/testutil/containers", "//pkg/uid", "//pkg/vault/keys", "@com_github_stretchr_testify//require", diff --git a/svc/api/cancel_test.go b/svc/api/cancel_test.go index cad40377cf..395ecd3014 100644 --- a/svc/api/cancel_test.go +++ b/svc/api/cancel_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" "github.com/unkeyed/unkey/pkg/dockertest" - "github.com/unkeyed/unkey/pkg/testutil/containers" "github.com/unkeyed/unkey/pkg/uid" "github.com/unkeyed/unkey/pkg/vault/keys" "github.com/unkeyed/unkey/svc/api" @@ -20,8 +19,8 @@ import ( func TestContextCancellation(t *testing.T) { // Use testcontainers for dynamic service management - mysqlCfg := 
containers.MySQL(t) - dbDsn := mysqlCfg.FormatDSN() + mysqlCfg := dockertest.MySQL(t) + dbDsn := mysqlCfg.DSN redisUrl := dockertest.Redis(t) // Create ephemeral listener diff --git a/svc/api/internal/testutil/mock_deployment_client.go b/svc/api/internal/testutil/mock_deployment_client.go index 8691f064a1..2080b4c1cd 100644 --- a/svc/api/internal/testutil/mock_deployment_client.go +++ b/svc/api/internal/testutil/mock_deployment_client.go @@ -19,27 +19,15 @@ var _ ctrlv1connect.DeploymentServiceClient = (*MockDeploymentClient)(nil) // // This mock is safe for concurrent use. All call recording is protected by a mutex. type MockDeploymentClient struct { - mu sync.Mutex - CreateS3UploadURLFunc func(context.Context, *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) - CreateDeploymentFunc func(context.Context, *connect.Request[ctrlv1.CreateDeploymentRequest]) (*connect.Response[ctrlv1.CreateDeploymentResponse], error) - GetDeploymentFunc func(context.Context, *connect.Request[ctrlv1.GetDeploymentRequest]) (*connect.Response[ctrlv1.GetDeploymentResponse], error) - RollbackFunc func(context.Context, *connect.Request[ctrlv1.RollbackRequest]) (*connect.Response[ctrlv1.RollbackResponse], error) - PromoteFunc func(context.Context, *connect.Request[ctrlv1.PromoteRequest]) (*connect.Response[ctrlv1.PromoteResponse], error) - CreateS3UploadURLCalls []*ctrlv1.CreateS3UploadURLRequest - CreateDeploymentCalls []*ctrlv1.CreateDeploymentRequest - GetDeploymentCalls []*ctrlv1.GetDeploymentRequest - RollbackCalls []*ctrlv1.RollbackRequest - PromoteCalls []*ctrlv1.PromoteRequest -} - -func (m *MockDeploymentClient) CreateS3UploadURL(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - m.mu.Lock() - m.CreateS3UploadURLCalls = append(m.CreateS3UploadURLCalls, req.Msg) - m.mu.Unlock() - if m.CreateS3UploadURLFunc != nil { - return 
m.CreateS3UploadURLFunc(ctx, req) - } - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{}), nil + mu sync.Mutex + CreateDeploymentFunc func(context.Context, *connect.Request[ctrlv1.CreateDeploymentRequest]) (*connect.Response[ctrlv1.CreateDeploymentResponse], error) + GetDeploymentFunc func(context.Context, *connect.Request[ctrlv1.GetDeploymentRequest]) (*connect.Response[ctrlv1.GetDeploymentResponse], error) + RollbackFunc func(context.Context, *connect.Request[ctrlv1.RollbackRequest]) (*connect.Response[ctrlv1.RollbackResponse], error) + PromoteFunc func(context.Context, *connect.Request[ctrlv1.PromoteRequest]) (*connect.Response[ctrlv1.PromoteResponse], error) + CreateDeploymentCalls []*ctrlv1.CreateDeploymentRequest + GetDeploymentCalls []*ctrlv1.GetDeploymentRequest + RollbackCalls []*ctrlv1.RollbackRequest + PromoteCalls []*ctrlv1.PromoteRequest } func (m *MockDeploymentClient) CreateDeployment(ctx context.Context, req *connect.Request[ctrlv1.CreateDeploymentRequest]) (*connect.Response[ctrlv1.CreateDeploymentResponse], error) { diff --git a/svc/api/openapi/BUILD.bazel b/svc/api/openapi/BUILD.bazel index 749fc7b583..5c7d25edcd 100644 --- a/svc/api/openapi/BUILD.bazel +++ b/svc/api/openapi/BUILD.bazel @@ -10,8 +10,5 @@ go_library( embedsrcs = ["openapi-generated.yaml"], importpath = "github.com/unkeyed/unkey/svc/api/openapi", visibility = ["//visibility:public"], - deps = [ - "@com_github_oapi_codegen_nullable//:nullable", - "@com_github_oapi_codegen_runtime//:runtime", - ], + deps = ["@com_github_oapi_codegen_nullable//:nullable"], ) diff --git a/svc/api/openapi/gen.go b/svc/api/openapi/gen.go index 32b9867646..5cf5fc68cd 100644 --- a/svc/api/openapi/gen.go +++ b/svc/api/openapi/gen.go @@ -4,11 +4,7 @@ package openapi import ( - "encoding/json" - "fmt" - "github.com/oapi-codegen/nullable" - "github.com/oapi-codegen/runtime" ) const ( @@ -708,24 +704,14 @@ type V2ApisListKeysResponseBody struct { // V2ApisListKeysResponseData Array of API keys 
with complete configuration and metadata. type V2ApisListKeysResponseData = []KeyResponseData -// V2DeployBuildSource Build from source configuration -type V2DeployBuildSource struct { - // Build Build context for building from source. - // Provide either `build.context` (build from source) or `image` (prebuilt image), but not both. - Build struct { - // Context S3 path to uploaded build context tarball - Context string `json:"context"` - - // Dockerfile Optional path to Dockerfile within build context (defaults to "Dockerfile") - Dockerfile *string `json:"dockerfile,omitempty"` - } `json:"build"` -} - -// V2DeployCreateDeploymentRequestBody Deployment source - either build from source or use prebuilt image +// V2DeployCreateDeploymentRequestBody Create a deployment from a pre-built Docker image type V2DeployCreateDeploymentRequestBody struct { // Branch Git branch name Branch string `json:"branch"` + // DockerImage Docker image reference to deploy + DockerImage string `json:"dockerImage"` + // EnvironmentSlug Environment slug (e.g., "production", "staging") EnvironmentSlug string `json:"environmentSlug"` @@ -737,7 +723,6 @@ type V2DeployCreateDeploymentRequestBody struct { // ProjectId Unkey project ID ProjectId string `json:"projectId"` - union json.RawMessage } // V2DeployCreateDeploymentResponseBody defines model for V2DeployCreateDeploymentResponseBody. @@ -769,29 +754,6 @@ type V2DeployDeploymentStep struct { Status *string `json:"status,omitempty"` } -// V2DeployGenerateUploadUrlRequestBody defines model for V2DeployGenerateUploadUrlRequestBody. -type V2DeployGenerateUploadUrlRequestBody struct { - // ProjectId Unkey project ID for which to generate the upload URL - ProjectId string `json:"projectId"` -} - -// V2DeployGenerateUploadUrlResponseBody defines model for V2DeployGenerateUploadUrlResponseBody. 
-type V2DeployGenerateUploadUrlResponseBody struct { - Data V2DeployGenerateUploadUrlResponseData `json:"data"` - - // Meta Metadata object included in every API response. This provides context about the request and is essential for debugging, audit trails, and support inquiries. The `requestId` is particularly important when troubleshooting issues with the Unkey support team. - Meta Meta `json:"meta"` -} - -// V2DeployGenerateUploadUrlResponseData defines model for V2DeployGenerateUploadUrlResponseData. -type V2DeployGenerateUploadUrlResponseData struct { - // Context S3 path to use in the createDeployment request when building from source - Context string `json:"context"` - - // UploadUrl Presigned PUT URL for uploading the build context tar file - UploadUrl string `json:"uploadUrl"` -} - // V2DeployGetDeploymentRequestBody defines model for V2DeployGetDeploymentRequestBody. type V2DeployGetDeploymentRequestBody struct { // DeploymentId Unique deployment identifier to retrieve @@ -845,12 +807,6 @@ type V2DeployGitCommit struct { Timestamp *int64 `json:"timestamp,omitempty"` } -// V2DeployImageSource Prebuilt Docker image configuration -type V2DeployImageSource struct { - // Image Prebuilt Docker image reference - Image string `json:"image"` -} - // V2IdentitiesCreateIdentityRequestBody defines model for V2IdentitiesCreateIdentityRequestBody. type V2IdentitiesCreateIdentityRequestBody struct { // ExternalId Creates an identity using your system's unique identifier for a user, organization, or entity. @@ -2422,9 +2378,6 @@ type ApisListKeysJSONRequestBody = V2ApisListKeysRequestBody // DeployCreateDeploymentJSONRequestBody defines body for DeployCreateDeployment for application/json ContentType. type DeployCreateDeploymentJSONRequestBody = V2DeployCreateDeploymentRequestBody -// DeployGenerateUploadUrlJSONRequestBody defines body for DeployGenerateUploadUrl for application/json ContentType. 
-type DeployGenerateUploadUrlJSONRequestBody = V2DeployGenerateUploadUrlRequestBody - // DeployGetDeploymentJSONRequestBody defines body for DeployGetDeployment for application/json ContentType. type DeployGetDeploymentJSONRequestBody = V2DeployGetDeploymentRequestBody @@ -2529,150 +2482,3 @@ type RatelimitMultiLimitJSONRequestBody = V2RatelimitMultiLimitRequestBody // RatelimitSetOverrideJSONRequestBody defines body for RatelimitSetOverride for application/json ContentType. type RatelimitSetOverrideJSONRequestBody = V2RatelimitSetOverrideRequestBody - -// AsV2DeployBuildSource returns the union data inside the V2DeployCreateDeploymentRequestBody as a V2DeployBuildSource -func (t V2DeployCreateDeploymentRequestBody) AsV2DeployBuildSource() (V2DeployBuildSource, error) { - var body V2DeployBuildSource - err := json.Unmarshal(t.union, &body) - return body, err -} - -// FromV2DeployBuildSource overwrites any union data inside the V2DeployCreateDeploymentRequestBody as the provided V2DeployBuildSource -func (t *V2DeployCreateDeploymentRequestBody) FromV2DeployBuildSource(v V2DeployBuildSource) error { - b, err := json.Marshal(v) - t.union = b - return err -} - -// MergeV2DeployBuildSource performs a merge with any union data inside the V2DeployCreateDeploymentRequestBody, using the provided V2DeployBuildSource -func (t *V2DeployCreateDeploymentRequestBody) MergeV2DeployBuildSource(v V2DeployBuildSource) error { - b, err := json.Marshal(v) - if err != nil { - return err - } - - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err -} - -// AsV2DeployImageSource returns the union data inside the V2DeployCreateDeploymentRequestBody as a V2DeployImageSource -func (t V2DeployCreateDeploymentRequestBody) AsV2DeployImageSource() (V2DeployImageSource, error) { - var body V2DeployImageSource - err := json.Unmarshal(t.union, &body) - return body, err -} - -// FromV2DeployImageSource overwrites any union data inside the V2DeployCreateDeploymentRequestBody 
as the provided V2DeployImageSource -func (t *V2DeployCreateDeploymentRequestBody) FromV2DeployImageSource(v V2DeployImageSource) error { - b, err := json.Marshal(v) - t.union = b - return err -} - -// MergeV2DeployImageSource performs a merge with any union data inside the V2DeployCreateDeploymentRequestBody, using the provided V2DeployImageSource -func (t *V2DeployCreateDeploymentRequestBody) MergeV2DeployImageSource(v V2DeployImageSource) error { - b, err := json.Marshal(v) - if err != nil { - return err - } - - merged, err := runtime.JSONMerge(t.union, b) - t.union = merged - return err -} - -func (t V2DeployCreateDeploymentRequestBody) MarshalJSON() ([]byte, error) { - b, err := t.union.MarshalJSON() - if err != nil { - return nil, err - } - object := make(map[string]json.RawMessage) - if t.union != nil { - err = json.Unmarshal(b, &object) - if err != nil { - return nil, err - } - } - - object["branch"], err = json.Marshal(t.Branch) - if err != nil { - return nil, fmt.Errorf("error marshaling 'branch': %w", err) - } - - object["environmentSlug"], err = json.Marshal(t.EnvironmentSlug) - if err != nil { - return nil, fmt.Errorf("error marshaling 'environmentSlug': %w", err) - } - - if t.GitCommit != nil { - object["gitCommit"], err = json.Marshal(t.GitCommit) - if err != nil { - return nil, fmt.Errorf("error marshaling 'gitCommit': %w", err) - } - } - - if t.KeyspaceId != nil { - object["keyspaceId"], err = json.Marshal(t.KeyspaceId) - if err != nil { - return nil, fmt.Errorf("error marshaling 'keyspaceId': %w", err) - } - } - - object["projectId"], err = json.Marshal(t.ProjectId) - if err != nil { - return nil, fmt.Errorf("error marshaling 'projectId': %w", err) - } - - b, err = json.Marshal(object) - return b, err -} - -func (t *V2DeployCreateDeploymentRequestBody) UnmarshalJSON(b []byte) error { - err := t.union.UnmarshalJSON(b) - if err != nil { - return err - } - object := make(map[string]json.RawMessage) - err = json.Unmarshal(b, &object) - if err != nil { 
- return err - } - - if raw, found := object["branch"]; found { - err = json.Unmarshal(raw, &t.Branch) - if err != nil { - return fmt.Errorf("error reading 'branch': %w", err) - } - } - - if raw, found := object["environmentSlug"]; found { - err = json.Unmarshal(raw, &t.EnvironmentSlug) - if err != nil { - return fmt.Errorf("error reading 'environmentSlug': %w", err) - } - } - - if raw, found := object["gitCommit"]; found { - err = json.Unmarshal(raw, &t.GitCommit) - if err != nil { - return fmt.Errorf("error reading 'gitCommit': %w", err) - } - } - - if raw, found := object["keyspaceId"]; found { - err = json.Unmarshal(raw, &t.KeyspaceId) - if err != nil { - return fmt.Errorf("error reading 'keyspaceId': %w", err) - } - } - - if raw, found := object["projectId"]; found { - err = json.Unmarshal(raw, &t.ProjectId) - if err != nil { - return fmt.Errorf("error reading 'projectId': %w", err) - } - } - - return err -} diff --git a/svc/api/openapi/openapi-generated.yaml b/svc/api/openapi/openapi-generated.yaml index c890bf8d7b..ca17d64a10 100644 --- a/svc/api/openapi/openapi-generated.yaml +++ b/svc/api/openapi/openapi-generated.yaml @@ -387,61 +387,42 @@ components: x-go-type-skip-optional-pointer-with-omitzero: true additionalProperties: false V2DeployCreateDeploymentRequestBody: - allOf: - - type: object - required: - - projectId - - branch - - environmentSlug - properties: - projectId: - type: string - minLength: 1 - description: Unkey project ID - example: "proj_123abc" - pattern: "^proj_[a-zA-Z0-9]+$" - keyspaceId: - type: string - description: Optional keyspace ID for authentication context - example: "key_abc123" - branch: - type: string - minLength: 1 - description: Git branch name - example: "main" - environmentSlug: - type: string - minLength: 1 - description: Environment slug (e.g., "production", "staging") - example: "production" - gitCommit: - $ref: "#/components/schemas/V2DeployGitCommit" - oneOf: - - $ref: "#/components/schemas/V2DeployBuildSource" - - 
$ref: "#/components/schemas/V2DeployImageSource" - description: Deployment source - either build from source or use prebuilt image - V2DeployCreateDeploymentResponseBody: - type: object - required: - - meta - - data - properties: - meta: - $ref: "#/components/schemas/Meta" - data: - $ref: "#/components/schemas/V2DeployCreateDeploymentResponseData" - V2DeployGenerateUploadUrlRequestBody: type: object required: - projectId + - branch + - environmentSlug + - dockerImage properties: projectId: type: string minLength: 1 - description: Unkey project ID for which to generate the upload URL + description: Unkey project ID example: "proj_123abc" pattern: "^proj_[a-zA-Z0-9]+$" - V2DeployGenerateUploadUrlResponseBody: + keyspaceId: + type: string + description: Optional keyspace ID for authentication context + example: "key_abc123" + branch: + type: string + minLength: 1 + description: Git branch name + example: "main" + environmentSlug: + type: string + minLength: 1 + description: Environment slug (e.g., "production", "staging") + example: "production" + dockerImage: + type: string + minLength: 1 + description: Docker image reference to deploy + example: "ghcr.io/user/app:v1.0.0" + gitCommit: + $ref: "#/components/schemas/V2DeployGitCommit" + description: Create a deployment from a pre-built Docker image + V2DeployCreateDeploymentResponseBody: type: object required: - meta @@ -450,7 +431,7 @@ components: meta: $ref: "#/components/schemas/Meta" data: - $ref: "#/components/schemas/V2DeployGenerateUploadUrlResponseData" + $ref: "#/components/schemas/V2DeployCreateDeploymentResponseData" V2DeployGetDeploymentRequestBody: type: object required: @@ -2610,41 +2591,6 @@ components: format: int64 description: Commit timestamp in milliseconds example: 1704067200000 - V2DeployBuildSource: - type: object - description: Build from source configuration - required: - - build - properties: - build: - type: object - description: | - Build context for building from source. 
- Provide either `build.context` (build from source) or `image` (prebuilt image), but not both. - required: - - context - properties: - context: - type: string - minLength: 1 - description: S3 path to uploaded build context tarball - example: "s3://bucket/path/to/context.tar.gz" - dockerfile: - type: string - description: Optional path to Dockerfile within build context (defaults to "Dockerfile") - example: "Dockerfile" - default: "Dockerfile" - V2DeployImageSource: - type: object - description: Prebuilt Docker image configuration - required: - - image - properties: - image: - type: string - minLength: 1 - description: Prebuilt Docker image reference - example: "nginx:latest" V2DeployCreateDeploymentResponseData: type: object required: @@ -2654,20 +2600,6 @@ components: type: string description: Unique deployment identifier example: "d_abc123xyz" - V2DeployGenerateUploadUrlResponseData: - type: object - required: - - uploadUrl - - context - properties: - uploadUrl: - type: string - description: Presigned PUT URL for uploading the build context tar file - example: "https://s3.amazonaws.com/bucket/path?signature=..." - context: - type: string - description: S3 path to use in the createDeployment request when building from source - example: "proj_123abc/ctx_456def.tar.gz" V2DeployGetDeploymentResponseData: type: object required: @@ -4329,66 +4261,6 @@ paths: - deploy x-speakeasy-group: internal x-speakeasy-name-override: createDeployment - /v2/deploy.generateUploadUrl: - post: - description: | - **INTERNAL** - This endpoint is internal and may change without notice. - Not recommended for production use. - - Generates a presigned S3 URL for uploading build context archives. - - **Authentication**: Requires a valid root key with appropriate permissions. 
- operationId: deploy.generateUploadUrl - requestBody: - content: - application/json: - schema: - $ref: '#/components/schemas/V2DeployGenerateUploadUrlRequestBody' - required: true - responses: - "200": - content: - application/json: - schema: - $ref: '#/components/schemas/V2DeployGenerateUploadUrlResponseBody' - description: Upload URL generated successfully - "400": - content: - application/json: - schema: - $ref: '#/components/schemas/BadRequestErrorResponse' - description: Bad request - "401": - content: - application/json: - schema: - $ref: '#/components/schemas/UnauthorizedErrorResponse' - description: Unauthorized - "403": - content: - application/json: - schema: - $ref: '#/components/schemas/ForbiddenErrorResponse' - description: Forbidden - "404": - content: - application/json: - schema: - $ref: '#/components/schemas/NotFoundErrorResponse' - description: Not found - "500": - content: - application/json: - schema: - $ref: '#/components/schemas/InternalServerErrorResponse' - description: Internal server error - security: - - rootKey: [] - summary: Generate upload URL - tags: - - deploy - x-speakeasy-group: internal - x-speakeasy-name-override: generateUploadUrl /v2/deploy.getDeployment: post: description: | diff --git a/svc/api/openapi/openapi-split.yaml b/svc/api/openapi/openapi-split.yaml index d44c3d80fc..08d2fe0fa8 100644 --- a/svc/api/openapi/openapi-split.yaml +++ b/svc/api/openapi/openapi-split.yaml @@ -139,8 +139,6 @@ paths: $ref: "./spec/paths/v2/deploy/createDeployment/index.yaml" /v2/deploy.getDeployment: $ref: "./spec/paths/v2/deploy/getDeployment/index.yaml" - /v2/deploy.generateUploadUrl: - $ref: "./spec/paths/v2/deploy/generateUploadUrl/index.yaml" # Identity Endpoints /v2/identities.createIdentity: diff --git a/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployBuildSource.yaml b/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployBuildSource.yaml deleted file mode 100644 index 5ee180294d..0000000000 --- 
a/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployBuildSource.yaml +++ /dev/null @@ -1,23 +0,0 @@ -type: object -description: Build from source configuration -required: - - build -properties: - build: - type: object - description: | - Build context for building from source. - Provide either `build.context` (build from source) or `image` (prebuilt image), but not both. - required: - - context - properties: - context: - type: string - minLength: 1 - description: S3 path to uploaded build context tarball - example: "s3://bucket/path/to/context.tar.gz" - dockerfile: - type: string - description: Optional path to Dockerfile within build context (defaults to "Dockerfile") - example: "Dockerfile" - default: "Dockerfile" diff --git a/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployCreateDeploymentRequestBody.yaml b/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployCreateDeploymentRequestBody.yaml index 76ab19dca5..ecb34eeff7 100644 --- a/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployCreateDeploymentRequestBody.yaml +++ b/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployCreateDeploymentRequestBody.yaml @@ -1,33 +1,35 @@ -allOf: - - type: object - required: - - projectId - - branch - - environmentSlug - properties: - projectId: - type: string - minLength: 1 - description: Unkey project ID - example: "proj_123abc" - pattern: "^proj_[a-zA-Z0-9]+$" - keyspaceId: - type: string - description: Optional keyspace ID for authentication context - example: "key_abc123" - branch: - type: string - minLength: 1 - description: Git branch name - example: "main" - environmentSlug: - type: string - minLength: 1 - description: Environment slug (e.g., "production", "staging") - example: "production" - gitCommit: - $ref: "./V2DeployGitCommit.yaml" - oneOf: - - $ref: "./V2DeployBuildSource.yaml" - - $ref: "./V2DeployImageSource.yaml" - description: Deployment source - either build from source or use prebuilt image +type: object 
+required: + - projectId + - branch + - environmentSlug + - dockerImage +properties: + projectId: + type: string + minLength: 1 + description: Unkey project ID + example: "proj_123abc" + pattern: "^proj_[a-zA-Z0-9]+$" + keyspaceId: + type: string + description: Optional keyspace ID for authentication context + example: "key_abc123" + branch: + type: string + minLength: 1 + description: Git branch name + example: "main" + environmentSlug: + type: string + minLength: 1 + description: Environment slug (e.g., "production", "staging") + example: "production" + dockerImage: + type: string + minLength: 1 + description: Docker image reference to deploy + example: "ghcr.io/user/app:v1.0.0" + gitCommit: + $ref: "./V2DeployGitCommit.yaml" +description: Create a deployment from a pre-built Docker image diff --git a/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployImageSource.yaml b/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployImageSource.yaml deleted file mode 100644 index c8e311ccb3..0000000000 --- a/svc/api/openapi/spec/paths/v2/deploy/createDeployment/V2DeployImageSource.yaml +++ /dev/null @@ -1,10 +0,0 @@ -type: object -description: Prebuilt Docker image configuration -required: - - image -properties: - image: - type: string - minLength: 1 - description: Prebuilt Docker image reference - example: "nginx:latest" diff --git a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlRequestBody.yaml b/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlRequestBody.yaml deleted file mode 100644 index 8474b8d34f..0000000000 --- a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlRequestBody.yaml +++ /dev/null @@ -1,10 +0,0 @@ -type: object -required: - - projectId -properties: - projectId: - type: string - minLength: 1 - description: Unkey project ID for which to generate the upload URL - example: "proj_123abc" - pattern: "^proj_[a-zA-Z0-9]+$" diff --git 
a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlResponseBody.yaml b/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlResponseBody.yaml deleted file mode 100644 index ede97a3c64..0000000000 --- a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlResponseBody.yaml +++ /dev/null @@ -1,9 +0,0 @@ -type: object -required: - - meta - - data -properties: - meta: - $ref: "../../../../common/Meta.yaml" - data: - $ref: "./V2DeployGenerateUploadUrlResponseData.yaml" diff --git a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlResponseData.yaml b/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlResponseData.yaml deleted file mode 100644 index 84a56a3cfc..0000000000 --- a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/V2DeployGenerateUploadUrlResponseData.yaml +++ /dev/null @@ -1,13 +0,0 @@ -type: object -required: - - uploadUrl - - context -properties: - uploadUrl: - type: string - description: Presigned PUT URL for uploading the build context tar file - example: "https://s3.amazonaws.com/bucket/path?signature=..." - context: - type: string - description: S3 path to use in the createDeployment request when building from source - example: "proj_123abc/ctx_456def.tar.gz" diff --git a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/index.yaml b/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/index.yaml deleted file mode 100644 index a70f15d620..0000000000 --- a/svc/api/openapi/spec/paths/v2/deploy/generateUploadUrl/index.yaml +++ /dev/null @@ -1,59 +0,0 @@ -post: - tags: - - deploy - summary: Generate upload URL - description: | - **INTERNAL** - This endpoint is internal and may change without notice. - Not recommended for production use. - - Generates a presigned S3 URL for uploading build context archives. - - **Authentication**: Requires a valid root key with appropriate permissions. 
- operationId: deploy.generateUploadUrl - x-speakeasy-name-override: generateUploadUrl - x-speakeasy-group: internal - security: - - rootKey: [] - requestBody: - required: true - content: - application/json: - schema: - "$ref": "./V2DeployGenerateUploadUrlRequestBody.yaml" - responses: - "200": - description: Upload URL generated successfully - content: - application/json: - schema: - "$ref": "./V2DeployGenerateUploadUrlResponseBody.yaml" - "400": - description: Bad request - content: - application/json: - schema: - "$ref": "../../../../error/BadRequestErrorResponse.yaml" - "401": - description: Unauthorized - content: - application/json: - schema: - "$ref": "../../../../error/UnauthorizedErrorResponse.yaml" - "403": - description: Forbidden - content: - application/json: - schema: - $ref: "../../../../error/ForbiddenErrorResponse.yaml" - "404": - description: Not found - content: - application/json: - schema: - "$ref": "../../../../error/NotFoundErrorResponse.yaml" - "500": - description: Internal server error - content: - application/json: - schema: - "$ref": "../../../../error/InternalServerErrorResponse.yaml" diff --git a/svc/api/routes/BUILD.bazel b/svc/api/routes/BUILD.bazel index 66ef439641..37554be522 100644 --- a/svc/api/routes/BUILD.bazel +++ b/svc/api/routes/BUILD.bazel @@ -35,7 +35,6 @@ go_library( "//svc/api/routes/v2_apis_get_api", "//svc/api/routes/v2_apis_list_keys", "//svc/api/routes/v2_deploy_create_deployment", - "//svc/api/routes/v2_deploy_generate_upload_url", "//svc/api/routes/v2_deploy_get_deployment", "//svc/api/routes/v2_identities_create_identity", "//svc/api/routes/v2_identities_delete_identity", diff --git a/svc/api/routes/register.go b/svc/api/routes/register.go index 2e01e59847..bda3e61a32 100644 --- a/svc/api/routes/register.go +++ b/svc/api/routes/register.go @@ -27,7 +27,6 @@ import ( v2ApisListKeys "github.com/unkeyed/unkey/svc/api/routes/v2_apis_list_keys" v2DeployCreateDeployment 
"github.com/unkeyed/unkey/svc/api/routes/v2_deploy_create_deployment" - v2DeployGenerateUploadUrl "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_generate_upload_url" v2DeployGetDeployment "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_get_deployment" v2IdentitiesCreateIdentity "github.com/unkeyed/unkey/svc/api/routes/v2_identities_create_identity" @@ -358,17 +357,6 @@ func Register(srv *zen.Server, svc *Services, info zen.InstanceInfo) { }, ) - // v2/deploy.generateUploadUrl - srv.RegisterRoute( - defaultMiddlewares, - &v2DeployGenerateUploadUrl.Handler{ - Logger: svc.Logger, - DB: svc.Database, - Keys: svc.Keys, - CtrlClient: svc.CtrlDeploymentClient, - }, - ) - // --------------------------------------------------------------------------- // v2/permissions diff --git a/svc/api/routes/v2_deploy_create_deployment/200_test.go b/svc/api/routes/v2_deploy_create_deployment/200_test.go index 287778fb82..c111424d6d 100644 --- a/svc/api/routes/v2_deploy_create_deployment/200_test.go +++ b/svc/api/routes/v2_deploy_create_deployment/200_test.go @@ -44,53 +44,9 @@ func TestCreateDeploymentSuccessfully(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", - } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - - res := testutil.CallRoute[handler.Request, handler.Response]( - h, - route, - headers, - req, - ) - - require.Equal(t, 201, res.Status, "expected 201, received: %#v", res) - require.NotNil(t, res.Body) - require.NotEmpty(t, res.Body.Data.DeploymentId, "deployment ID should not be empty") - }) - - t.Run("create deployment with build context", func(t *testing.T) { - setup := h.CreateTestDeploymentSetup(testutil.CreateTestDeploymentSetupOptions{ - ProjectName: "test-build-project", - ProjectSlug: "staging", - EnvironmentSlug: "staging", - Permissions: []string{"project.*.create_deployment"}, - }) - - headers := http.Header{ - 
"Content-Type": {"application/json"}, - "Authorization": {fmt.Sprintf("Bearer %s", setup.RootKey)}, + DockerImage: "nginx:latest", } - req := handler.Request{ - ProjectId: setup.Project.ID, - Branch: "develop", - EnvironmentSlug: "staging", - } - err := req.FromV2DeployBuildSource(openapi.V2DeployBuildSource{ - Build: struct { - Context string `json:"context"` - Dockerfile *string `json:"dockerfile,omitempty"` - }{ - Context: "s3://bucket/path/to/context.tar.gz", - Dockerfile: ptr.P("./Dockerfile"), - }, - }) - require.NoError(t, err, "failed to set build source") - res := testutil.CallRoute[handler.Request, handler.Response]( h, route, @@ -118,6 +74,7 @@ func TestCreateDeploymentSuccessfully(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", GitCommit: &openapi.V2DeployGitCommit{ AuthorAvatarUrl: ptr.P("https://avatar.example.com/johndoe.jpg"), AuthorHandle: ptr.P("johndoe"), @@ -126,10 +83,6 @@ func TestCreateDeploymentSuccessfully(t *testing.T) { Timestamp: ptr.P(int64(1704067200000)), }, } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") res := testutil.CallRoute[handler.Request, handler.Response]( h, @@ -173,11 +126,8 @@ func TestCreateDeploymentWithWildcardPermission(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) require.Equal(t, http.StatusCreated, res.Status, "Expected 201, got: %d", res.Status) @@ -215,11 +165,8 @@ func TestCreateDeploymentWithSpecificProjectPermission(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: 
"nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) require.Equal(t, http.StatusCreated, res.Status, "Expected 201, got: %d", res.Status) diff --git a/svc/api/routes/v2_deploy_create_deployment/400_test.go b/svc/api/routes/v2_deploy_create_deployment/400_test.go index 07cb966465..fcd54a13d5 100644 --- a/svc/api/routes/v2_deploy_create_deployment/400_test.go +++ b/svc/api/routes/v2_deploy_create_deployment/400_test.go @@ -38,12 +38,12 @@ func TestBadRequests(t *testing.T) { "Authorization": {fmt.Sprintf("Bearer %s", setup.RootKey)}, } - t.Run("missing both build and image", func(t *testing.T) { + t.Run("missing dockerImage", func(t *testing.T) { req := handler.Request{ ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", - // Neither build nor image provided + // DockerImage not provided } res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) @@ -51,40 +51,6 @@ func TestBadRequests(t *testing.T) { require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) require.NotNil(t, res.Body) require.Equal(t, "https://unkey.com/docs/errors/unkey/application/invalid_input", res.Body.Error.Type) - // The OpenAPI schema validator catches this with a generic schema validation error - require.Contains(t, res.Body.Error.Detail, "failed to validate schema") - require.Equal(t, http.StatusBadRequest, res.Body.Error.Status) - require.NotEmpty(t, res.Body.Meta.RequestId) - }) - - t.Run("both build and image provided", func(t *testing.T) { - req := handler.Request{ - ProjectId: setup.Project.ID, - Branch: "main", - EnvironmentSlug: "production", - } - - // Manually set both build and image in the union by merging both types - // This tests that the OpenAPI oneOf validation rejects 
requests with both sources - _ = req.FromV2DeployBuildSource(openapi.V2DeployBuildSource{ - Build: struct { - Context string "json:\"context\"" - Dockerfile *string "json:\"dockerfile,omitempty\"" - }{ - Context: "/app", - }, - }) - _ = req.MergeV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) - - require.Equal(t, 400, res.Status, "expected 400 when both build and image are provided, sent: %+v, received: %s", req, res.RawBody) - require.NotNil(t, res.Body) - require.Equal(t, "https://unkey.com/docs/errors/unkey/application/invalid_input", res.Body.Error.Type) - // The OpenAPI schema validator catches this with a generic schema validation error - require.Contains(t, res.Body.Error.Detail, "failed to validate schema") require.Equal(t, http.StatusBadRequest, res.Body.Error.Status) require.NotEmpty(t, res.Body.Meta.RequestId) }) @@ -93,10 +59,8 @@ func TestBadRequests(t *testing.T) { req := handler.Request{ Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - _ = req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) @@ -110,10 +74,8 @@ func TestBadRequests(t *testing.T) { req := handler.Request{ ProjectId: setup.Project.ID, EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - _ = req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) @@ -127,15 +89,11 @@ func TestBadRequests(t *testing.T) { t.Run("missing environmentSlug", func(t *testing.T) { req := handler.Request{ - ProjectId: setup.Project.ID, - Branch: "main", + ProjectId: setup.Project.ID, + Branch: "main", + DockerImage: "nginx:latest", } - err := 
req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) @@ -156,13 +114,9 @@ func TestBadRequests(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) require.Equal(t, http.StatusBadRequest, res.Status) require.NotNil(t, res.Body) @@ -178,13 +132,9 @@ func TestBadRequests(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) require.Equal(t, http.StatusBadRequest, res.Status) require.NotNil(t, res.Body) @@ -195,13 +145,9 @@ func TestBadRequests(t *testing.T) { ProjectId: "", Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) @@ -216,13 +162,9 @@ func TestBadRequests(t *testing.T) { ProjectId: setup.Project.ID, Branch: "", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := 
req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) @@ -237,13 +179,9 @@ func TestBadRequests(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) @@ -253,21 +191,14 @@ func TestBadRequests(t *testing.T) { require.NotEmpty(t, res.Body.Meta.RequestId) }) - t.Run("build with missing context", func(t *testing.T) { + t.Run("empty dockerImage", func(t *testing.T) { req := handler.Request{ ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: "", } - err := req.FromV2DeployBuildSource(openapi.V2DeployBuildSource{ - Build: struct { - Context string "json:\"context\"" - Dockerfile *string "json:\"dockerfile,omitempty\"" - }{}, - }) - require.NoError(t, err, "failed to set build source") - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) diff --git a/svc/api/routes/v2_deploy_create_deployment/401_test.go b/svc/api/routes/v2_deploy_create_deployment/401_test.go index 69479f78e6..3029eb4809 100644 --- a/svc/api/routes/v2_deploy_create_deployment/401_test.go +++ b/svc/api/routes/v2_deploy_create_deployment/401_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/require" ctrlv1 
"github.com/unkeyed/unkey/gen/proto/ctrl/v1" "github.com/unkeyed/unkey/svc/api/internal/testutil" - "github.com/unkeyed/unkey/svc/api/openapi" handler "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_create_deployment" ) @@ -42,14 +41,9 @@ func TestUnauthorizedAccess(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) require.Equal(t, http.StatusUnauthorized, res.Status, "expected 401, received: %s", res.RawBody) require.NotNil(t, res.Body) diff --git a/svc/api/routes/v2_deploy_create_deployment/403_test.go b/svc/api/routes/v2_deploy_create_deployment/403_test.go index e1044f2c52..201505304f 100644 --- a/svc/api/routes/v2_deploy_create_deployment/403_test.go +++ b/svc/api/routes/v2_deploy_create_deployment/403_test.go @@ -10,7 +10,6 @@ import ( "github.com/stretchr/testify/require" ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" "github.com/unkeyed/unkey/svc/api/internal/testutil" - "github.com/unkeyed/unkey/svc/api/openapi" handler "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_create_deployment" ) @@ -45,13 +44,10 @@ func TestCreateDeploymentInsufficientPermissions(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) + res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) require.Equal(t, http.StatusForbidden, res.Status) require.NotNil(t, res.Body) } diff --git 
a/svc/api/routes/v2_deploy_create_deployment/404_test.go b/svc/api/routes/v2_deploy_create_deployment/404_test.go index d327edb81a..9f608c22b7 100644 --- a/svc/api/routes/v2_deploy_create_deployment/404_test.go +++ b/svc/api/routes/v2_deploy_create_deployment/404_test.go @@ -43,14 +43,9 @@ func TestProjectNotFound(t *testing.T) { ProjectId: uid.New(uid.ProjectPrefix), // Non-existent project ID Branch: "main", EnvironmentSlug: "production", + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) require.Equal(t, http.StatusNotFound, res.Status, "expected 404, received: %s", res.RawBody) require.NotNil(t, res.Body) @@ -87,13 +82,9 @@ func TestEnvironmentNotFound(t *testing.T) { ProjectId: setup.Project.ID, Branch: "main", EnvironmentSlug: "nonexistent-env", // Non-existent environment + DockerImage: "nginx:latest", } - err := req.FromV2DeployImageSource(openapi.V2DeployImageSource{ - Image: "nginx:latest", - }) - require.NoError(t, err, "failed to set image source") - res := testutil.CallRoute[handler.Request, openapi.NotFoundErrorResponse](h, route, headers, req) require.Equal(t, http.StatusNotFound, res.Status, "expected 404, received: %s", res.RawBody) require.NotNil(t, res.Body) diff --git a/svc/api/routes/v2_deploy_create_deployment/handler.go b/svc/api/routes/v2_deploy_create_deployment/handler.go index 4ecc264c9f..e316e9a126 100644 --- a/svc/api/routes/v2_deploy_create_deployment/handler.go +++ b/svc/api/routes/v2_deploy_create_deployment/handler.go @@ -91,6 +91,7 @@ func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { ProjectId: req.ProjectId, Branch: req.Branch, EnvironmentSlug: req.EnvironmentSlug, + DockerImage: req.DockerImage, GitCommit: &ctrlv1.GitCommitInfo{}, } @@ -99,29 +100,6 @@ func (h *Handler) 
Handle(ctx context.Context, s *zen.Session) error { ctrlReq.KeyspaceId = req.KeyspaceId } - // Handle source (build vs image) using oneOf union type - buildSource, buildErr := req.AsV2DeployBuildSource() - - if buildErr == nil && buildSource.Build.Context != "" { - // Build source - // nolint: exhaustruct // optional proto fields, only setting whats provided - buildContext := &ctrlv1.BuildContext{ - BuildContextPath: buildSource.Build.Context, - } - if buildSource.Build.Dockerfile != nil { - buildContext.DockerfilePath = buildSource.Build.Dockerfile - } - ctrlReq.Source = &ctrlv1.CreateDeploymentRequest_BuildContext{ - BuildContext: buildContext, - } - } else { - // Image source - imageSource, _ := req.AsV2DeployImageSource() - ctrlReq.Source = &ctrlv1.CreateDeploymentRequest_DockerImage{ - DockerImage: imageSource.Image, - } - } - // Handle optional git commit info if req.GitCommit != nil { // nolint: exhaustruct // optional proto fields, only setting whats provided diff --git a/svc/api/routes/v2_deploy_generate_upload_url/200_test.go b/svc/api/routes/v2_deploy_generate_upload_url/200_test.go deleted file mode 100644 index a98922b0a9..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/200_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package handler_test - -import ( - "context" - "fmt" - "net/http" - "testing" - - "connectrpc.com/connect" - "github.com/stretchr/testify/require" - ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/svc/api/internal/testutil" - handler "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_generate_upload_url" -) - -func TestGenerateUploadUrlSuccessfully(t *testing.T) { - h := testutil.NewHarness(t) - - route := &handler.Handler{ - Logger: h.Logger, - DB: h.DB, - Keys: h.Keys, - CtrlClient: &testutil.MockDeploymentClient{ - CreateS3UploadURLFunc: func(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - return 
connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - UploadUrl: "https://s3.example.com/upload", - BuildContextPath: "s3://bucket/path/to/context.tar.gz", - }), nil - }, - }, - } - h.Register(route) - - t.Run("generate upload URL successfully", func(t *testing.T) { - setup := h.CreateTestDeploymentSetup(testutil.CreateTestDeploymentSetupOptions{ - SkipEnvironment: true, - Permissions: []string{"project.*.generate_upload_url"}, - }) - - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {fmt.Sprintf("Bearer %s", setup.RootKey)}, - } - - req := handler.Request{ - ProjectId: setup.Project.ID, - } - - res := testutil.CallRoute[handler.Request, handler.Response]( - h, - route, - headers, - req, - ) - - require.Equal(t, 200, res.Status, "expected 200, received: %#v", res) - require.NotNil(t, res.Body) - require.NotEmpty(t, res.Body.Data.UploadUrl, "upload URL should not be empty") - require.NotEmpty(t, res.Body.Data.Context, "build context path should not be empty") - }) -} - -func TestGenerateUploadUrlWithWildcardPermission(t *testing.T) { - t.Parallel() - h := testutil.NewHarness(t) - - route := &handler.Handler{ - Logger: h.Logger, - DB: h.DB, - Keys: h.Keys, - CtrlClient: &testutil.MockDeploymentClient{ - CreateS3UploadURLFunc: func(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - UploadUrl: "https://s3.example.com/upload", - BuildContextPath: "s3://bucket/path/to/context.tar.gz", - }), nil - }, - }, - } - h.Register(route) - - setup := h.CreateTestDeploymentSetup(testutil.CreateTestDeploymentSetupOptions{ - SkipEnvironment: true, - Permissions: []string{"project.*.generate_upload_url"}, - }) - - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {fmt.Sprintf("Bearer %s", setup.RootKey)}, - } - - req := handler.Request{ - ProjectId: 
setup.Project.ID, - } - - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) - require.Equal(t, http.StatusOK, res.Status, "Expected 200, got: %d", res.Status) - require.NotNil(t, res.Body) -} - -func TestGenerateUploadUrlWithSpecificProjectPermission(t *testing.T) { - t.Parallel() - h := testutil.NewHarness(t) - - route := &handler.Handler{ - Logger: h.Logger, - DB: h.DB, - Keys: h.Keys, - CtrlClient: &testutil.MockDeploymentClient{ - CreateS3UploadURLFunc: func(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - UploadUrl: "https://s3.example.com/upload", - BuildContextPath: "s3://bucket/path/to/context.tar.gz", - }), nil - }, - }, - } - h.Register(route) - - // First create the project setup - setup := h.CreateTestDeploymentSetup(testutil.CreateTestDeploymentSetupOptions{ - SkipEnvironment: true, - }) - - // Now create a root key with project-specific permission - rootKey := h.CreateRootKey(setup.Workspace.ID, fmt.Sprintf("project.%s.generate_upload_url", setup.Project.ID)) - - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, - } - - req := handler.Request{ - ProjectId: setup.Project.ID, - } - - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) - require.Equal(t, http.StatusOK, res.Status, "Expected 200, got: %d", res.Status) - require.NotNil(t, res.Body) -} diff --git a/svc/api/routes/v2_deploy_generate_upload_url/400_test.go b/svc/api/routes/v2_deploy_generate_upload_url/400_test.go deleted file mode 100644 index 5f94a46911..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/400_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package handler_test - -import ( - "context" - "fmt" - "net/http" - "testing" - - "connectrpc.com/connect" - "github.com/stretchr/testify/require" 
- ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/svc/api/internal/testutil" - "github.com/unkeyed/unkey/svc/api/openapi" - handler "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_generate_upload_url" -) - -func TestBadRequests(t *testing.T) { - h := testutil.NewHarness(t) - - route := &handler.Handler{ - Logger: h.Logger, - DB: h.DB, - Keys: h.Keys, - CtrlClient: &testutil.MockDeploymentClient{ - CreateS3UploadURLFunc: func(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - UploadUrl: "https://s3.example.com/upload", - BuildContextPath: "s3://bucket/path/to/context.tar.gz", - }), nil - }, - }, - } - h.Register(route) - - setup := h.CreateTestDeploymentSetup(testutil.CreateTestDeploymentSetupOptions{ - SkipEnvironment: true, - Permissions: []string{"project.*.generate_upload_url"}, - }) - - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {fmt.Sprintf("Bearer %s", setup.RootKey)}, - } - - t.Run("missing projectId", func(t *testing.T) { - req := handler.Request{} - - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) - - require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) - require.NotNil(t, res.Body) - require.Equal(t, "https://unkey.com/docs/errors/unkey/application/invalid_input", res.Body.Error.Type) - require.Equal(t, http.StatusBadRequest, res.Body.Error.Status) - }) - - t.Run("empty projectId", func(t *testing.T) { - req := handler.Request{ - ProjectId: "", - } - - res := testutil.CallRoute[handler.Request, openapi.BadRequestErrorResponse](h, route, headers, req) - - require.Equal(t, 400, res.Status, "expected 400, sent: %+v, received: %s", req, res.RawBody) - require.NotNil(t, res.Body) - require.Equal(t, 
"https://unkey.com/docs/errors/unkey/application/invalid_input", res.Body.Error.Type) - require.Equal(t, http.StatusBadRequest, res.Body.Error.Status) - require.NotEmpty(t, res.Body.Meta.RequestId) - }) - - t.Run("missing authorization header", func(t *testing.T) { - headers := http.Header{ - "Content-Type": {"application/json"}, - // No Authorization header - } - - req := handler.Request{ - ProjectId: setup.Project.ID, - } - - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) - require.Equal(t, http.StatusBadRequest, res.Status) - require.NotNil(t, res.Body) - }) - - t.Run("malformed authorization header", func(t *testing.T) { - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {"malformed_header"}, - } - - req := handler.Request{ - ProjectId: setup.Project.ID, - } - - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) - require.Equal(t, http.StatusBadRequest, res.Status) - require.NotNil(t, res.Body) - }) -} diff --git a/svc/api/routes/v2_deploy_generate_upload_url/401_test.go b/svc/api/routes/v2_deploy_generate_upload_url/401_test.go deleted file mode 100644 index 0944caf86f..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/401_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package handler_test - -import ( - "context" - "net/http" - "testing" - - "connectrpc.com/connect" - "github.com/stretchr/testify/require" - ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/svc/api/internal/testutil" - handler "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_generate_upload_url" -) - -func TestUnauthorizedAccess(t *testing.T) { - h := testutil.NewHarness(t) - - route := &handler.Handler{ - Logger: h.Logger, - DB: h.DB, - Keys: h.Keys, - CtrlClient: &testutil.MockDeploymentClient{ - CreateS3UploadURLFunc: func(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], 
error) { - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - UploadUrl: "https://s3.example.com/upload", - BuildContextPath: "s3://bucket/path/to/context.tar.gz", - }), nil - }, - }, - } - h.Register(route) - - setup := h.CreateTestDeploymentSetup(testutil.CreateTestDeploymentSetupOptions{ - SkipEnvironment: true, - Permissions: []string{"project.*.generate_upload_url"}, - }) - - t.Run("invalid authorization token", func(t *testing.T) { - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {"Bearer invalid_token"}, - } - - req := handler.Request{ - ProjectId: setup.Project.ID, - } - - res := testutil.CallRoute[handler.Request, handler.Response](h, route, headers, req) - require.Equal(t, http.StatusUnauthorized, res.Status, "expected 401, received: %s", res.RawBody) - require.NotNil(t, res.Body) - }) -} diff --git a/svc/api/routes/v2_deploy_generate_upload_url/403_test.go b/svc/api/routes/v2_deploy_generate_upload_url/403_test.go deleted file mode 100644 index 279915c15c..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/403_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package handler_test - -import ( - "context" - "fmt" - "net/http" - "testing" - - "connectrpc.com/connect" - "github.com/stretchr/testify/require" - ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/svc/api/internal/testutil" - "github.com/unkeyed/unkey/svc/api/openapi" - handler "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_generate_upload_url" -) - -func TestGenerateUploadUrlInsufficientPermissions(t *testing.T) { - t.Parallel() - - h := testutil.NewHarness(t) - - route := &handler.Handler{ - Logger: h.Logger, - DB: h.DB, - Keys: h.Keys, - CtrlClient: &testutil.MockDeploymentClient{ - CreateS3UploadURLFunc: func(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - 
UploadUrl: "https://s3.example.com/upload", - BuildContextPath: "s3://bucket/path/to/context.tar.gz", - }), nil - }, - }, - } - h.Register(route) - - // Create setup with insufficient permissions - setup := h.CreateTestDeploymentSetup(testutil.CreateTestDeploymentSetupOptions{ - Permissions: []string{"project.*.create_deployment"}, - }) - - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {fmt.Sprintf("Bearer %s", setup.RootKey)}, - } - - req := handler.Request{ - ProjectId: setup.Project.ID, - } - - res := testutil.CallRoute[handler.Request, openapi.ForbiddenErrorResponse](h, route, headers, req) - require.Equal(t, http.StatusForbidden, res.Status) - require.NotNil(t, res.Body) -} diff --git a/svc/api/routes/v2_deploy_generate_upload_url/404_test.go b/svc/api/routes/v2_deploy_generate_upload_url/404_test.go deleted file mode 100644 index 64f6b249fe..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/404_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package handler_test - -import ( - "context" - "fmt" - "net/http" - "testing" - - "connectrpc.com/connect" - "github.com/stretchr/testify/require" - ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/pkg/uid" - "github.com/unkeyed/unkey/svc/api/internal/testutil" - "github.com/unkeyed/unkey/svc/api/openapi" - handler "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_generate_upload_url" -) - -func TestNotFound(t *testing.T) { - h := testutil.NewHarness(t) - - route := &handler.Handler{ - Logger: h.Logger, - DB: h.DB, - Keys: h.Keys, - CtrlClient: &testutil.MockDeploymentClient{ - CreateS3UploadURLFunc: func(ctx context.Context, req *connect.Request[ctrlv1.CreateS3UploadURLRequest]) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - UploadUrl: "https://s3.example.com/upload", - BuildContextPath: "s3://bucket/path/to/context.tar.gz", - }), nil - }, - }, - } - h.Register(route) 
- - workspace := h.CreateWorkspace() - rootKey := h.CreateRootKey(workspace.ID, "project.*.generate_upload_url") - - headers := http.Header{ - "Content-Type": {"application/json"}, - "Authorization": {fmt.Sprintf("Bearer %s", rootKey)}, - } - - t.Run("project not found", func(t *testing.T) { - req := handler.Request{ - ProjectId: uid.New(uid.ProjectPrefix), // Non-existent project ID - } - - res := testutil.CallRoute[handler.Request, openapi.InternalServerErrorResponse](h, route, headers, req) - require.Equal(t, http.StatusNotFound, res.Status, "expected 400, received: %s", res.RawBody) - require.NotNil(t, res.Body) - require.Equal(t, "https://unkey.com/docs/errors/unkey/data/project_not_found", res.Body.Error.Type) - require.Equal(t, http.StatusNotFound, res.Body.Error.Status) - require.Equal(t, "The requested project does not exist or has been deleted.", res.Body.Error.Detail) - }) -} diff --git a/svc/api/routes/v2_deploy_generate_upload_url/BUILD.bazel b/svc/api/routes/v2_deploy_generate_upload_url/BUILD.bazel deleted file mode 100644 index ff532c23fa..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/BUILD.bazel +++ /dev/null @@ -1,45 +0,0 @@ -load("@rules_go//go:def.bzl", "go_library", "go_test") - -go_library( - name = "v2_deploy_generate_upload_url", - srcs = [ - "doc.go", - "handler.go", - ], - importpath = "github.com/unkeyed/unkey/svc/api/routes/v2_deploy_generate_upload_url", - visibility = ["//visibility:public"], - deps = [ - "//gen/proto/ctrl/v1:ctrl", - "//gen/proto/ctrl/v1/ctrlv1connect", - "//internal/services/keys", - "//pkg/codes", - "//pkg/db", - "//pkg/fault", - "//pkg/otel/logging", - "//pkg/rbac", - "//pkg/zen", - "//svc/api/internal/ctrlclient", - "//svc/api/openapi", - "@com_connectrpc_connect//:connect", - ], -) - -go_test( - name = "v2_deploy_generate_upload_url_test", - srcs = [ - "200_test.go", - "400_test.go", - "401_test.go", - "403_test.go", - "404_test.go", - ], - deps = [ - ":v2_deploy_generate_upload_url", - 
"//gen/proto/ctrl/v1:ctrl", - "//pkg/uid", - "//svc/api/internal/testutil", - "//svc/api/openapi", - "@com_connectrpc_connect//:connect", - "@com_github_stretchr_testify//require", - ], -) diff --git a/svc/api/routes/v2_deploy_generate_upload_url/doc.go b/svc/api/routes/v2_deploy_generate_upload_url/doc.go deleted file mode 100644 index 71d655d10b..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/doc.go +++ /dev/null @@ -1,31 +0,0 @@ -// Package handler implements the POST /v2/deploy.generateUploadUrl endpoint -// for generating pre-signed S3 URLs used to upload deployment build contexts. -// -// This endpoint is part of the deployment workflow where clients upload their -// build artifacts to S3 before triggering a deployment. The handler delegates -// URL generation to the control plane service via gRPC, ensuring all upload -// URLs are centrally managed and consistently configured. -// -// # Authentication and Authorization -// -// Requests must include a valid root key in the Authorization header. The root -// key must have either wildcard project permission (project.*.generate_upload_url) -// or specific permission for the target project (project..generate_upload_url). -// -// The handler also verifies that the requested project belongs to the workspace -// associated with the root key. Requests for projects in other workspaces return -// 404 to avoid leaking information about project existence. -// -// # Request Flow -// -// The handler validates the root key, binds and validates the request body, -// checks RBAC permissions, verifies project ownership, then calls the control -// plane to generate the upload URL. On success, it returns both the pre-signed -// upload URL and the build context path where the uploaded artifact will be stored. 
-// -// # Error Responses -// -// The handler returns 400 for missing or invalid request body, 401 for invalid -// root keys, 403 for insufficient permissions, and 404 when the project does -// not exist or belongs to a different workspace. -package handler diff --git a/svc/api/routes/v2_deploy_generate_upload_url/handler.go b/svc/api/routes/v2_deploy_generate_upload_url/handler.go deleted file mode 100644 index 5329b349f8..0000000000 --- a/svc/api/routes/v2_deploy_generate_upload_url/handler.go +++ /dev/null @@ -1,121 +0,0 @@ -package handler - -import ( - "context" - "net/http" - - "connectrpc.com/connect" - ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/gen/proto/ctrl/v1/ctrlv1connect" - "github.com/unkeyed/unkey/internal/services/keys" - "github.com/unkeyed/unkey/pkg/codes" - "github.com/unkeyed/unkey/pkg/db" - "github.com/unkeyed/unkey/pkg/fault" - "github.com/unkeyed/unkey/pkg/otel/logging" - "github.com/unkeyed/unkey/pkg/rbac" - "github.com/unkeyed/unkey/pkg/zen" - "github.com/unkeyed/unkey/svc/api/internal/ctrlclient" - "github.com/unkeyed/unkey/svc/api/openapi" -) - -type ( - // Request is the request body for generating an upload URL, containing the - // target project ID. Aliased from [openapi.V2DeployGenerateUploadUrlRequestBody]. - Request = openapi.V2DeployGenerateUploadUrlRequestBody - - // Response is the response body containing the pre-signed upload URL and - // build context path. Aliased from [openapi.V2DeployGenerateUploadUrlResponseBody]. - Response = openapi.V2DeployGenerateUploadUrlResponseBody -) - -// Handler generates pre-signed S3 upload URLs for deployment build contexts. -// It validates authentication, checks RBAC permissions, verifies project ownership, -// and delegates URL generation to the control plane service. 
-type Handler struct { - Logger logging.Logger - DB db.Database - Keys keys.KeyService - CtrlClient ctrlv1connect.DeploymentServiceClient -} - -// Path returns the URL path for this endpoint. -func (h *Handler) Path() string { - return "/v2/deploy.generateUploadUrl" -} - -// Method returns the HTTP method for this endpoint. -func (h *Handler) Method() string { - return "POST" -} - -// Handle processes a request to generate a pre-signed S3 upload URL. It -// authenticates via root key, verifies the caller has generate_upload_url -// permission on the project, confirms the project belongs to the caller's -// workspace, then returns an upload URL from the control plane. Returns 400 -// for invalid input, 401 for invalid root key, 403 for missing permissions, -// or 404 if the project does not exist in the caller's workspace. -func (h *Handler) Handle(ctx context.Context, s *zen.Session) error { - auth, emit, err := h.Keys.GetRootKey(ctx, s) - defer emit() - if err != nil { - return err - } - - req, err := zen.BindBody[Request](s) - if err != nil { - return err - } - - err = auth.VerifyRootKey(ctx, keys.WithPermissions(rbac.Or( - rbac.T(rbac.Tuple{ - ResourceType: rbac.Project, - ResourceID: "*", - Action: rbac.GenerateUploadURL, - }), - rbac.T(rbac.Tuple{ - ResourceType: rbac.Project, - ResourceID: req.ProjectId, - Action: rbac.GenerateUploadURL, - }), - ))) - if err != nil { - return err - } - - // Verify project belongs to the authenticated workspace - project, err := db.Query.FindProjectById(ctx, h.DB.RO(), req.ProjectId) - if err != nil { - if db.IsNotFound(err) { - return fault.New("project not found", - fault.Code(codes.Data.Project.NotFound.URN()), - fault.Internal("project not found"), - fault.Public("The requested project does not exist or has been deleted."), - ) - } - return fault.Wrap(err, fault.Internal("failed to find project")) - } - if project.WorkspaceID != auth.AuthorizedWorkspaceID { - return fault.New("wrong workspace", - 
fault.Code(codes.Data.Project.NotFound.URN()), - fault.Internal("wrong workspace, masking as 404"), - fault.Public("The requested project does not exist or has been deleted."), - ) - } - - ctrlResp, err := h.CtrlClient.CreateS3UploadURL(ctx, connect.NewRequest(&ctrlv1.CreateS3UploadURLRequest{ - UnkeyProjectId: req.ProjectId, - })) - if err != nil { - return ctrlclient.HandleError(err, "generate upload URL") - } - - return s.JSON(http.StatusOK, Response{ - Meta: openapi.Meta{ - RequestId: s.RequestID(), - }, - Data: openapi.V2DeployGenerateUploadUrlResponseData{ - UploadUrl: ctrlResp.Msg.GetUploadUrl(), - Context: ctrlResp.Msg.GetBuildContextPath(), - }, - }) -} diff --git a/svc/ctrl/api/BUILD.bazel b/svc/ctrl/api/BUILD.bazel index f57d899cd3..a7432e2659 100644 --- a/svc/ctrl/api/BUILD.bazel +++ b/svc/ctrl/api/BUILD.bazel @@ -1,4 +1,4 @@ -load("@rules_go//go:def.bzl", "go_library") +load("@rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "api", @@ -6,15 +6,19 @@ go_library( "certificate.go", "config.go", "doc.go", + "github_webhook.go", "run.go", + "types.go", ], importpath = "github.com/unkeyed/unkey/svc/ctrl/api", visibility = ["//visibility:public"], deps = [ "//gen/proto/ctrl/v1/ctrlv1connect", + "//gen/proto/hydra/v1:hydra", "//pkg/cache", "//pkg/clock", "//pkg/db", + "//pkg/db/types", "//pkg/otel", "//pkg/otel/logging", "//pkg/prometheus", @@ -23,16 +27,42 @@ go_library( "//pkg/tls", "//pkg/uid", "//pkg/version", - "//svc/ctrl/pkg/s3", "//svc/ctrl/services/acme", "//svc/ctrl/services/cluster", "//svc/ctrl/services/ctrl", "//svc/ctrl/services/customdomain", "//svc/ctrl/services/deployment", "//svc/ctrl/services/openapi", + "//svc/ctrl/worker/github", "@com_github_restatedev_sdk_go//:sdk-go", "@com_github_restatedev_sdk_go//ingress", "@org_golang_x_net//http2", "@org_golang_x_net//http2/h2c", ], ) + +go_test( + name = "api_test", + srcs = [ + "deployment_integration_test.go", + "github_webhook_integration_test.go", + "harness_test.go", + ], + 
embed = [":api"], + deps = [ + "//gen/proto/ctrl/v1:ctrl", + "//gen/proto/ctrl/v1/ctrlv1connect", + "//gen/proto/hydra/v1:hydra", + "//pkg/db", + "//pkg/dockertest", + "//pkg/otel/logging", + "//pkg/uid", + "//svc/ctrl/integration/seed", + "@com_connectrpc_connect//:connect", + "@com_github_restatedev_sdk_go//:sdk-go", + "@com_github_restatedev_sdk_go//server", + "@com_github_stretchr_testify//require", + "@org_golang_x_net//http2", + "@org_golang_x_net//http2/h2c", + ], +) diff --git a/svc/ctrl/api/certificate.go b/svc/ctrl/api/certificate.go index a3d27fbfff..19b9954b69 100644 --- a/svc/ctrl/api/certificate.go +++ b/svc/ctrl/api/certificate.go @@ -22,6 +22,9 @@ type certificateBootstrap struct { regions []string } +// run bootstraps wildcard certificates for all configured domains and starts +// the renewal cron. It waits briefly for dependent services to initialize +// before issuing certificate requests. func (c *certificateBootstrap) run(ctx context.Context) { // Wait for services to be ready time.Sleep(5 * time.Second) diff --git a/svc/ctrl/api/config.go b/svc/ctrl/api/config.go index 77a796d5ef..bcec58b0f4 100644 --- a/svc/ctrl/api/config.go +++ b/svc/ctrl/api/config.go @@ -4,30 +4,6 @@ import ( "github.com/unkeyed/unkey/pkg/tls" ) -// S3Config holds S3 configuration for storage backends. -type S3Config struct { - // URL is the S3 endpoint URL including protocol and region. - // Examples: "https://s3.amazonaws.com" or "https://s3.us-west-2.amazonaws.com". - URL string - - // Bucket is the S3 bucket name for storing objects. - // Must exist and be accessible with the provided credentials. - Bucket string - - // AccessKeyID is the AWS access key ID for S3 authentication. - // Must have appropriate permissions for bucket operations. - AccessKeyID string - - // AccessKeySecret is the AWS secret access key for S3 authentication. - // Should be stored securely and rotated regularly. 
- AccessKeySecret string - - // ExternalURL is the public-facing URL for accessing S3 objects. - // Used when objects need to be accessed from outside the AWS network. - // Optional - can be empty for internal-only access. - ExternalURL string -} - // RestateConfig holds configuration for Restate workflow engine integration. // // The API is a Restate client that invokes workflows. It only needs the @@ -94,13 +70,14 @@ type Config struct { // The API invokes workflows via Restate ingress. Restate RestateConfig - // BuildS3 configures storage for build artifacts and outputs. - BuildS3 S3Config - // AvailableRegions is a list of available regions for deployments. // Typically in the format "region.provider", ie "us-east-1.aws", "local.dev" AvailableRegions []string + // GitHubWebhookSecret is the secret used to verify webhook signatures. + // Configured in the GitHub App webhook settings. + GitHubWebhookSecret string + // DefaultDomain is the fallback domain for system operations. // Used for wildcard certificate bootstrapping. When set, the API will // ensure a wildcard certificate exists for *.{DefaultDomain}. 
diff --git a/svc/ctrl/api/deployment_integration_test.go b/svc/ctrl/api/deployment_integration_test.go new file mode 100644 index 0000000000..d47b9f7f6b --- /dev/null +++ b/svc/ctrl/api/deployment_integration_test.go @@ -0,0 +1,79 @@ +package api + +import ( + "testing" + "time" + + "connectrpc.com/connect" + restate "github.com/restatedev/sdk-go" + "github.com/stretchr/testify/require" + ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" + "github.com/unkeyed/unkey/gen/proto/ctrl/v1/ctrlv1connect" + hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/pkg/db" + "github.com/unkeyed/unkey/pkg/uid" + "github.com/unkeyed/unkey/svc/ctrl/integration/seed" +) + +type mockDeploymentService struct { + hydrav1.UnimplementedDeploymentServiceServer + requests chan *hydrav1.DeployRequest +} + +func (m *mockDeploymentService) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.DeployRequest) (*hydrav1.DeployResponse, error) { + m.requests <- req + return &hydrav1.DeployResponse{}, nil +} + +func TestDeployment_Create_TriggersWorkflow(t *testing.T) { + requests := make(chan *hydrav1.DeployRequest, 1) + harness := newWebhookHarness(t, webhookHarnessConfig{ + Services: []restate.ServiceDefinition{hydrav1.NewDeploymentServiceServer(&mockDeploymentService{requests: requests})}, + }) + + ctx := harness.RequestContext() + workspaceID := harness.Seed.Resources.UserWorkspace.ID + project := harness.CreateProject(ctx, seed.CreateProjectRequest{ + ID: uid.New("prj"), + WorkspaceID: workspaceID, + Name: "test-project", + Slug: uid.New("slug"), + GitRepositoryURL: "https://github.com/acme/repo", + DefaultBranch: "main", + DeleteProtection: false, + }) + environment := harness.CreateEnvironment(ctx, seed.CreateEnvironmentRequest{ + ID: uid.New("env"), + WorkspaceID: workspaceID, + ProjectID: project.ID, + Slug: "production", + Description: "", + SentinelConfig: []byte("{}"), + DeleteProtection: false, + }) + + client := 
ctrlv1connect.NewDeploymentServiceClient(harness.ConnectClient(), harness.CtrlURL, harness.ConnectOptions()...) + resp, err := client.CreateDeployment(ctx, connect.NewRequest(&ctrlv1.CreateDeploymentRequest{ + ProjectId: project.ID, + EnvironmentSlug: environment.Slug, + DockerImage: "nginx:latest", + })) + require.NoError(t, err) + require.NotEmpty(t, resp.Msg.GetDeploymentId()) + require.Equal(t, ctrlv1.DeploymentStatus_DEPLOYMENT_STATUS_PENDING, resp.Msg.GetStatus()) + + select { + case req := <-requests: + require.Equal(t, resp.Msg.GetDeploymentId(), req.GetDeploymentId()) + dockerImage, ok := req.GetSource().(*hydrav1.DeployRequest_DockerImage) + require.True(t, ok, "expected DockerImage source") + require.Equal(t, "nginx:latest", dockerImage.DockerImage.GetImage()) + case <-time.After(10 * time.Second): + t.Fatal("expected deployment workflow invocation") + } + + deployment, err := db.Query.FindDeploymentById(ctx, harness.DB.RO(), resp.Msg.GetDeploymentId()) + require.NoError(t, err) + require.Equal(t, project.ID, deployment.ProjectID) + require.Equal(t, db.DeploymentsStatusPending, deployment.Status) +} diff --git a/svc/ctrl/api/doc.go b/svc/ctrl/api/doc.go index 8cb1891491..f1f5f8b1c3 100644 --- a/svc/ctrl/api/doc.go +++ b/svc/ctrl/api/doc.go @@ -6,21 +6,12 @@ // // # Architecture // -// The control plane sits at the center of Unkey's infrastructure, coordinating between: -// - Sentinel instances that run customer workloads -// - Restate for durable async workflow execution -// - S3-compatible storage for build artifacts -// - ACME providers for automatic TLS certificates -// -// # Services -// -// The server exposes several Connect RPC services: -// -// - [ctrl.Ctrl] - Core control plane operations -// - [deployment.Deployment] - Application deployment workflows -// - [acme.Acme] - ACME certificate management and HTTP-01 challenges -// - [openapi.OpenApi] - OpenAPI specification management -// - [cluster.Cluster] - Cluster coordination and sentinel 
management +// The control plane sits at the center of Unkey's infrastructure. It coordinates +// sentinel instances that run customer workloads, Restate for durable workflow +// execution, build artifact storage, and ACME providers for automatic TLS +// certificates. Connect RPC services are exposed for core control plane +// operations, deployment workflows, ACME management, OpenAPI specs, and cluster +// coordination. // // # Usage // @@ -30,7 +21,6 @@ // InstanceID: "ctrl-1", // HttpPort: 8080, // DatabasePrimary: "postgres://...", -// Clock: clock.RealClock{}, // Restate: api.RestateConfig{ // URL: "http://restate:8080", // AdminURL: "http://restate:9070", diff --git a/svc/ctrl/api/github_webhook.go b/svc/ctrl/api/github_webhook.go new file mode 100644 index 0000000000..c15089e796 --- /dev/null +++ b/svc/ctrl/api/github_webhook.go @@ -0,0 +1,293 @@ +package api + +import ( + "context" + "database/sql" + "encoding/json" + "io" + "net/http" + "strings" + "time" + + restateingress "github.com/restatedev/sdk-go/ingress" + hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/pkg/db" + dbtype "github.com/unkeyed/unkey/pkg/db/types" + "github.com/unkeyed/unkey/pkg/otel/logging" + "github.com/unkeyed/unkey/pkg/uid" + githubclient "github.com/unkeyed/unkey/svc/ctrl/worker/github" +) + +const maxWebhookBodySize = 2 * 1024 * 1024 // 2 MB + +// GitHubWebhook handles incoming GitHub App webhook events and triggers +// deployment workflows via Restate. It validates webhook signatures using +// the configured secret before processing any events. +type GitHubWebhook struct { + db db.Database + logger logging.Logger + restate *restateingress.Client + webhookSecret string +} + +// ServeHTTP validates the webhook signature and dispatches to event-specific +// handlers. Currently supports push events for triggering deployments. +// Unknown event types are acknowledged with 200 OK but not processed. 
+func (s *GitHubWebhook) ServeHTTP(w http.ResponseWriter, r *http.Request) { + s.logger.Info("GitHub webhook request received", + "method", r.Method, + "path", r.URL.Path, + "remote_addr", r.RemoteAddr, + ) + + if r.Method != http.MethodPost { + s.logger.Warn("GitHub webhook rejected: method not allowed", "method", r.Method) + http.Error(w, "method not allowed", http.StatusMethodNotAllowed) + return + } + + event := r.Header.Get("X-GitHub-Event") + if event == "" { + http.Error(w, "missing X-GitHub-Event header", http.StatusBadRequest) + return + } + + signature := r.Header.Get("X-Hub-Signature-256") + if signature == "" { + s.logger.Warn("GitHub webhook rejected: missing signature header") + http.Error(w, "missing X-Hub-Signature-256 header", http.StatusUnauthorized) + return + } + + body, err := io.ReadAll(http.MaxBytesReader(w, r.Body, maxWebhookBodySize)) + + if err != nil { + s.logger.Warn("GitHub webhook rejected: failed to read body", "error", err) + http.Error(w, "failed to read body", http.StatusBadRequest) + return + } + + if !githubclient.VerifyWebhookSignature(body, signature, s.webhookSecret) { + s.logger.Warn("GitHub webhook rejected: invalid signature") + http.Error(w, "invalid signature", http.StatusUnauthorized) + return + } + + s.logger.Info("GitHub webhook signature verified", "event", event) + + switch event { + case "push": + s.handlePush(r.Context(), w, body) + case "installation": + s.logger.Info("Installation event received") + w.WriteHeader(http.StatusOK) + default: + s.logger.Info("Unhandled event type", "event", event) + w.WriteHeader(http.StatusOK) + } + +} + +// handlePush processes push events by creating a deployment record and +// starting the deploy workflow. Maps branches to environments: the project's +// default branch deploys to production, all others to preview. 
+func (s *GitHubWebhook) handlePush(ctx context.Context, w http.ResponseWriter, body []byte) { + var payload pushPayload + if err := json.Unmarshal(body, &payload); err != nil { + s.logger.Error("failed to parse push payload", "error", err) + http.Error(w, "failed to parse push payload", http.StatusBadRequest) + return + } + + branch := extractBranchFromRef(payload.Ref) + if branch == "" { + s.logger.Info("Ignoring non-branch push", "ref", payload.Ref) + w.WriteHeader(http.StatusOK) + return + } + + repoConnection, err := db.Query.FindGithubRepoConnection(ctx, s.db.RO(), db.FindGithubRepoConnectionParams{ + InstallationID: payload.Installation.ID, + RepositoryID: payload.Repository.ID, + }) + if err != nil { + if db.IsNotFound(err) { + s.logger.Info("No repo connection found for repository", "repository", payload.Repository.FullName) + w.WriteHeader(http.StatusOK) + return + } + s.logger.Error("failed to find repo connection", "error", err, "repository", payload.Repository.FullName) + http.Error(w, "failed to find repo connection", http.StatusInternalServerError) + return + } + + project, err := db.Query.FindProjectById(ctx, s.db.RO(), repoConnection.ProjectID) + if err != nil { + if db.IsNotFound(err) { + s.logger.Info("No project found for repo connection", "projectId", repoConnection.ProjectID) + w.WriteHeader(http.StatusOK) + return + } + s.logger.Error("failed to find project", "error", err, "projectId", repoConnection.ProjectID) + http.Error(w, "failed to find project", http.StatusInternalServerError) + return + } + + defaultBranch := "main" + if project.DefaultBranch.Valid && project.DefaultBranch.String != "" { + defaultBranch = project.DefaultBranch.String + } + + // Determine environment based on branch + envSlug := "preview" + if branch == defaultBranch { + envSlug = "production" + } + + env, err := db.Query.FindEnvironmentByProjectIdAndSlug(ctx, s.db.RO(), db.FindEnvironmentByProjectIdAndSlugParams{ + WorkspaceID: project.WorkspaceID, + ProjectID: 
project.ID, + Slug: envSlug, + }) + if err != nil { + s.logger.Error("failed to find environment", "error", err, "projectId", project.ID, "envSlug", envSlug) + http.Error(w, "failed to find environment", http.StatusInternalServerError) + return + } + + // Create deployment record + deploymentID := uid.New(uid.DeploymentPrefix) + now := time.Now().UnixMilli() + gitCommit := s.extractGitCommitInfo(&payload, branch) + + err = db.Query.InsertDeployment(ctx, s.db.RW(), db.InsertDeploymentParams{ + ID: deploymentID, + K8sName: uid.DNS1035(12), + WorkspaceID: project.WorkspaceID, + ProjectID: project.ID, + EnvironmentID: env.ID, + SentinelConfig: env.SentinelConfig, + EncryptedEnvironmentVariables: []byte{}, + Command: dbtype.StringSlice{}, + Status: db.DeploymentsStatusPending, + CreatedAt: now, + UpdatedAt: sql.NullInt64{Valid: false}, + GitCommitSha: sql.NullString{String: payload.After, Valid: payload.After != ""}, + GitBranch: sql.NullString{String: branch, Valid: branch != ""}, + GitCommitMessage: sql.NullString{String: gitCommit.message, Valid: gitCommit.message != ""}, + GitCommitAuthorHandle: sql.NullString{String: gitCommit.authorHandle, Valid: gitCommit.authorHandle != ""}, + GitCommitAuthorAvatarUrl: sql.NullString{String: gitCommit.authorAvatarURL, Valid: gitCommit.authorAvatarURL != ""}, + GitCommitTimestamp: sql.NullInt64{Int64: gitCommit.timestamp, Valid: gitCommit.timestamp != 0}, + OpenapiSpec: sql.NullString{Valid: false}, + CpuMillicores: 256, + MemoryMib: 256, + }) + if err != nil { + s.logger.Error("failed to insert deployment", "error", err) + http.Error(w, "failed to create deployment", http.StatusInternalServerError) + return + } + + s.logger.Info("Created deployment record", + "deployment_id", deploymentID, + "project_id", project.ID, + "repository", payload.Repository.FullName, + "commit_sha", payload.After, + "branch", branch, + "environment", envSlug, + ) + + // Start deploy workflow with GitSource + deployClient := 
hydrav1.NewDeploymentServiceIngressClient(s.restate, deploymentID) + invocation, err := deployClient.Deploy().Send(ctx, &hydrav1.DeployRequest{ + DeploymentId: deploymentID, + Source: &hydrav1.DeployRequest_Git{ + Git: &hydrav1.GitSource{ + InstallationId: repoConnection.InstallationID, + Repository: payload.Repository.FullName, + CommitSha: payload.After, + ContextPath: ".", // TODO read from project settings + DockerfilePath: "Dockerfile", // TODO read from project settings + }, + }, + }) + if err != nil { + s.logger.Error("failed to start deployment workflow", "error", err) + http.Error(w, "failed to start workflow", http.StatusInternalServerError) + return + } + + s.logger.Info("Deployment workflow started", + "invocation_id", invocation.Id, + "deployment_id", deploymentID, + "project_id", project.ID, + "repository", payload.Repository.FullName, + "commit_sha", payload.After, + ) + + w.WriteHeader(http.StatusOK) +} + +// extractBranchFromRef extracts the branch name from a Git ref. +// Returns empty string for non-branch refs (e.g., tags). +func extractBranchFromRef(ref string) string { + const prefix = "refs/heads/" + if !strings.HasPrefix(ref, prefix) { + return "" + } + return strings.TrimPrefix(ref, prefix) +} + +// gitCommitInfo holds extracted commit metadata for deployment records. +type gitCommitInfo struct { + message string + authorHandle string + authorAvatarURL string + timestamp int64 +} + +// extractGitCommitInfo extracts commit metadata from the push payload, +// preferring HeadCommit when available and falling back to the first commit. 
+func (s *GitHubWebhook) extractGitCommitInfo(payload *pushPayload, branch string) gitCommitInfo { + headCommit := payload.HeadCommit + if headCommit == nil && len(payload.Commits) > 0 { + c := payload.Commits[0] + headCommit = &pushCommit{ + ID: c.ID, + Message: c.Message, + Timestamp: c.Timestamp, + Author: c.Author, + } + } + + if headCommit == nil { + return gitCommitInfo{ + message: "", + authorHandle: "", + authorAvatarURL: "", + timestamp: 0, + } + } + + authorHandle := headCommit.Author.Username + if authorHandle == "" { + authorHandle = headCommit.Author.Name + } + + var timestamp int64 + if t, err := time.Parse(time.RFC3339, headCommit.Timestamp); err == nil { + timestamp = t.UnixMilli() + } + + message := headCommit.Message + if idx := strings.Index(message, "\n"); idx != -1 { + message = message[:idx] + } + + return gitCommitInfo{ + message: message, + authorHandle: authorHandle, + authorAvatarURL: payload.Sender.AvatarURL, + timestamp: timestamp, + } +} diff --git a/svc/ctrl/api/github_webhook_integration_test.go b/svc/ctrl/api/github_webhook_integration_test.go new file mode 100644 index 0000000000..d5102e5945 --- /dev/null +++ b/svc/ctrl/api/github_webhook_integration_test.go @@ -0,0 +1,137 @@ +package api + +import ( + "bytes" + "crypto/hmac" + "crypto/sha256" + "database/sql" + "encoding/hex" + "fmt" + "net/http" + "testing" + "time" + + restate "github.com/restatedev/sdk-go" + "github.com/stretchr/testify/require" + hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/pkg/db" + "github.com/unkeyed/unkey/pkg/uid" + "github.com/unkeyed/unkey/svc/ctrl/integration/seed" +) + +const testRepoFullName = "acme/repo" + +func TestGitHubWebhook_Push_TriggersDeployWorkflow(t *testing.T) { + deployRequests := make(chan *hydrav1.DeployRequest, 1) + harness := newWebhookHarness(t, webhookHarnessConfig{ + Services: []restate.ServiceDefinition{hydrav1.NewDeploymentServiceServer(&mockDeploymentService{requests: deployRequests})}, + }) + 
projectID := insertRepoConnection(t, harness, testRepoFullName, 101, 202) + + resp, err := sendWebhook(fmt.Sprintf("%s/webhooks/github", harness.CtrlURL), webhookBody(testRepoFullName), harness.Secret) + require.NoError(t, err) + require.Equal(t, http.StatusOK, resp.StatusCode) + _ = resp.Body.Close() + + select { + case req := <-deployRequests: + require.NotEmpty(t, req.GetDeploymentId()) + gitSource := req.GetGit() + require.NotNil(t, gitSource, "expected GitSource in deploy request") + require.Equal(t, int64(101), gitSource.GetInstallationId()) + require.Equal(t, testRepoFullName, gitSource.GetRepository()) + require.Equal(t, "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", gitSource.GetCommitSha()) + _ = projectID // projectID is stored in the deployment record, not passed to workflow + case <-time.After(10 * time.Second): + t.Fatal("expected deploy workflow invocation") + } +} + +func TestGitHubWebhook_InvalidSignature(t *testing.T) { + deployRequests := make(chan *hydrav1.DeployRequest, 1) + harness := newWebhookHarness(t, webhookHarnessConfig{ + Services: []restate.ServiceDefinition{hydrav1.NewDeploymentServiceServer(&mockDeploymentService{requests: deployRequests})}, + }) + _ = insertRepoConnection(t, harness, testRepoFullName, 101, 202) + + resp, err := sendWebhook(fmt.Sprintf("%s/webhooks/github", harness.CtrlURL), webhookBody(testRepoFullName), "wrong-secret") + require.NoError(t, err) + require.Equal(t, http.StatusUnauthorized, resp.StatusCode) + _ = resp.Body.Close() + + select { + case <-deployRequests: + t.Fatal("unexpected deploy workflow invocation") + case <-time.After(1 * time.Second): + } +} + +func insertRepoConnection(t *testing.T, harness *webhookHarness, repoFullName string, installationID, repositoryID int64) string { + t.Helper() + + projectID := uid.New("prj") + project := harness.Seed.CreateProject(harness.ctx, seed.CreateProjectRequest{ + ID: projectID, + WorkspaceID: harness.Seed.Resources.UserWorkspace.ID, + Name: "test-project", + Slug: 
uid.New("slug"), + GitRepositoryURL: fmt.Sprintf("https://github.com/%s", repoFullName), + DefaultBranch: "main", + DeleteProtection: false, + }) + + // Create production environment (required for webhook handler to find environment by slug) + harness.Seed.CreateEnvironment(harness.ctx, seed.CreateEnvironmentRequest{ + ID: uid.New("env"), + WorkspaceID: harness.Seed.Resources.UserWorkspace.ID, + ProjectID: project.ID, + Slug: "production", + Description: "", + SentinelConfig: []byte("{}"), + DeleteProtection: false, + }) + + createdAt := time.Now().UnixMilli() + params := db.InsertGithubRepoConnectionParams{ + ProjectID: project.ID, + InstallationID: installationID, + RepositoryID: repositoryID, + RepositoryFullName: repoFullName, + CreatedAt: createdAt, + UpdatedAt: sql.NullInt64{Valid: false}, + } + require.NoError(t, db.Query.InsertGithubRepoConnection(harness.ctx, harness.DB.RW(), params)) + + return project.ID +} + +func sendWebhook(url string, body []byte, secret string) (*http.Response, error) { + req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(body)) + if err != nil { + return nil, err + } + + req.Header.Set("X-GitHub-Event", "push") + req.Header.Set("X-Hub-Signature-256", sign(body, secret)) + + client := &http.Client{Timeout: 10 * time.Second} + return client.Do(req) +} + +func webhookBody(repoFullName string) []byte { + return []byte(fmt.Sprintf(`{ + "ref": "refs/heads/main", + "after": "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + "installation": {"id": 101}, + "repository": {"id": 202, "full_name": "%s"}, + "commits": [{"id": "c1", "message": "m", "timestamp": "2024-01-01T00:00:00Z", "author": {"name": "n", "username": "u"}}], + "head_commit": {"id": "c1", "message": "hello\nworld", "timestamp": "2024-01-01T00:00:00Z", "author": {"name": "n", "username": "u"}}, + "sender": {"login": "u", "avatar_url": "https://avatar"} + }`, repoFullName)) +} + +func sign(body []byte, secret string) string { + mac := hmac.New(sha256.New, 
[]byte(secret)) + mac.Write(body) + return "sha256=" + hex.EncodeToString(mac.Sum(nil)) +} diff --git a/svc/ctrl/api/harness_test.go b/svc/ctrl/api/harness_test.go new file mode 100644 index 0000000000..add1c74330 --- /dev/null +++ b/svc/ctrl/api/harness_test.go @@ -0,0 +1,241 @@ +package api + +import ( + "bytes" + "context" + "crypto/tls" + "fmt" + "net" + "net/http" + "net/http/httptest" + "runtime" + "strings" + "testing" + "time" + + "connectrpc.com/connect" + restate "github.com/restatedev/sdk-go" + restateServer "github.com/restatedev/sdk-go/server" + "github.com/stretchr/testify/require" + "github.com/unkeyed/unkey/pkg/db" + "github.com/unkeyed/unkey/pkg/dockertest" + "github.com/unkeyed/unkey/pkg/otel/logging" + "github.com/unkeyed/unkey/pkg/uid" + "github.com/unkeyed/unkey/svc/ctrl/integration/seed" + "golang.org/x/net/http2" + "golang.org/x/net/http2/h2c" +) + +type webhookHarnessConfig struct { + Services []restate.ServiceDefinition + WebhookSecret string +} + +type webhookHarness struct { + ctx context.Context + CtrlURL string + DB db.Database + Seed *seed.Seeder + Secret string +} + +func newWebhookHarness(t *testing.T, cfg webhookHarnessConfig) *webhookHarness { + t.Helper() + + ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second) + t.Cleanup(cancel) + + restateCfg := dockertest.Restate(t) + + restateSrv := restateServer.NewRestate().WithLogger(logging.Handler(), false) + for _, service := range cfg.Services { + restateSrv.Bind(service) + } + + restateHandler, err := restateSrv.Handler() + require.NoError(t, err) + + workerMux := http.NewServeMux() + workerMux.HandleFunc("/health", func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusOK) + }) + workerMux.Handle("/", restateHandler) + + workerListener, err := net.Listen("tcp", "0.0.0.0:0") + require.NoError(t, err) + workerServer := httptest.NewUnstartedServer(h2c.NewHandler(workerMux, &http2.Server{})) + workerServer.Listener = workerListener + 
workerServer.Start() + t.Cleanup(workerServer.Close) + + workerPort := workerListener.Addr().(*net.TCPAddr).Port + registration := &restateRegistration{adminURL: restateCfg.AdminURL, registerAs: fmt.Sprintf("http://%s:%d", dockerHost(), workerPort)} + require.NoError(t, registration.register(ctx)) + + mysqlCfg := dockertest.MySQL(t) + database, err := db.New(db.Config{ + Logger: logging.NewNoop(), + PrimaryDSN: mysqlCfg.DSN, + ReadOnlyDSN: "", + }) + require.NoError(t, err) + t.Cleanup(func() { require.NoError(t, database.Close()) }) + + seeder := seed.New(t, database, nil) + seeder.Seed(ctx) + + ctrlAddr := pickAddr(t) + ctrlPort := ctrlAddr.Port + + secret := cfg.WebhookSecret + if secret == "" { + secret = uid.New("whsec") + } + + apiConfig := Config{ + InstanceID: "test", + Region: "local", + HttpPort: ctrlPort, + PrometheusPort: 0, + DatabasePrimary: mysqlCfg.DSN, + OtelEnabled: false, + OtelTraceSamplingRate: 0, + TLSConfig: nil, + AuthToken: "", + Restate: RestateConfig{ + URL: restateCfg.IngressURL, + APIKey: "", + }, + AvailableRegions: []string{"local.dev"}, + GitHubWebhookSecret: secret, + DefaultDomain: "", + RegionalDomain: "", + } + + ctrlCtx, ctrlCancel := context.WithCancel(ctx) + t.Cleanup(ctrlCancel) + + go func() { + require.NoError(t, Run(ctrlCtx, apiConfig)) + }() + + ctrlURL := fmt.Sprintf("http://127.0.0.1:%d", ctrlPort) + require.Eventually(t, func() bool { + resp, err := http.Get(ctrlURL + "/health") + if err != nil { + return false + } + defer func() { _ = resp.Body.Close() }() + return resp.StatusCode == http.StatusOK + }, 10*time.Second, 200*time.Millisecond) + + return &webhookHarness{ + ctx: ctx, + CtrlURL: ctrlURL, + DB: database, + Seed: seeder, + Secret: secret, + } +} + +func (h *webhookHarness) ConnectClient() *http.Client { + if !strings.HasPrefix(h.CtrlURL, "http://") { + return &http.Client{Timeout: 10 * time.Second} + } + + return &http.Client{ + Timeout: 10 * time.Second, + Transport: &http2.Transport{ + AllowHTTP: true, + 
DialTLSContext: func(ctx context.Context, network, addr string, _ *tls.Config) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, network, addr) + }, + ReadIdleTimeout: 10 * time.Second, + PingTimeout: 5 * time.Second, + }, + } +} + +func (h *webhookHarness) ConnectOptions() []connect.ClientOption { + return []connect.ClientOption{} +} + +func (h *webhookHarness) RequestContext() context.Context { + return context.Background() +} + +func (h *webhookHarness) CreateProject(ctx context.Context, req seed.CreateProjectRequest) db.Project { + return h.Seed.CreateProject(ctx, req) +} + +func (h *webhookHarness) CreateEnvironment(ctx context.Context, req seed.CreateEnvironmentRequest) db.Environment { + return h.Seed.CreateEnvironment(ctx, req) +} + +type restateRegistration struct { + adminURL string + registerAs string +} + +func (r *restateRegistration) register(ctx context.Context) error { + registerURL := r.adminURL + "/deployments" + payload := []byte("{\"uri\": \"" + r.registerAs + "\"}") + + req, err := http.NewRequestWithContext(ctx, http.MethodPost, registerURL, bytes.NewReader(payload)) + if err != nil { + return err + } + requireJSON(req) + + client := &http.Client{Timeout: 10 * time.Second} + resp, err := client.Do(req) + if err != nil { + return err + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + return fmtStatus(resp.StatusCode) + } + return nil +} + +func requireJSON(req *http.Request) { + req.Header.Set("Content-Type", "application/json") +} + +type statusErr struct { + code int +} + +func (e statusErr) Error() string { + return fmt.Sprintf("unexpected status code: %d", e.code) +} + +func fmtStatus(code int) error { + return statusErr{code: code} +} + +type addrInfo struct { + Host string + Port int +} + +func pickAddr(t *testing.T) addrInfo { + listener, err := net.Listen("tcp", "127.0.0.1:0") + require.NoError(t, err) + defer func() { require.NoError(t, listener.Close()) }() + + 
addr, ok := listener.Addr().(*net.TCPAddr) + require.True(t, ok) + + return addrInfo{Host: addr.IP.String(), Port: addr.Port} +} + +func dockerHost() string { + if runtime.GOOS == "darwin" { + return "host.docker.internal" + } + return "172.17.0.1" +} diff --git a/svc/ctrl/api/run.go b/svc/ctrl/api/run.go index f19a2818f2..2b8e54765a 100644 --- a/svc/ctrl/api/run.go +++ b/svc/ctrl/api/run.go @@ -20,7 +20,6 @@ import ( restateadmin "github.com/unkeyed/unkey/pkg/restate/admin" "github.com/unkeyed/unkey/pkg/shutdown" pkgversion "github.com/unkeyed/unkey/pkg/version" - "github.com/unkeyed/unkey/svc/ctrl/pkg/s3" "github.com/unkeyed/unkey/svc/ctrl/services/acme" "github.com/unkeyed/unkey/svc/ctrl/services/cluster" "github.com/unkeyed/unkey/svc/ctrl/services/ctrl" @@ -85,18 +84,6 @@ func Run(ctx context.Context, cfg Config) error { logger.Info("TLS is enabled, server will use HTTPS") } - buildStorage, err := s3.NewS3(s3.S3Config{ - S3PresignURL: "", - S3URL: cfg.BuildS3.URL, - S3Bucket: cfg.BuildS3.Bucket, - S3AccessKeyID: cfg.BuildS3.AccessKeyID, - S3AccessKeySecret: cfg.BuildS3.AccessKeySecret, - Logger: logger, - }) - if err != nil { - return fmt.Errorf("unable to create build storage backend: %w", err) - } - // Initialize database database, err := db.New(db.Config{ PrimaryDSN: cfg.DatabasePrimary, @@ -168,7 +155,6 @@ func Run(ctx context.Context, cfg Config) error { Restate: restateClient, Logger: logger, AvailableRegions: cfg.AvailableRegions, - BuildStorage: buildStorage, }))) mux.Handle(ctrlv1connect.NewOpenApiServiceHandler(openapi.New(database, logger))) @@ -187,6 +173,18 @@ func Run(ctx context.Context, cfg Config) error { CnameDomain: cfg.CnameDomain, }))) + if cfg.GitHubWebhookSecret != "" { + mux.Handle("POST /webhooks/github", &GitHubWebhook{ + db: database, + logger: logger, + restate: restateClient, + webhookSecret: cfg.GitHubWebhookSecret, + }) + logger.Info("GitHub webhook handler registered") + } else { + logger.Info("GitHub webhook handler not 
registered, no webhook secret configured") + } + // Configure server addr := fmt.Sprintf(":%d", cfg.HttpPort) diff --git a/svc/ctrl/api/types.go b/svc/ctrl/api/types.go new file mode 100644 index 0000000000..cd8a7f799e --- /dev/null +++ b/svc/ctrl/api/types.go @@ -0,0 +1,37 @@ +package api + +type pushPayload struct { + Ref string `json:"ref"` + After string `json:"after"` + Installation pushInstallation `json:"installation"` + Repository pushRepository `json:"repository"` + Commits []pushCommit `json:"commits"` + HeadCommit *pushCommit `json:"head_commit"` + Sender pushSender `json:"sender"` +} + +type pushInstallation struct { + ID int64 `json:"id"` +} + +type pushRepository struct { + ID int64 `json:"id"` + FullName string `json:"full_name"` +} + +type pushCommit struct { + ID string `json:"id"` + Message string `json:"message"` + Timestamp string `json:"timestamp"` + Author pushCommitAuthor `json:"author"` +} + +type pushCommitAuthor struct { + Name string `json:"name"` + Username string `json:"username"` +} + +type pushSender struct { + Login string `json:"login"` + AvatarURL string `json:"avatar_url"` +} diff --git a/svc/ctrl/integration/BUILD.bazel b/svc/ctrl/integration/BUILD.bazel index f9564e4d3e..ec2fcdaa3f 100644 --- a/svc/ctrl/integration/BUILD.bazel +++ b/svc/ctrl/integration/BUILD.bazel @@ -7,8 +7,8 @@ go_library( visibility = ["//visibility:public"], deps = [ "//pkg/db", + "//pkg/dockertest", "//pkg/otel/logging", - "//pkg/testutil/containers", "//pkg/uid", "//svc/ctrl/integration/seed", "@com_github_stretchr_testify//require", diff --git a/svc/ctrl/integration/harness.go b/svc/ctrl/integration/harness.go index be75125c85..6944c28473 100644 --- a/svc/ctrl/integration/harness.go +++ b/svc/ctrl/integration/harness.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/require" "github.com/unkeyed/unkey/pkg/db" + "github.com/unkeyed/unkey/pkg/dockertest" "github.com/unkeyed/unkey/pkg/otel/logging" - "github.com/unkeyed/unkey/pkg/testutil/containers" 
"github.com/unkeyed/unkey/pkg/uid" "github.com/unkeyed/unkey/svc/ctrl/integration/seed" ) @@ -30,9 +30,8 @@ func New(t *testing.T) *Harness { ctx := context.Background() - mysqlHostCfg := containers.MySQL(t) - mysqlHostCfg.DBName = "unkey" - mysqlHostDSN := mysqlHostCfg.FormatDSN() + mysqlCfg := dockertest.MySQL(t) + mysqlHostDSN := mysqlCfg.DSN database, err := db.New(db.Config{ Logger: logging.NewNoop(), diff --git a/svc/ctrl/pkg/build/BUILD.bazel b/svc/ctrl/pkg/build/BUILD.bazel deleted file mode 100644 index 673dbe1b16..0000000000 --- a/svc/ctrl/pkg/build/BUILD.bazel +++ /dev/null @@ -1,34 +0,0 @@ -load("@rules_go//go:def.bzl", "go_library") - -go_library( - name = "build", - srcs = [ - "build.go", - "doc.go", - "service.go", - ], - importpath = "github.com/unkeyed/unkey/svc/ctrl/pkg/build", - visibility = ["//visibility:public"], - deps = [ - "//gen/proto/hydra/v1:hydra", - "//pkg/assert", - "//pkg/clickhouse", - "//pkg/clickhouse/schema", - "//pkg/db", - "//pkg/otel/logging", - "//pkg/ptr", - "@build_buf_gen_go_depot_api_connectrpc_go//depot/core/v1/corev1connect", - "@build_buf_gen_go_depot_api_protocolbuffers_go//depot/core/v1:core", - "@com_connectrpc_connect//:connect", - "@com_github_depot_depot_go//build", - "@com_github_depot_depot_go//machine", - "@com_github_depot_depot_go//proto/depot/cli/v1:cli", - "@com_github_docker_cli//cli/config/configfile", - "@com_github_docker_cli//cli/config/types", - "@com_github_moby_buildkit//client", - "@com_github_moby_buildkit//session", - "@com_github_moby_buildkit//session/auth/authprovider", - "@com_github_opencontainers_go_digest//:go-digest", - "@com_github_restatedev_sdk_go//:sdk-go", - ], -) diff --git a/svc/ctrl/pkg/build/doc.go b/svc/ctrl/pkg/build/doc.go deleted file mode 100644 index b843b29a22..0000000000 --- a/svc/ctrl/pkg/build/doc.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package build provides container image building via [Depot.dev]. 
-// -// Unkey uses Depot for container builds because it provides isolated build -// environments with automatic caching, eliminating the need to manage buildkit -// infrastructure. Each Unkey project gets a dedicated Depot project, ensuring -// cache isolation between tenants while sharing cache within a project. -// -// # Architecture -// -// The build service operates as a Restate workflow step within the deployment -// pipeline. When a deployment requires building from source, the deploy worker -// calls [Depot.BuildDockerImage] which: -// -// 1. Creates or retrieves a Depot project for the Unkey project -// 2. Acquires a build machine from Depot's infrastructure -// 3. Connects to the buildkit instance on that machine -// 4. Streams build context from S3 and executes the build -// 5. Pushes the resulting image to the configured registry -// 6. Records build step telemetry to ClickHouse -// -// # Usage -// -// Create a Depot backend and register it with Restate: -// -// backend := build.New(build.Config{ -// InstanceID: "build-instance-001", -// DB: database, -// DepotConfig: build.DepotConfig{ -// APIUrl: "https://api.depot.dev", -// ProjectRegion: "us-east-1", -// }, -// RegistryConfig: build.RegistryConfig{ -// URL: "registry.depot.dev", -// Username: "x-token", -// Password: depotToken, -// }, -// BuildPlatform: build.BuildPlatform{ -// Platform: "linux/amd64", -// Architecture: "amd64", -// }, -// Clickhouse: clickhouseClient, -// Logger: logger, -// }) -// -// The backend implements [hydrav1.BuildServiceServer] and exposes -// [Depot.BuildDockerImage] as an RPC endpoint. -// -// # Cache Policy -// -// New Depot projects are created with a cache policy of 50GB retained for 14 -// days. This balances build speed (cache hits) against storage costs. 
-// -// [Depot.dev]: https://depot.dev -package build diff --git a/svc/ctrl/pkg/build/service.go b/svc/ctrl/pkg/build/service.go deleted file mode 100644 index a7dbe17eff..0000000000 --- a/svc/ctrl/pkg/build/service.go +++ /dev/null @@ -1,92 +0,0 @@ -package build - -import ( - hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" - "github.com/unkeyed/unkey/pkg/clickhouse" - "github.com/unkeyed/unkey/pkg/db" - "github.com/unkeyed/unkey/pkg/otel/logging" -) - -// BuildPlatform specifies the target platform for container builds. -// Platform is the full platform string (e.g., "linux/amd64") while -// Architecture is just the architecture portion (e.g., "amd64") used -// when requesting build machines from Depot. -type BuildPlatform struct { - Platform string - Architecture string -} - -// DepotConfig holds configuration for connecting to the Depot.dev API. -type DepotConfig struct { - // APIUrl is the base URL for the Depot API, typically "https://api.depot.dev". - APIUrl string - - // ProjectRegion determines where Depot projects are created. Build machines - // run in this region, so choose one close to your registry for faster pushes. - ProjectRegion string -} - -// RegistryConfig holds credentials for the container registry where built -// images are pushed. The Password field is also used as the Depot API token -// for authentication. -type RegistryConfig struct { - URL string - Username string - Password string -} - -// Depot orchestrates container builds using the Depot.dev platform. It -// implements [hydrav1.BuildServiceServer] for integration with Restate -// workflows. -// -// Create instances with [New]. The zero value is not usable. 
-type Depot struct { - instanceID string - db db.Database - depotConfig DepotConfig - registryConfig RegistryConfig - buildPlatform BuildPlatform - clickhouse clickhouse.ClickHouse - logger logging.Logger -} - -var _ hydrav1.BuildServiceServer = (*Depot)(nil) - -// Config holds all dependencies required to create a [Depot] service. -// All fields are required. -type Config struct { - // InstanceID identifies this service instance in logs and telemetry. - InstanceID string - - // DB provides database access for reading and updating project mappings. - DB db.Database - - // DepotConfig configures the Depot API connection. - DepotConfig DepotConfig - - // Clickhouse receives build step telemetry for observability. - Clickhouse clickhouse.ClickHouse - - // RegistryConfig provides credentials for the container registry. - RegistryConfig RegistryConfig - - // BuildPlatform specifies the target platform for all builds. - BuildPlatform BuildPlatform - - // Logger is used for structured logging throughout the build process. - Logger logging.Logger -} - -// New creates a [Depot] service from the provided configuration. All fields -// in [Config] must be set; the function does not validate inputs. 
-func New(cfg Config) *Depot { - return &Depot{ - instanceID: cfg.InstanceID, - db: cfg.DB, - depotConfig: cfg.DepotConfig, - clickhouse: cfg.Clickhouse, - registryConfig: cfg.RegistryConfig, - buildPlatform: cfg.BuildPlatform, - logger: cfg.Logger, - } -} diff --git a/svc/ctrl/proto/ctrl/v1/deployment.proto b/svc/ctrl/proto/ctrl/v1/deployment.proto index 75c773a868..946264d535 100644 --- a/svc/ctrl/proto/ctrl/v1/deployment.proto +++ b/svc/ctrl/proto/ctrl/v1/deployment.proto @@ -27,11 +27,9 @@ message CreateDeploymentRequest { string branch = 3; string environment_slug = 4; - // Build source, we can either build it from scratch or accept prebuilt image - oneof source { - BuildContext build_context = 5; - string docker_image = 6; // Prebuilt image reference - } + // Build source - currently only prebuilt docker images are supported via API + // GitHub source builds are triggered automatically via webhook + string docker_image = 6; // Prebuilt image reference // Git information optional GitCommitInfo git_commit = 7; @@ -44,11 +42,6 @@ message CreateDeploymentRequest { repeated string command = 9; } -message BuildContext { - string build_context_path = 1; // S3 key for uploaded build context - optional string dockerfile_path = 2; // Path to Dockerfile within context (default: "Dockerfile") -} - message GitCommitInfo { string commit_sha = 1; string commit_message = 2; @@ -59,7 +52,7 @@ message GitCommitInfo { message CreateDeploymentResponse { string deployment_id = 1; - DeploymentStatus status = 2; // Will be PENDING or BUILDING + DeploymentStatus status = 2; // Will be PENDING or DEPLOYING } message GetDeploymentRequest { @@ -151,18 +144,8 @@ message PromoteRequest { message PromoteResponse {} -message CreateS3UploadURLRequest { - string unkey_project_id = 1; -} - -message CreateS3UploadURLResponse { - string upload_url = 1; // Presigned PUT URL - string build_context_path = 2; // S3 key to use in CreateBuild -} - service DeploymentService { - rpc 
CreateS3UploadURL(CreateS3UploadURLRequest) returns (CreateS3UploadURLResponse) {} - // Create a new deployment + // Create a new deployment with a prebuilt docker image rpc CreateDeployment(CreateDeploymentRequest) returns (CreateDeploymentResponse) {} // Get deployment details diff --git a/svc/ctrl/proto/hydra/v1/build.proto b/svc/ctrl/proto/hydra/v1/build.proto deleted file mode 100644 index e7fe0002cc..0000000000 --- a/svc/ctrl/proto/hydra/v1/build.proto +++ /dev/null @@ -1,27 +0,0 @@ -syntax = "proto3"; - -package hydra.v1; - -import "dev/restate/sdk/go.proto"; - -option go_package = "github.com/unkeyed/unkey/gen/proto/hydra/v1;hydrav1"; - -service BuildService { - option (dev.restate.sdk.go.service_type) = SERVICE; - - rpc BuildDockerImage(BuildDockerImageRequest) returns (BuildDockerImageResponse) {} -} - -message BuildDockerImageRequest { - string s3_url = 1; - string build_context_path = 2; - string dockerfile_path = 3; - string project_id = 4; - string deployment_id = 5; - string workspace_id = 6; -} -message BuildDockerImageResponse { - string depot_project_id = 1; - string depot_build_id = 2; - string image_name = 3; -} diff --git a/svc/ctrl/proto/hydra/v1/deployment.proto b/svc/ctrl/proto/hydra/v1/deployment.proto index 2ff2d0c03c..c5c445d98d 100644 --- a/svc/ctrl/proto/hydra/v1/deployment.proto +++ b/svc/ctrl/proto/hydra/v1/deployment.proto @@ -12,15 +12,29 @@ service DeploymentService { rpc Promote(PromoteRequest) returns (PromoteResponse) {} } +message DockerImage { + string image = 1; +} + +message GitSource { + int64 installation_id = 1; + string repository = 2; + string commit_sha = 3; + string context_path = 4; + string dockerfile_path = 5; +} + message DeployRequest { string deployment_id = 1; optional string key_auth_id = 2; - // Build source fields, exactly one of (context_key, docker_image) must be set - optional string build_context_path = 3; - optional string dockerfile_path = 4; - optional string docker_image = 5; + + oneof source { + 
GitSource git = 3; + DockerImage docker_image = 4; + } + // Container command override (e.g., ["./app", "serve"]) - repeated string command = 6; + repeated string command = 5; } message DeployResponse {} diff --git a/svc/ctrl/services/deployment/BUILD.bazel b/svc/ctrl/services/deployment/BUILD.bazel index 42dfc9ea48..f997032433 100644 --- a/svc/ctrl/services/deployment/BUILD.bazel +++ b/svc/ctrl/services/deployment/BUILD.bazel @@ -4,7 +4,6 @@ go_library( name = "deployment", srcs = [ "create_deployment.go", - "create_s3_upload_url.go", "doc.go", "get_deployment.go", "promote.go", @@ -21,11 +20,9 @@ go_library( "//pkg/db/types", "//pkg/otel/logging", "//pkg/uid", - "//svc/ctrl/pkg/s3", "@com_connectrpc_connect//:connect", "@com_github_restatedev_sdk_go//ingress", "@org_golang_google_protobuf//encoding/protojson", - "@org_golang_google_protobuf//proto", ], ) @@ -37,7 +34,6 @@ go_test( deps = [ "//gen/proto/ctrl/v1:ctrl", "//pkg/db", - "//pkg/ptr", "@com_github_stretchr_testify//require", ], ) diff --git a/svc/ctrl/services/deployment/create_deployment.go b/svc/ctrl/services/deployment/create_deployment.go index e346a2a969..bf10f150f0 100644 --- a/svc/ctrl/services/deployment/create_deployment.go +++ b/svc/ctrl/services/deployment/create_deployment.go @@ -14,7 +14,6 @@ import ( dbtype "github.com/unkeyed/unkey/pkg/db/types" "github.com/unkeyed/unkey/pkg/uid" "google.golang.org/protobuf/encoding/protojson" - "google.golang.org/protobuf/proto" ) const ( @@ -27,8 +26,7 @@ const ( ) // CreateDeployment creates a new deployment record and initiates an async Restate -// workflow. The deployment source must be either a build context (S3 path to a -// tar.gz archive with an optional Dockerfile path) or a prebuilt Docker image. +// workflow. The deployment source must be a prebuilt Docker image. 
// // The method looks up the project to infer the workspace, validates the // environment exists, fetches environment variables, and persists the deployment @@ -47,6 +45,12 @@ func (s *Service) CreateDeployment( fmt.Errorf("project_id is required")) } + dockerImage := req.Msg.GetDockerImage() + if dockerImage == "" { + return nil, connect.NewError(connect.CodeInvalidArgument, + fmt.Errorf("docker_image is required")) + } + // Lookup project and infer workspace from it project, err := db.Query.FindProjectById(ctx, s.db.RO(), req.Msg.GetProjectId()) if err != nil { @@ -141,38 +145,9 @@ func (s *Service) CreateDeployment( gitCommitTimestamp = gitCommit.GetTimestamp() } - var buildContextKey string - var dockerfilePath string - var dockerImage *string - - switch source := req.Msg.GetSource().(type) { - case *ctrlv1.CreateDeploymentRequest_BuildContext: - buildContextKey = source.BuildContext.GetBuildContextPath() - dockerfilePath = source.BuildContext.GetDockerfilePath() - if dockerfilePath == "" { - dockerfilePath = "./Dockerfile" - } - - case *ctrlv1.CreateDeploymentRequest_DockerImage: - image := source.DockerImage - dockerImage = &image - - default: - return nil, connect.NewError(connect.CodeInvalidArgument, - fmt.Errorf("source must be specified (either build_context or docker_image)")) - } - - // Log deployment source - if buildContextKey != "" { - s.logger.Info("deployment will build from source", - "deployment_id", deploymentID, - "context_key", buildContextKey, - "dockerfile", dockerfilePath) - } else { - s.logger.Info("deployment will use prebuilt image", - "deployment_id", deploymentID, - "image", *dockerImage) - } + s.logger.Info("deployment will use prebuilt image", + "deployment_id", deploymentID, + "image", dockerImage) // Determine command: CLI override > project default > empty array var command dbtype.StringSlice @@ -217,7 +192,6 @@ func (s *Service) CreateDeployment( "workspace_id", workspaceID, "project_id", req.Msg.GetProjectId(), "environment", 
env.ID, - "context_key", buildContextKey, "docker_image", dockerImage, ) @@ -229,20 +203,14 @@ func (s *Service) CreateDeployment( } deployReq := &hydrav1.DeployRequest{ - BuildContextPath: nil, - DockerfilePath: nil, - DockerImage: nil, - DeploymentId: deploymentID, - KeyAuthId: keySpaceID, - Command: req.Msg.GetCommand(), - } - - switch source := req.Msg.GetSource().(type) { - case *ctrlv1.CreateDeploymentRequest_BuildContext: - deployReq.BuildContextPath = proto.String(source.BuildContext.GetBuildContextPath()) - deployReq.DockerfilePath = source.BuildContext.DockerfilePath - case *ctrlv1.CreateDeploymentRequest_DockerImage: - deployReq.DockerImage = proto.String(source.DockerImage) + DeploymentId: deploymentID, + KeyAuthId: keySpaceID, + Command: req.Msg.GetCommand(), + Source: &hydrav1.DeployRequest_DockerImage{ + DockerImage: &hydrav1.DockerImage{ + Image: dockerImage, + }, + }, } // Send deployment request asynchronously (fire-and-forget) diff --git a/svc/ctrl/services/deployment/create_deployment_simple_test.go b/svc/ctrl/services/deployment/create_deployment_simple_test.go index d020b32404..2eabc1598c 100644 --- a/svc/ctrl/services/deployment/create_deployment_simple_test.go +++ b/svc/ctrl/services/deployment/create_deployment_simple_test.go @@ -8,7 +8,6 @@ import ( "github.com/stretchr/testify/require" ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" "github.com/unkeyed/unkey/pkg/db" - "github.com/unkeyed/unkey/pkg/ptr" ) // validateTimestamp applies the same validation logic as the CreateDeployment service @@ -161,12 +160,7 @@ func TestCreateDeploymentTimestampValidation_InvalidSecondsFormat(t *testing.T) ProjectId: "proj_test456", Branch: "main", EnvironmentSlug: "production", - Source: &ctrlv1.CreateDeploymentRequest_BuildContext{ - BuildContext: &ctrlv1.BuildContext{ - BuildContextPath: "test-key", - DockerfilePath: ptr.P("Dockerfile"), - }, - }, + DockerImage: "registry.example.com/app:v1.0.0", GitCommit: &ctrlv1.GitCommitInfo{ CommitSha: 
"abc123def456", Timestamp: time.Now().Unix(), // This is in seconds - should be rejected @@ -264,12 +258,7 @@ func TestCreateDeploymentFieldMapping(t *testing.T) { ProjectId: "proj_test456", Branch: "feature/test-branch", EnvironmentSlug: "production", - Source: &ctrlv1.CreateDeploymentRequest_BuildContext{ - BuildContext: &ctrlv1.BuildContext{ - BuildContextPath: "test-key", - DockerfilePath: ptr.P("Dockerfile"), - }, - }, + DockerImage: "registry.example.com/app:v1.0.0", GitCommit: &ctrlv1.GitCommitInfo{ CommitSha: "abc123def456789", CommitMessage: "feat: implement new feature", @@ -312,12 +301,7 @@ func TestCreateDeploymentFieldMapping(t *testing.T) { ProjectId: "proj_test456", Branch: "main", EnvironmentSlug: "production", - Source: &ctrlv1.CreateDeploymentRequest_BuildContext{ - BuildContext: &ctrlv1.BuildContext{ - BuildContextPath: "test-key", - DockerfilePath: ptr.P("Dockerfile"), - }, - }, + DockerImage: "registry.example.com/app:v1.0.0", GitCommit: &ctrlv1.GitCommitInfo{ CommitSha: "", CommitMessage: "", @@ -360,12 +344,7 @@ func TestCreateDeploymentFieldMapping(t *testing.T) { ProjectId: "proj_test456", Branch: "hotfix/urgent-fix", EnvironmentSlug: "production", - Source: &ctrlv1.CreateDeploymentRequest_BuildContext{ - BuildContext: &ctrlv1.BuildContext{ - BuildContextPath: "test-key", - DockerfilePath: ptr.P("Dockerfile"), - }, - }, + DockerImage: "registry.example.com/app:v1.0.0", GitCommit: &ctrlv1.GitCommitInfo{ CommitSha: "xyz789abc123", CommitMessage: "fix: critical security issue", diff --git a/svc/ctrl/services/deployment/create_s3_upload_url.go b/svc/ctrl/services/deployment/create_s3_upload_url.go deleted file mode 100644 index 1e34050881..0000000000 --- a/svc/ctrl/services/deployment/create_s3_upload_url.go +++ /dev/null @@ -1,34 +0,0 @@ -package deployment - -import ( - "context" - "fmt" - "time" - - "connectrpc.com/connect" - ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" - "github.com/unkeyed/unkey/pkg/uid" -) - -// CreateS3UploadURL 
generates a presigned S3 URL for uploading a build context -// archive. The URL is valid for 15 minutes. The build context path is generated -// using the project ID and a unique build ID, formatted as -// "{project_id}/{build_id}.tar.gz". Clients should upload a tar.gz archive -// containing the application source code to this URL, then pass the returned -// BuildContextPath to [CreateDeployment]. -func (s *Service) CreateS3UploadURL( - ctx context.Context, - req *connect.Request[ctrlv1.CreateS3UploadURLRequest], -) (*connect.Response[ctrlv1.CreateS3UploadURLResponse], error) { - - buildContextPath := fmt.Sprintf("%s/%s.tar.gz", req.Msg.GetUnkeyProjectId(), uid.New("build")) - - url, err := s.buildStorage.GenerateUploadURL(ctx, buildContextPath, 15*time.Minute) - if err != nil { - return nil, connect.NewError(connect.CodeInternal, err) - } - return connect.NewResponse(&ctrlv1.CreateS3UploadURLResponse{ - UploadUrl: url, - BuildContextPath: buildContextPath, - }), nil -} diff --git a/svc/ctrl/services/deployment/service.go b/svc/ctrl/services/deployment/service.go index 9ca353b87d..6240107001 100644 --- a/svc/ctrl/services/deployment/service.go +++ b/svc/ctrl/services/deployment/service.go @@ -6,7 +6,6 @@ import ( hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" "github.com/unkeyed/unkey/pkg/db" "github.com/unkeyed/unkey/pkg/otel/logging" - "github.com/unkeyed/unkey/svc/ctrl/pkg/s3" ) // Service implements the DeploymentService ConnectRPC API. It coordinates @@ -18,7 +17,6 @@ type Service struct { restate *restateingress.Client logger logging.Logger availableRegions []string - buildStorage s3.Storage } // deploymentClient creates a typed Restate ingress client for the DeploymentService @@ -37,8 +35,6 @@ type Config struct { Logger logging.Logger // AvailableRegions lists the regions where deployments can be created. AvailableRegions []string - // BuildStorage provides presigned URL generation for build context uploads. 
- BuildStorage s3.Storage } // New creates a new [Service] with the given configuration. All fields in @@ -50,6 +46,5 @@ func New(cfg Config) *Service { restate: cfg.Restate, logger: cfg.Logger, availableRegions: cfg.AvailableRegions, - buildStorage: cfg.BuildStorage, } } diff --git a/svc/ctrl/worker/BUILD.bazel b/svc/ctrl/worker/BUILD.bazel index d88526901f..4ea0c990d6 100644 --- a/svc/ctrl/worker/BUILD.bazel +++ b/svc/ctrl/worker/BUILD.bazel @@ -23,13 +23,12 @@ go_library( "//pkg/restate/admin", "//pkg/rpc/interceptor", "//pkg/shutdown", - "//svc/ctrl/pkg/build", - "//svc/ctrl/pkg/s3", "//svc/ctrl/services/acme/providers", "//svc/ctrl/worker/certificate", "//svc/ctrl/worker/clickhouseuser", "//svc/ctrl/worker/customdomain", "//svc/ctrl/worker/deploy", + "//svc/ctrl/worker/github", "//svc/ctrl/worker/quotacheck", "//svc/ctrl/worker/routing", "//svc/ctrl/worker/versioning", diff --git a/svc/ctrl/worker/certificate/bootstrap_infra_certs.go b/svc/ctrl/worker/certificate/bootstrap_infra_certs.go index c154ee210f..8752639a67 100644 --- a/svc/ctrl/worker/certificate/bootstrap_infra_certs.go +++ b/svc/ctrl/worker/certificate/bootstrap_infra_certs.go @@ -86,6 +86,8 @@ func (s *Service) BootstrapInfraCerts(ctx context.Context, cfg BootstrapConfig) return nil } +// ensureInfraDomain creates infrastructure domain records and triggers +// [Service.ProcessChallenge] if a certificate is missing. 
func (s *Service) ensureInfraDomain(ctx context.Context, domain string, restate *restateIngress.Client) error { // Check if domain already has a cert via JOIN existingDomain, err := db.Query.FindCustomDomainWithCertByDomain(ctx, s.db.RO(), domain) diff --git a/svc/ctrl/worker/certificate/process_challenge_handler.go b/svc/ctrl/worker/certificate/process_challenge_handler.go index b22a32eaed..97477e0560 100644 --- a/svc/ctrl/worker/certificate/process_challenge_handler.go +++ b/svc/ctrl/worker/certificate/process_challenge_handler.go @@ -195,9 +195,8 @@ func (s *Service) ProcessChallenge( }, nil } -// globalAcmeUserID identifies the single shared ACME account used for all certificate -// requests. Using one account avoids per-workspace ACME account registration and -// simplifies key management, while staying well under Let's Encrypt's account limits. +// globalAcmeUserID identifies the shared ACME account used for all certificate +// requests to avoid per-workspace account creation and stay under account limits. const globalAcmeUserID = "acme" // isWildcard reports whether domain is a wildcard domain pattern. Wildcard domains @@ -207,6 +206,8 @@ func isWildcard(domain string) bool { return len(domain) > 2 && domain[0] == '*' && domain[1] == '.' } +// getOrCreateAcmeClient returns a configured ACME client for the domain's +// challenge type using the shared account. func (s *Service) getOrCreateAcmeClient(ctx context.Context, domain string) (*lego.Client, error) { // Use a single global ACME user for all certificates client, err := acme.GetOrCreateUser(ctx, acme.UserConfig{ @@ -243,6 +244,7 @@ func (s *Service) getOrCreateAcmeClient(ctx context.Context, domain string) (*le return client, nil } +// obtainCertificate requests a certificate and encrypts the private key for storage. 
func (s *Service) obtainCertificate(ctx context.Context, _ string, dom db.CustomDomain, domain string) (EncryptedCertificate, error) { s.logger.Info("creating ACME client", "domain", domain) client, err := s.getOrCreateAcmeClient(ctx, domain) @@ -310,6 +312,7 @@ func (s *Service) obtainCertificate(ctx context.Context, _ string, dom db.Custom }, nil } +// persistCertificate stores the certificate and reuses the existing ID on renewals. func (s *Service) persistCertificate(ctx context.Context, dom db.CustomDomain, domain string, cert EncryptedCertificate) (string, error) { now := time.Now().UnixMilli() @@ -342,6 +345,7 @@ func (s *Service) persistCertificate(ctx context.Context, dom db.CustomDomain, d return certID, nil } +// markChallengeFailed marks a challenge as failed during cleanup. func (s *Service) markChallengeFailed(ctx restate.ObjectContext, domainID string) { _, _ = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { if updateErr := db.Query.UpdateAcmeChallengeStatus(stepCtx, s.db.RW(), db.UpdateAcmeChallengeStatusParams{ diff --git a/svc/ctrl/worker/clickhouseuser/configure_user_handler.go b/svc/ctrl/worker/clickhouseuser/configure_user_handler.go index 9844dba794..bcf2051394 100644 --- a/svc/ctrl/worker/clickhouseuser/configure_user_handler.go +++ b/svc/ctrl/worker/clickhouseuser/configure_user_handler.go @@ -16,22 +16,31 @@ import ( ) const ( - defaultQuotaDurationSeconds = 3600 // 1 hour - defaultMaxQueriesPerWindow = 1000 // queries - defaultMaxExecutionTimePerWindow = 1800 // 30 minutes - defaultMaxQueryExecutionTime = 30 // seconds - defaultMaxQueryMemoryBytes = 1000000000 // 1 GB - defaultMaxQueryResultRows = 10000000 // 10 million rows - passwordLength = 64 + // defaultQuotaDurationSeconds defines the quota window used for rate limits. + defaultQuotaDurationSeconds = 3600 + // defaultMaxQueriesPerWindow caps queries per quota window. 
+ defaultMaxQueriesPerWindow = 1000 + // defaultMaxExecutionTimePerWindow caps total query runtime per window. + defaultMaxExecutionTimePerWindow = 1800 + // defaultMaxQueryExecutionTime caps runtime per query to avoid long-running scans. + defaultMaxQueryExecutionTime = 30 + // defaultMaxQueryMemoryBytes caps memory per query to avoid exhausting the cluster. + defaultMaxQueryMemoryBytes = 1000000000 + // defaultMaxQueryResultRows caps result size to prevent accidental full exports. + defaultMaxQueryResultRows = 10000000 + // passwordLength defines the generated ClickHouse password length. + passwordLength = 64 ) -// existingUserResult wraps the DB lookup to avoid Restate error serialization issues. +// existingUserResult wraps the DB lookup to avoid Restate error serialization issues +// when journaling results with sql.Null fields. type existingUserResult struct { Row db.FindClickhouseWorkspaceSettingsByWorkspaceIDRow Found bool } -// quotaSettings holds resolved quota configuration with defaults applied. +// quotaSettings holds resolved quota configuration with defaults applied and +// non-zero values enforced. type quotaSettings struct { quotaDurationSeconds int32 maxQueriesPerWindow int32 @@ -42,6 +51,11 @@ type quotaSettings struct { } // ConfigureUser creates or updates a ClickHouse user for a workspace. +// +// For new workspaces, it generates credentials, encrypts them with Vault, and +// persists the encrypted values before provisioning ClickHouse. For existing +// workspaces, it preserves credentials while updating quota settings. The flow +// is idempotent and safe to retry after partial failures. 
func (s *Service) ConfigureUser( ctx restate.ObjectContext, req *hydrav1.ConfigureUserRequest, diff --git a/svc/ctrl/worker/config.go b/svc/ctrl/worker/config.go index e667314434..2d4529d6de 100644 --- a/svc/ctrl/worker/config.go +++ b/svc/ctrl/worker/config.go @@ -8,33 +8,6 @@ import ( "github.com/unkeyed/unkey/pkg/clock" ) -// S3Config holds S3 configuration for storage backends. -// -// This configuration is used by vault, build storage, and other services -// that need to store data in S3-compatible object storage. -type S3Config struct { - // URL is the S3 endpoint URL including protocol and region. - // Examples: "https://s3.amazonaws.com" or "https://s3.us-west-2.amazonaws.com". - URL string - - // Bucket is the S3 bucket name for storing objects. - // Must exist and be accessible with the provided credentials. - Bucket string - - // AccessKeyID is the AWS access key ID for S3 authentication. - // Must have appropriate permissions for bucket operations. - AccessKeyID string - - // AccessKeySecret is the AWS secret access key for S3 authentication. - // Should be stored securely and rotated regularly. - AccessKeySecret string - - // ExternalURL is the public-facing URL for accessing S3 objects. - // Used when objects need to be accessed from outside the AWS network. - // Optional - can be empty for internal-only access. - ExternalURL string -} - // Route53Config holds AWS Route53 configuration for ACME DNS-01 challenges. // // This configuration enables automatic DNS record creation for wildcard @@ -189,10 +162,6 @@ type Config struct { // Enables asynchronous deployment and certificate renewal workflows. Restate RestateConfig - // BuildS3 configures storage for build artifacts and outputs. - // Used by both Depot and Docker build backends. - BuildS3 S3Config - // BuildPlatform defines the target architecture for container builds. // Format: "linux/amd64", "linux/arm64". Only "linux" OS supported. 
BuildPlatform string @@ -238,6 +207,9 @@ type Config struct { // For local: "unkey.local" CnameDomain string + // GitHub configures GitHub App integration for webhook-triggered deployments. + GitHub GitHubConfig + // Clock provides time operations for testing and scheduling. // Use clock.RealClock{} for production deployments. Clock clock.Clock @@ -258,6 +230,21 @@ type Config struct { QuotaCheckSlackWebhookURL string } +// GitHubConfig holds configuration for GitHub App integration. +type GitHubConfig struct { + // AppID is the GitHub App ID for authentication. + AppID int64 + + // PrivateKeyPEM is the GitHub App private key in PEM format. + PrivateKeyPEM string +} + +// Enabled returns true only if ALL required GitHub App fields are configured. +// This ensures we never register the workflow with partial/insecure config. +func (c GitHubConfig) Enabled() bool { + return c.AppID != 0 && c.PrivateKeyPEM != "" +} + // parseBuildPlatform validates and parses a build platform string. // // This function validates that the build platform follows the expected @@ -349,15 +336,14 @@ func (c Config) Validate() error { // Validate build platform format _, platformErr := parseBuildPlatform(c.BuildPlatform) - // Validate registry configuration - registryErr := assert.All( + // Validate build configuration (Depot backend) + return assert.All( + platformErr, assert.NotEmpty(c.RegistryURL, "registry URL is required"), assert.NotEmpty(c.RegistryUsername, "registry username is required"), assert.NotEmpty(c.RegistryPassword, "registry password is required"), - ) - - return assert.All( - platformErr, - registryErr, + assert.NotEmpty(c.BuildPlatform, "build platform is required"), + assert.NotEmpty(c.Depot.APIUrl, "Depot API URL is required"), + assert.NotEmpty(c.Depot.ProjectRegion, "Depot project region is required"), ) } diff --git a/svc/ctrl/worker/customdomain/BUILD.bazel b/svc/ctrl/worker/customdomain/BUILD.bazel index 387e771231..645ec09d4a 100644 --- 
a/svc/ctrl/worker/customdomain/BUILD.bazel +++ b/svc/ctrl/worker/customdomain/BUILD.bazel @@ -3,6 +3,7 @@ load("@rules_go//go:def.bzl", "go_library") go_library( name = "customdomain", srcs = [ + "doc.go", "service.go", "verify_handler.go", ], diff --git a/svc/ctrl/worker/customdomain/doc.go b/svc/ctrl/worker/customdomain/doc.go new file mode 100644 index 0000000000..c48c2be70b --- /dev/null +++ b/svc/ctrl/worker/customdomain/doc.go @@ -0,0 +1,55 @@ +// Package customdomain implements domain ownership verification workflows. +// +// This package provides a Restate-based service for verifying custom domain ownership +// through DNS record validation. When a user adds a custom domain to their project, +// this service orchestrates the verification process that proves they control the domain. +// +// # Verification Flow +// +// Domain verification uses a two-step process. TXT record verification proves +// ownership by checking for a TXT record at _unkey. containing a unique +// token, and must complete before CNAME verification. CNAME verification enables +// traffic routing by checking that the domain points to a unique target subdomain +// under the platform's DNS apex (for example, .unkey-dns.com). Both checks +// must succeed before the domain is marked as verified. +// +// # Why Restate +// +// DNS propagation is inherently slow and unpredictable. A user may add DNS records +// that take anywhere from seconds to hours to propagate globally, so the workflow +// needs durable execution that survives restarts, a single verification attempt +// per domain, and a long retry window. Restate provides virtual objects keyed by +// domain name, durable retries every minute for up to 24 hours, and exactly-once +// semantics for post-verification actions such as certificate issuance and routing. 
+// +// # Post-Verification +// +// Once verified, the service triggers certificate issuance via [certificate.Service] +// and creates frontline routes to enable traffic routing to the user's deployment. +// +// # Key Types +// +// [Service] implements hydrav1.CustomDomainServiceServer with handlers for domain +// verification. Configure it via [Config] and create instances with [New]. +// +// # Usage +// +// Create a custom domain service: +// +// svc := customdomain.New(customdomain.Config{ +// DB: database, +// Logger: logger, +// CnameDomain: "unkey-dns.com", +// }) +// +// Register with Restate. The virtual object key is the domain name being verified: +// +// client := hydrav1.NewCustomDomainServiceClient(ctx, "api.example.com") +// client.VerifyDomain().Send(&hydrav1.VerifyDomainRequest{}) +// +// # Retry Behavior +// +// The service uses a fixed 1-minute retry interval (no exponential backoff) for up to +// 24 hours (1440 attempts). If verification fails after this window, the domain is +// marked as failed and Restate terminates the invocation. +package customdomain diff --git a/svc/ctrl/worker/customdomain/verify_handler.go b/svc/ctrl/worker/customdomain/verify_handler.go index 2cf9436b25..770a6f80e3 100644 --- a/svc/ctrl/worker/customdomain/verify_handler.go +++ b/svc/ctrl/worker/customdomain/verify_handler.go @@ -16,10 +16,11 @@ import ( "github.com/unkeyed/unkey/pkg/uid" ) -// maxVerificationDuration is how long we keep retrying before marking as failed. +// maxVerificationDuration limits how long we retry DNS verification before +// marking a domain as failed. const maxVerificationDuration = 24 * time.Hour -// errNotVerified is returned when verification is incomplete, triggering a Restate retry. +// errNotVerified signals incomplete verification and triggers Restate retries. 
var errNotVerified = errors.New("domain not verified yet") // VerifyDomain performs two-step verification for a custom domain: @@ -142,7 +143,8 @@ func (s *Service) VerifyDomain( return nil, errNotVerified } -// RetryVerification resets a failed domain and restarts the verification process. +// RetryVerification resets a failed domain and restarts verification after the +// user fixes DNS configuration. func (s *Service) RetryVerification( ctx restate.ObjectContext, _ *hydrav1.RetryVerificationRequest, diff --git a/svc/ctrl/worker/deploy/BUILD.bazel b/svc/ctrl/worker/deploy/BUILD.bazel index 0212ffc5e0..de7c9ce6c4 100644 --- a/svc/ctrl/worker/deploy/BUILD.bazel +++ b/svc/ctrl/worker/deploy/BUILD.bazel @@ -3,6 +3,7 @@ load("@rules_go//go:def.bzl", "go_library") go_library( name = "deploy", srcs = [ + "build.go", "deploy_handler.go", "doc.go", "domains.go", @@ -17,10 +18,27 @@ go_library( "//gen/proto/ctrl/v1:ctrl", "//gen/proto/hydra/v1:hydra", "//gen/proto/vault/v1/vaultv1connect", + "//pkg/assert", + "//pkg/clickhouse", + "//pkg/clickhouse/schema", "//pkg/db", "//pkg/otel/logging", + "//pkg/ptr", "//pkg/uid", - "//svc/ctrl/pkg/s3", + "//svc/ctrl/worker/github", + "@build_buf_gen_go_depot_api_connectrpc_go//depot/core/v1/corev1connect", + "@build_buf_gen_go_depot_api_protocolbuffers_go//depot/core/v1:core", + "@com_connectrpc_connect//:connect", + "@com_github_depot_depot_go//build", + "@com_github_depot_depot_go//machine", + "@com_github_depot_depot_go//proto/depot/cli/v1:cli", + "@com_github_docker_cli//cli/config/configfile", + "@com_github_docker_cli//cli/config/types", + "@com_github_moby_buildkit//client", + "@com_github_moby_buildkit//session", + "@com_github_moby_buildkit//session/auth/authprovider", + "@com_github_moby_buildkit//session/secrets/secretsprovider", + "@com_github_opencontainers_go_digest//:go-digest", "@com_github_restatedev_sdk_go//:sdk-go", ], ) diff --git a/svc/ctrl/pkg/build/build.go b/svc/ctrl/worker/deploy/build.go similarity index 52% 
rename from svc/ctrl/pkg/build/build.go rename to svc/ctrl/worker/deploy/build.go index ba1974f1e4..a617dc25f2 100644 --- a/svc/ctrl/pkg/build/build.go +++ b/svc/ctrl/worker/deploy/build.go @@ -1,10 +1,11 @@ -package build +package deploy import ( "context" "database/sql" "fmt" "net/http" + "strings" "time" "buf.build/gen/go/depot/api/connectrpc/go/depot/core/v1/corev1connect" @@ -18,11 +19,10 @@ import ( "github.com/moby/buildkit/client" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/auth/authprovider" + "github.com/moby/buildkit/session/secrets/secretsprovider" "github.com/opencontainers/go-digest" restate "github.com/restatedev/sdk-go" - hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" - "github.com/unkeyed/unkey/pkg/assert" "github.com/unkeyed/unkey/pkg/clickhouse/schema" "github.com/unkeyed/unkey/pkg/db" "github.com/unkeyed/unkey/pkg/ptr" @@ -38,77 +38,83 @@ const ( defaultCacheKeepDays = 14 ) -// BuildDockerImage builds a container image using Depot and pushes it to the -// configured registry. +// buildResult contains the output of a Docker image build, including the image +// name and identifiers needed to trace builds in Depot. +type buildResult struct { + ImageName string + DepotBuildID string + DepotProjectID string +} + +// gitBuildParams holds the inputs for building a container image from a Git +// repository, including the exact commit and the build context location. +type gitBuildParams struct { + InstallationID int64 + Repository string + CommitSHA string + ContextPath string + DockerfilePath string + ProjectID string + DeploymentID string + WorkspaceID string +} + +// buildDockerImageFromGit builds a container image from a GitHub repository using Depot. // // The method retrieves or creates a Depot project for the Unkey project, -// acquires a remote build machine, and executes the build. Build progress -// is streamed to ClickHouse for observability. 
On success, returns the -// fully-qualified image name and Depot metadata. -// -// Required request fields: S3Url (build context), BuildContextPath, ProjectId, -// DeploymentId, and DockerfilePath. All fields are validated; missing fields -// result in a terminal error. -// -// Returns a terminal error for validation failures. Other errors may be -// retried by Restate. -func (s *Depot) BuildDockerImage( +// acquires a remote build machine, and executes the build. BuildKit fetches +// the repository directly from GitHub using the provided installation token. +// Build progress is streamed to ClickHouse for observability. +func (w *Workflow) buildDockerImageFromGit( ctx restate.Context, - req *hydrav1.BuildDockerImageRequest, -) (*hydrav1.BuildDockerImageResponse, error) { - - unkeyProjectID := req.GetProjectId() - - if err := assert.All( - assert.NotEmpty(req.GetS3Url(), "s3_url is required"), - assert.NotEmpty(req.GetBuildContextPath(), "build_context_path is required"), - assert.NotEmpty(unkeyProjectID, "project_id is required"), - assert.NotEmpty(req.GetDeploymentId(), "deployment_id is required"), - assert.NotEmpty(req.GetDockerfilePath(), "dockerfile_path is required"), - ); err != nil { - return nil, restate.TerminalError(err) - } - - platform := s.buildPlatform.Platform - architecture := s.buildPlatform.Architecture - - s.logger.Info("Starting build process - getting presigned URL for build context", - "build_context_path", req.GetBuildContextPath(), - "unkey_project_id", unkeyProjectID, + params gitBuildParams, +) (*buildResult, error) { + platform := w.buildPlatform.Platform + architecture := w.buildPlatform.Architecture + + w.logger.Info("Starting git build process", + "repository", params.Repository, + "commit_sha", params.CommitSHA, + "project_id", params.ProjectID, "platform", platform, "architecture", architecture) depotProjectID, err := restate.Run(ctx, func(runCtx restate.RunContext) (string, error) { - return s.getOrCreateDepotProject(runCtx, 
unkeyProjectID) + return w.getOrCreateDepotProject(runCtx, params.ProjectID) }, restate.WithName("get or create depot project")) if err != nil { return nil, fmt.Errorf("failed to get/create depot project: %w", err) } - s.logger.Info("Creating depot build", + w.logger.Info("Creating depot build", "depot_project_id", depotProjectID, - "unkey_project_id", unkeyProjectID) + "project_id", params.ProjectID) - return restate.Run(ctx, func(runCtx restate.RunContext) (*hydrav1.BuildDockerImageResponse, error) { + return restate.Run(ctx, func(runCtx restate.RunContext) (*buildResult, error) { + // Get GitHub installation token for BuildKit to fetch the repo + ghToken, err := w.github.GetInstallationToken(params.InstallationID) + if err != nil { + return nil, fmt.Errorf("failed to get GitHub installation token: %w", err) + } depotBuild, err := build.NewBuild(runCtx, &cliv1.CreateBuildRequest{ Options: nil, ProjectId: depotProjectID, - }, s.registryConfig.Password) + }, w.registryConfig.Password) if err != nil { return nil, fmt.Errorf("failed to create build: %w", err) } - defer depotBuild.Finish(err) + defer func() { depotBuild.Finish(err) }() - s.logger.Info("Depot build created", + w.logger.Info("Depot build created", "build_id", depotBuild.ID, "depot_project_id", depotProjectID, - "unkey_project_id", unkeyProjectID) + "project_id", params.ProjectID) - s.logger.Info("Acquiring build machine", + w.logger.Info("Acquiring build machine", "build_id", depotBuild.ID, "architecture", architecture, - "unkey_project_id", unkeyProjectID) + "project_id", params.ProjectID) buildkit, err := machine.Acquire(runCtx, depotBuild.ID, depotBuild.Token, architecture) if err != nil { @@ -116,13 +122,13 @@ func (s *Depot) BuildDockerImage( } defer func() { if releaseErr := buildkit.Release(); releaseErr != nil { - s.logger.Error("unable to release buildkit", "error", releaseErr) + w.logger.Error("unable to release buildkit", "error", releaseErr) } }() - s.logger.Info("Build machine acquired, 
connecting to buildkit", + w.logger.Info("Build machine acquired, connecting to buildkit", "build_id", depotBuild.ID, - "unkey_project_id", unkeyProjectID) + "project_id", params.ProjectID) buildClient, err := buildkit.Connect(runCtx) if err != nil { @@ -130,49 +136,66 @@ func (s *Depot) BuildDockerImage( } defer func() { if closeErr := buildClient.Close(); closeErr != nil { - s.logger.Error("unable to close client", "error", closeErr) + w.logger.Error("unable to close client", "error", closeErr) } }() - imageName := fmt.Sprintf("%s/%s:%s-%s", s.registryConfig.URL, depotProjectID, unkeyProjectID, req.GetDeploymentId()) + imageName := fmt.Sprintf("%s/%s:%s-%s", w.registryConfig.URL, depotProjectID, params.ProjectID, params.DeploymentID) - dockerfilePath := req.GetDockerfilePath() + dockerfilePath := params.DockerfilePath if dockerfilePath == "" { dockerfilePath = "Dockerfile" } - s.logger.Info("Starting build execution", + // Normalize context path: trim whitespace and leading slashes, treat "." as root + contextPath := strings.TrimSpace(params.ContextPath) + contextPath = strings.TrimPrefix(contextPath, "/") + if contextPath == "." 
{
+		contextPath = ""
+	}
+
+	// Build git context URL with commit SHA
+	// Format: https://github.com/owner/repo.git#<commit-sha>:<context-path>
+	// Note: BuildKit requires full 40-char SHA for reliable builds
+	gitContextURL := fmt.Sprintf("https://github.com/%s.git#%s", params.Repository, params.CommitSHA)
+	if contextPath != "" {
+		gitContextURL = fmt.Sprintf("https://github.com/%s.git#%s:%s", params.Repository, params.CommitSHA, contextPath)
+	}
+
+	w.logger.Info("Starting build execution",
 		"image_name", imageName,
 		"dockerfile", dockerfilePath,
 		"platform", platform,
 		"architecture", architecture,
 		"build_id", depotBuild.ID,
-		"unkey_project_id", unkeyProjectID)
+		"project_id", params.ProjectID,
+		"git_context_url", gitContextURL,
+	)

 	buildStatusCh := make(chan *client.SolveStatus, 100)

-	go s.processBuildStatus(buildStatusCh, req.GetWorkspaceId(), unkeyProjectID, req.GetDeploymentId())
+	go w.processBuildStatus(buildStatusCh, params.WorkspaceID, params.ProjectID, params.DeploymentID)

-	solverOptions := s.buildSolverOptions(platform, req.GetS3Url(), dockerfilePath, imageName)
+	solverOptions := w.buildGitSolverOptions(platform, gitContextURL, dockerfilePath, imageName, ghToken.Token)

 	_, err = buildClient.Solve(runCtx, nil, solverOptions, buildStatusCh)
 	if err != nil {
 		return nil, fmt.Errorf("build failed: %w", err)
 	}

-	s.logger.Info("Build completed successfully")
+	w.logger.Info("Build completed successfully")

-	return &hydrav1.BuildDockerImageResponse{
+	return &buildResult{
 		ImageName:      imageName,
-		DepotBuildId:   depotBuild.ID,
-		DepotProjectId: depotProjectID,
+		DepotBuildID:   depotBuild.ID,
+		DepotProjectID: depotProjectID,
 	}, nil
-	})
+	}, restate.WithName("build docker image from git"))
 }

-// buildSolverOptions constructs the buildkit solver configuration for a build.
-// It configures the dockerfile frontend, sets the platform and context URL,
-// attaches registry authentication, and configures image export with push.
-func (s *Depot) buildSolverOptions( +// buildSolverOptions constructs the BuildKit solver configuration for URL-based +// contexts, including registry auth and image export settings. Use +// [Workflow.buildGitSolverOptions] when the context requires GitHub credentials. +func (w *Workflow) buildSolverOptions( platform, contextURL, dockerfilePath, imageName string, ) client.SolveOpt { return client.SolveOpt{ @@ -182,14 +205,15 @@ func (s *Depot) buildSolverOptions( "context": contextURL, "filename": dockerfilePath, }, + Session: []session.Attachable{ //nolint: exhaustruct authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{ ConfigFile: &configfile.ConfigFile{ AuthConfigs: map[string]types.AuthConfig{ - s.registryConfig.URL: { - Username: s.registryConfig.Username, - Password: s.registryConfig.Password, + w.registryConfig.URL: { + Username: w.registryConfig.Username, + Password: w.registryConfig.Password, }, }, }, @@ -209,24 +233,64 @@ func (s *Depot) buildSolverOptions( } } +// buildGitSolverOptions constructs the buildkit solver configuration for a git context build. +// It includes GitHub token authentication via the secrets provider. 
+func (w *Workflow) buildGitSolverOptions( + platform, gitContextURL, dockerfilePath, imageName, githubToken string, +) client.SolveOpt { + return client.SolveOpt{ + Frontend: "dockerfile.v0", + FrontendAttrs: map[string]string{ + "platform": platform, + "context": gitContextURL, + "filename": dockerfilePath, + }, + + Session: []session.Attachable{ + //nolint: exhaustruct + authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{ + ConfigFile: &configfile.ConfigFile{ + AuthConfigs: map[string]types.AuthConfig{ + w.registryConfig.URL: { + Username: w.registryConfig.Username, + Password: w.registryConfig.Password, + }, + }, + }, + }), + // Provide GitHub token for BuildKit to authenticate when fetching the git repo + secretsprovider.FromMap(map[string][]byte{ + "GIT_AUTH_TOKEN.github.com": []byte(githubToken), + }), + }, + //nolint: exhaustruct + Exports: []client.ExportEntry{ + { + Type: "image", + Attrs: map[string]string{ + "name": imageName, + "oci-mediatypes": "true", + "push": "true", + }, + }, + }, + } +} + // getOrCreateDepotProject retrieves the Depot project ID for an Unkey project, -// creating one if it doesn't exist. The mapping is persisted to the database -// so subsequent builds reuse the same Depot project and its cache. -// -// New projects are named "unkey-{projectID}" and created in the region -// specified by [DepotConfig.ProjectRegion] with the default cache policy. -func (s *Depot) getOrCreateDepotProject(ctx context.Context, unkeyProjectID string) (string, error) { - project, err := db.Query.FindProjectById(ctx, s.db.RO(), unkeyProjectID) +// creating one if it doesn't exist. 
+func (w *Workflow) getOrCreateDepotProject(ctx context.Context, unkeyProjectID string) (string, error) { + project, err := db.Query.FindProjectById(ctx, w.db.RO(), unkeyProjectID) if err != nil { return "", fmt.Errorf("failed to query project: %w", err) } projectName := fmt.Sprintf("unkey-%s", unkeyProjectID) if project.DepotProjectID.Valid && project.DepotProjectID.String != "" { - s.logger.Info( + w.logger.Info( "Returning existing depot project", "depot_project_id", project.DepotProjectID, - "unkey_project_id", unkeyProjectID, + "project_id", unkeyProjectID, "project_name", projectName, ) return project.DepotProjectID.String, nil @@ -235,16 +299,16 @@ func (s *Depot) getOrCreateDepotProject(ctx context.Context, unkeyProjectID stri httpClient := &http.Client{} authInterceptor := connect.UnaryInterceptorFunc(func(next connect.UnaryFunc) connect.UnaryFunc { return func(ctx context.Context, req connect.AnyRequest) (connect.AnyResponse, error) { - req.Header().Set("Authorization", "Bearer "+s.registryConfig.Password) + req.Header().Set("Authorization", "Bearer "+w.registryConfig.Password) return next(ctx, req) } }) - projectClient := corev1connect.NewProjectServiceClient(httpClient, s.depotConfig.APIUrl, connect.WithInterceptors(authInterceptor)) + projectClient := corev1connect.NewProjectServiceClient(httpClient, w.depotConfig.APIUrl, connect.WithInterceptors(authInterceptor)) //nolint: exhaustruct // optional fields createResp, err := projectClient.CreateProject(ctx, connect.NewRequest(&corev1.CreateProjectRequest{ Name: projectName, - RegionId: s.depotConfig.ProjectRegion, + RegionId: w.depotConfig.ProjectRegion, //nolint: exhaustruct // missing fields is deprecated CachePolicy: &corev1.CachePolicy{ KeepGb: defaultCacheKeepGB, @@ -257,7 +321,7 @@ func (s *Depot) getOrCreateDepotProject(ctx context.Context, unkeyProjectID stri depotProjectID := createResp.Msg.GetProject().GetProjectId() now := time.Now().UnixMilli() - err = db.Query.UpdateProjectDepotID(ctx, 
s.db.RW(), db.UpdateProjectDepotIDParams{ + err = db.Query.UpdateProjectDepotID(ctx, w.db.RW(), db.UpdateProjectDepotIDParams{ DepotProjectID: sql.NullString{ String: depotProjectID, Valid: true, @@ -269,20 +333,17 @@ func (s *Depot) getOrCreateDepotProject(ctx context.Context, unkeyProjectID stri return "", fmt.Errorf("failed to update depot_project_id: %w", err) } - s.logger.Info("Created new Depot project", + w.logger.Info("Created new Depot project", "depot_project_id", depotProjectID, - "unkey_project_id", unkeyProjectID, + "project_id", unkeyProjectID, "project_name", projectName) return depotProjectID, nil } // processBuildStatus consumes build status events from buildkit and writes -// telemetry to ClickHouse. It tracks completed vertices (build steps) and -// their logs, buffering them for batch insertion. -// -// This method runs in its own goroutine and exits when statusCh is closed. -func (s *Depot) processBuildStatus( +// telemetry to ClickHouse. +func (w *Workflow) processBuildStatus( statusCh <-chan *client.SolveStatus, workspaceID, projectID, deploymentID string, ) { @@ -290,21 +351,19 @@ func (s *Depot) processBuildStatus( verticesWithLogs := map[digest.Digest]bool{} for status := range statusCh { - // Mark vertices that have logs for _, log := range status.Logs { verticesWithLogs[log.Vertex] = true } - // Process completed vertices for _, vertex := range status.Vertexes { if vertex == nil { - s.logger.Warn("vertex is nil") + w.logger.Warn("vertex is nil") continue } if vertex.Completed != nil && !completed[vertex.Digest] { completed[vertex.Digest] = true - s.clickhouse.BufferBuildStep(schema.BuildStepV1{ + w.clickhouse.BufferBuildStep(schema.BuildStepV1{ Error: vertex.Error, StartedAt: ptr.SafeDeref(vertex.Started).UnixMilli(), CompletedAt: ptr.SafeDeref(vertex.Completed).UnixMilli(), @@ -319,9 +378,8 @@ func (s *Depot) processBuildStatus( } } - // Process logs for _, log := range status.Logs { - 
s.clickhouse.BufferBuildStepLog(schema.BuildStepLogV1{ + w.clickhouse.BufferBuildStepLog(schema.BuildStepLogV1{ WorkspaceID: workspaceID, ProjectID: projectID, DeploymentID: deploymentID, diff --git a/svc/ctrl/worker/deploy/deploy_handler.go b/svc/ctrl/worker/deploy/deploy_handler.go index 7751ac7028..01468998e2 100644 --- a/svc/ctrl/worker/deploy/deploy_handler.go +++ b/svc/ctrl/worker/deploy/deploy_handler.go @@ -10,15 +10,18 @@ import ( restate "github.com/restatedev/sdk-go" ctrlv1 "github.com/unkeyed/unkey/gen/proto/ctrl/v1" hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/pkg/assert" "github.com/unkeyed/unkey/pkg/db" "github.com/unkeyed/unkey/pkg/uid" ) const ( - // sentinelNamespace is the Kubernetes namespace where sentinel containers are deployed. + // sentinelNamespace isolates sentinel resources from tenant namespaces to + // simplify RBAC and keep routing infrastructure separate from workloads. sentinelNamespace = "sentinel" - // sentinelPort is the port that sentinel containers listen on for traffic routing. + // sentinelPort is the port exposed by sentinel services for frontline traffic + // and must match the container port and service configuration. 
sentinelPort = 8040 ) @@ -52,8 +55,17 @@ const ( func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.DeployRequest) (*hydrav1.DeployResponse, error) { finishedSuccessfully := false - deployment, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.Deployment, error) { - return db.Query.FindDeploymentById(stepCtx, w.db.RW(), req.GetDeploymentId()) + err := assert.All( + assert.NotEmpty(req.GetDeploymentId(), "deployment_id is required"), + ) + if err != nil { + return nil, restate.TerminalError(err) + } + + w.logger.Info("deployment workflow started", "req", fmt.Sprintf("%+v", req)) + + deployment, err := restate.Run(ctx, func(runCtx restate.RunContext) (db.Deployment, error) { + return db.Query.FindDeploymentById(runCtx, w.db.RW(), req.GetDeploymentId()) }, restate.WithName("finding deployment")) if err != nil { return nil, err @@ -100,55 +112,44 @@ func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.Deploy if err != nil { return nil, err } - project, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindProjectByIdRow, error) { - return db.Query.FindProjectById(stepCtx, w.db.RW(), deployment.ProjectID) + project, err := restate.Run(ctx, func(runCtx restate.RunContext) (db.FindProjectByIdRow, error) { + return db.Query.FindProjectById(runCtx, w.db.RW(), deployment.ProjectID) }, restate.WithName("finding project")) if err != nil { return nil, err } - environment, err := restate.Run(ctx, func(stepCtx restate.RunContext) (db.FindEnvironmentByIdRow, error) { - return db.Query.FindEnvironmentById(stepCtx, w.db.RW(), deployment.EnvironmentID) + environment, err := restate.Run(ctx, func(runCtx restate.RunContext) (db.FindEnvironmentByIdRow, error) { + return db.Query.FindEnvironmentById(runCtx, w.db.RW(), deployment.EnvironmentID) }, restate.WithName("finding environment")) if err != nil { return nil, err } - var dockerImage string - - if req.GetBuildContextPath() != "" { - if err = 
w.updateDeploymentStatus(ctx, deployment.ID, db.DeploymentsStatusBuilding); err != nil {
-			return nil, err
-		}
-
-		s3DownloadURL, err := restate.Run(ctx, func(stepCtx restate.RunContext) (string, error) {
-			return w.buildStorage.GenerateDownloadURL(stepCtx, req.GetBuildContextPath(), 1*time.Hour)
-		}, restate.WithName("generate s3 download url"))
-		if err != nil {
-			return nil, fmt.Errorf("failed to generate s3 download url: %w", err)
-		}
-
-		w.logger.Info("starting docker build",
-			"deployment_id", deployment.ID,
-			"build_context_path", req.GetBuildContextPath())
-
-		build, err := hydrav1.NewBuildServiceClient(ctx).BuildDockerImage().Request(&hydrav1.BuildDockerImageRequest{
-			S3Url:            s3DownloadURL,
-			BuildContextPath: req.GetBuildContextPath(),
-			DockerfilePath:   req.GetDockerfilePath(),
-			ProjectId:        deployment.ProjectID,
-			DeploymentId:     deployment.ID,
-			WorkspaceId:      deployment.WorkspaceID,
+	dockerImage := ""
+
+	switch source := req.GetSource().(type) {
+	case *hydrav1.DeployRequest_DockerImage:
+		dockerImage = source.DockerImage.GetImage()
+	case *hydrav1.DeployRequest_Git:
+		build, err := w.buildDockerImageFromGit(ctx, gitBuildParams{
+			InstallationID: source.Git.GetInstallationId(),
+			Repository:     source.Git.GetRepository(),
+			CommitSHA:      source.Git.GetCommitSha(),
+			ContextPath:    source.Git.GetContextPath(),
+			DockerfilePath: source.Git.GetDockerfilePath(),
+			ProjectID:      deployment.ProjectID,
+			DeploymentID:   deployment.ID,
+			WorkspaceID:    deployment.WorkspaceID,
 		})
-		if err != nil {
-			return nil, fmt.Errorf("failed to build docker image: %w", err)
+		if err != nil {
+			return nil, fmt.Errorf("failed to build docker image from git: %w", err)
 		}

-		dockerImage = build.GetImageName()
+		dockerImage = build.ImageName

-		err = restate.RunVoid(ctx, func(stepCtx restate.RunContext) error {
-			return db.Query.UpdateDeploymentBuildID(stepCtx, w.db.RW(), db.UpdateDeploymentBuildIDParams{
+		err = restate.RunVoid(ctx, func(runCtx restate.RunContext) error {
+			return 
db.Query.UpdateDeploymentBuildID(runCtx, w.db.RW(), db.UpdateDeploymentBuildIDParams{ ID: deployment.ID, - BuildID: sql.NullString{Valid: true, String: build.GetDepotBuildId()}, + BuildID: sql.NullString{Valid: true, String: build.DepotBuildID}, UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, }) }) @@ -156,13 +157,8 @@ func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.Deploy return nil, fmt.Errorf("failed to update deployment build ID: %w", err) } - } else if req.GetDockerImage() != "" { - dockerImage = req.GetDockerImage() - w.logger.Info("using prebuilt docker image", - "deployment_id", deployment.ID, - "image", dockerImage) - } else { - return nil, fmt.Errorf("either build_context_path or docker_image must be specified") + default: + return nil, restate.TerminalError(fmt.Errorf("unknown source type: %T", source)) } err = restate.RunVoid(ctx, func(runCtx restate.RunContext) error { @@ -329,12 +325,12 @@ func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.Deploy existingRouteIDs := make([]string, 0) for _, domain := range allDomains { - frontlineRouteID, getFrontlineRouteErr := restate.Run(ctx, func(stepCtx restate.RunContext) (string, error) { - return db.TxWithResultRetry(stepCtx, w.db.RW(), func(txCtx context.Context, tx db.DBTX) (string, error) { + frontlineRouteID, getFrontlineRouteErr := restate.Run(ctx, func(runCtx restate.RunContext) (string, error) { + return db.TxWithResultRetry(runCtx, w.db.RW(), func(txCtx context.Context, tx db.DBTX) (string, error) { found, err := db.Query.FindFrontlineRouteByFQDN(txCtx, tx, domain.domain) if err != nil { if db.IsNotFound(err) { - err = db.Query.InsertFrontlineRoute(stepCtx, tx, db.InsertFrontlineRouteParams{ + err = db.Query.InsertFrontlineRoute(runCtx, tx, db.InsertFrontlineRouteParams{ ID: uid.New(uid.FrontlineRoutePrefix), ProjectID: project.ID, DeploymentID: deployment.ID, @@ -396,8 +392,8 @@ func (w *Workflow) Deploy(ctx 
restate.WorkflowSharedContext, req *hydrav1.Deploy } if !project.IsRolledBack && environment.Slug == "production" { - _, err = restate.Run(ctx, func(stepCtx restate.RunContext) (restate.Void, error) { - return restate.Void{}, db.Query.UpdateProjectDeployments(stepCtx, w.db.RW(), db.UpdateProjectDeploymentsParams{ + _, err = restate.Run(ctx, func(runCtx restate.RunContext) (restate.Void, error) { + return restate.Void{}, db.Query.UpdateProjectDeployments(runCtx, w.db.RW(), db.UpdateProjectDeploymentsParams{ IsRolledBack: false, ID: deployment.ProjectID, LiveDeploymentID: sql.NullString{Valid: true, String: deployment.ID}, diff --git a/svc/ctrl/worker/deploy/domains.go b/svc/ctrl/worker/deploy/domains.go index ca73a1d6a4..da59719743 100644 --- a/svc/ctrl/worker/deploy/domains.go +++ b/svc/ctrl/worker/deploy/domains.go @@ -87,10 +87,11 @@ func buildDomains(workspaceSlug, projectSlug, environmentSlug, gitSha, branchNam } var ( - // nonAlphanumericRegex matches any character that is not a letter, digit, or whitespace. + // nonAlphanumericRegex removes characters that are unsafe for domain slugs and + // avoids double hyphens when combined with whitespace normalization. nonAlphanumericRegex = regexp.MustCompile(`[^a-zA-Z0-9\s]`) - // multipleSpacesRegex matches one or more consecutive whitespace characters. + // multipleSpacesRegex collapses consecutive whitespace before hyphen conversion. 
multipleSpacesRegex = regexp.MustCompile(`\s+`) ) diff --git a/svc/ctrl/worker/deploy/service.go b/svc/ctrl/worker/deploy/service.go index 87932e244b..ef791f3108 100644 --- a/svc/ctrl/worker/deploy/service.go +++ b/svc/ctrl/worker/deploy/service.go @@ -3,11 +3,31 @@ package deploy import ( hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" "github.com/unkeyed/unkey/gen/proto/vault/v1/vaultv1connect" + "github.com/unkeyed/unkey/pkg/clickhouse" "github.com/unkeyed/unkey/pkg/db" "github.com/unkeyed/unkey/pkg/otel/logging" - "github.com/unkeyed/unkey/svc/ctrl/pkg/s3" + githubclient "github.com/unkeyed/unkey/svc/ctrl/worker/github" ) +// BuildPlatform specifies the target platform for container builds. +type BuildPlatform struct { + Platform string + Architecture string +} + +// DepotConfig holds configuration for connecting to the Depot.dev API. +type DepotConfig struct { + APIUrl string + ProjectRegion string +} + +// RegistryConfig holds credentials for the container registry. +type RegistryConfig struct { + URL string + Username string + Password string +} + // Workflow orchestrates deployment lifecycle operations. // // This workflow manages the complete deployment lifecycle including deploying new versions, @@ -27,7 +47,13 @@ type Workflow struct { vault vaultv1connect.VaultServiceClient sentinelImage string availableRegions []string - buildStorage s3.Storage + github *githubclient.Client + + // Build dependencies + depotConfig DepotConfig + registryConfig RegistryConfig + buildPlatform BuildPlatform + clickhouse clickhouse.ClickHouse } var _ hydrav1.DeploymentServiceServer = (*Workflow)(nil) @@ -52,8 +78,20 @@ type Config struct { // AvailableRegions is the list of available regions for deployments. AvailableRegions []string - // BuildStorage provides access to S3-compatible storage for build context archives. - BuildStorage s3.Storage + // GitHub provides access to GitHub API for downloading tarballs. 
+ GitHub *githubclient.Client + + // DepotConfig configures the Depot API connection. + DepotConfig DepotConfig + + // RegistryConfig provides credentials for the container registry. + RegistryConfig RegistryConfig + + // BuildPlatform specifies the target platform for all builds. + BuildPlatform BuildPlatform + + // Clickhouse receives build step telemetry for observability. + Clickhouse clickhouse.ClickHouse } // New creates a new deployment workflow instance. @@ -66,6 +104,10 @@ func New(cfg Config) *Workflow { vault: cfg.Vault, sentinelImage: cfg.SentinelImage, availableRegions: cfg.AvailableRegions, - buildStorage: cfg.BuildStorage, + github: cfg.GitHub, + depotConfig: cfg.DepotConfig, + registryConfig: cfg.RegistryConfig, + buildPlatform: cfg.BuildPlatform, + clickhouse: cfg.Clickhouse, } } diff --git a/svc/ctrl/worker/doc.go b/svc/ctrl/worker/doc.go index d61333e4ad..be25228edf 100644 --- a/svc/ctrl/worker/doc.go +++ b/svc/ctrl/worker/doc.go @@ -7,23 +7,15 @@ // // # Architecture // -// The worker acts as a Restate service host, binding multiple workflow services that handle -// distinct operational concerns. Each service is implemented as a separate sub-package: -// -// - [deploy] handles container deployments across multiple regions -// - [certificate] manages TLS certificates via ACME (Let's Encrypt) -// - [routing] configures traffic routing for custom domains -// - [versioning] manages application version lifecycle -// -// The worker maintains connections to several infrastructure components: the primary database -// for persistent state, two separate vault services (one for application secrets, one for -// ACME certificates), S3-compatible storage for build artifacts, and ClickHouse for analytics. +// The worker acts as a Restate service host, binding workflow services that handle container +// deployments, TLS certificate management, traffic routing, and versioning. 
It maintains +// connections to the primary database for persistent state, vault services for secrets and +// ACME certificates, S3-compatible storage for build artifacts, and ClickHouse for analytics. // // # Configuration // -// Configuration is provided through the [Config] struct, which validates all settings on startup. -// The worker supports two build backends ("depot" for cloud builds and "docker" for local builds), -// each with different requirements validated by [Config.Validate]. +// Configuration is provided through [Config], which validates settings on startup. The worker +// supports multiple build backends and validates their requirements in [Config.Validate]. // // # Usage // diff --git a/svc/ctrl/worker/github/BUILD.bazel b/svc/ctrl/worker/github/BUILD.bazel new file mode 100644 index 0000000000..50aa92cdfd --- /dev/null +++ b/svc/ctrl/worker/github/BUILD.bazel @@ -0,0 +1,17 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "github", + srcs = [ + "client.go", + "doc.go", + ], + importpath = "github.com/unkeyed/unkey/svc/ctrl/worker/github", + visibility = ["//visibility:public"], + deps = [ + "//pkg/assert", + "//pkg/fault", + "//pkg/jwt", + "//pkg/otel/logging", + ], +) diff --git a/svc/ctrl/worker/github/client.go b/svc/ctrl/worker/github/client.go new file mode 100644 index 0000000000..7d6c951d65 --- /dev/null +++ b/svc/ctrl/worker/github/client.go @@ -0,0 +1,206 @@ +package github + +import ( + "crypto/hmac" + "crypto/sha256" + "crypto/subtle" + "encoding/json" + "fmt" + "io" + "net/http" + "strings" + "time" + + "github.com/unkeyed/unkey/pkg/assert" + "github.com/unkeyed/unkey/pkg/fault" + "github.com/unkeyed/unkey/pkg/jwt" + "github.com/unkeyed/unkey/pkg/otel/logging" +) + +// ClientConfig holds configuration for creating a [Client] instance. +type ClientConfig struct { + // AppID is the numeric ID assigned to the GitHub App during registration. 
+ AppID int64 + + // PrivateKeyPEM is the RSA private key in PEM format for signing JWTs. + // Generate this in the GitHub App settings under "Private keys". + PrivateKeyPEM string + + // WebhookSecret is the shared secret for verifying webhook signatures. + // Set this in the GitHub App settings under "Webhook secret". + WebhookSecret string +} + +// Client provides access to GitHub API using App authentication. +// +// Client handles JWT generation for App-level authentication and installation +// token retrieval for repository-level operations. It is safe for concurrent use. +type Client struct { + config ClientConfig + httpClient *http.Client + signer jwt.Signer[jwt.RegisteredClaims] + logger logging.Logger +} + +// NewClient creates a [Client] with the given configuration. Returns an error if +// the private key cannot be parsed for JWT signing. +func NewClient(config ClientConfig, logger logging.Logger) (*Client, error) { + signer, err := jwt.NewRS256Signer[jwt.RegisteredClaims](config.PrivateKeyPEM) + if err != nil { + return nil, fault.Wrap(err, fault.Internal("failed to create JWT signer")) + } + + return &Client{ + config: config, + httpClient: &http.Client{Timeout: 30 * time.Second}, + signer: signer, + logger: logger, + }, nil +} + +// generateJWT creates a short-lived JWT for GitHub App authentication. +func (c *Client) generateJWT() (string, error) { + now := time.Now() + // nolint:exhaustruct + claims := jwt.RegisteredClaims{ + IssuedAt: now.Unix(), + ExpiresAt: now.Add(10 * time.Minute).Unix(), + Issuer: fmt.Sprintf("%d", c.config.AppID), + } + return c.signer.Sign(claims) +} + +// InstallationToken represents a GitHub installation access token. The token +// provides repository access for a specific App installation and expires after +// 1 hour. +type InstallationToken struct { + // Token is the installation access token for API requests. 
+ Token string `json:"token"` + + // ExpiresAt indicates when the token expires, typically 1 hour from issuance. + ExpiresAt time.Time `json:"expires_at"` +} + +// GetInstallationToken retrieves an access token for a specific installation. +// The installation ID is provided by GitHub when the App is installed on an +// organization or user account. Returns an error if the installation ID is zero +// or if the GitHub API request fails. +func (c *Client) GetInstallationToken(installationID int64) (*InstallationToken, error) { + if err := assert.NotNilAndNotZero(installationID, "installationID must be provided"); err != nil { + return nil, err + } + + c.logger.Info("Getting GitHub installation token", "installation_id", installationID) + + token, err := c.generateJWT() + if err != nil { + return nil, err + } + + url := fmt.Sprintf("https://api.github.com/app/installations/%d/access_tokens", installationID) + c.logger.Info("Calling GitHub API", "url", url) + req, err := http.NewRequest(http.MethodPost, url, nil) + if err != nil { + return nil, fault.Wrap(err, fault.Internal("failed to create request")) + } + + req.Header.Set("Authorization", "Bearer "+token) + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("X-GitHub-Api-Version", "2022-11-28") + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fault.Wrap(err, fault.Internal("failed to get installation token")) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusCreated { + body, _ := io.ReadAll(resp.Body) + c.logger.Error("GitHub API returned unexpected status", + "status_code", resp.StatusCode, + "installation_id", installationID, + "response_body", string(body), + "url", url, + ) + return nil, fault.New("failed to get installation token", + fault.Internal(fmt.Sprintf("status %d: %s", resp.StatusCode, string(body))), + ) + } + + var installToken InstallationToken + if err := json.NewDecoder(resp.Body).Decode(&installToken); err != nil { + 
return nil, fault.Wrap(err, fault.Internal("failed to decode installation token")) + } + + return &installToken, nil +} + +// DownloadRepoTarball downloads a repository tarball for a specific ref. +// The repoFullName should be in "owner/repo" format. The ref can be a branch +// name, tag, or commit SHA. The entire tarball is loaded into memory, so this +// should only be used for reasonably-sized repositories. +func (c *Client) DownloadRepoTarball(installationID int64, repoFullName, ref string) ([]byte, error) { + token, err := c.GetInstallationToken(installationID) + if err != nil { + return nil, err + } + + url := fmt.Sprintf("https://api.github.com/repos/%s/tarball/%s", repoFullName, ref) + req, err := http.NewRequest(http.MethodGet, url, nil) + if err != nil { + return nil, fault.Wrap(err, fault.Internal("failed to create tarball request")) + } + + req.Header.Set("Authorization", "Bearer "+token.Token) + req.Header.Set("Accept", "application/vnd.github+json") + req.Header.Set("X-GitHub-Api-Version", "2022-11-28") + + resp, err := c.httpClient.Do(req) + if err != nil { + return nil, fault.Wrap(err, fault.Internal("failed to download tarball")) + } + defer func() { _ = resp.Body.Close() }() + + if resp.StatusCode != http.StatusOK { + body, _ := io.ReadAll(resp.Body) + return nil, fault.New("failed to download tarball", + fault.Internal(fmt.Sprintf("status %d: %s", resp.StatusCode, string(body))), + ) + } + + tarball, err := io.ReadAll(resp.Body) + if err != nil { + return nil, fault.Wrap(err, fault.Internal("failed to read tarball")) + } + + return tarball, nil +} + +// VerifyWebhookSignature verifies a GitHub webhook signature using constant-time +// comparison. The signature should be the value of the X-Hub-Signature-256 header +// (e.g., "sha256=..."). Returns true only if the signature is valid and matches +// the expected HMAC-SHA256 of the payload. 
+func VerifyWebhookSignature(payload []byte, signature, secret string) bool { + if !strings.HasPrefix(signature, "sha256=") { + return false + } + + expectedSig := signature[7:] + + mac := hmacSHA256([]byte(secret), payload) + actualSig := fmt.Sprintf("%x", mac) + + return hmacEqual([]byte(expectedSig), []byte(actualSig)) +} + +// hmacSHA256 computes the HMAC-SHA256 of data using the provided key. +func hmacSHA256(key, data []byte) []byte { + h := hmac.New(sha256.New, key) + h.Write(data) + return h.Sum(nil) +} + +// hmacEqual compares HMAC digests in constant time to avoid timing attacks. +func hmacEqual(a, b []byte) bool { + return subtle.ConstantTimeCompare(a, b) == 1 +} diff --git a/svc/ctrl/worker/github/doc.go b/svc/ctrl/worker/github/doc.go new file mode 100644 index 0000000000..82db699604 --- /dev/null +++ b/svc/ctrl/worker/github/doc.go @@ -0,0 +1,47 @@ +// Package github provides a GitHub App client for repository access. +// +// This package provides authenticated access to GitHub repositories via the GitHub +// App installation API. It handles JWT generation for App authentication and +// installation token retrieval for repository-level operations. +// +// # Authentication Flow +// +// GitHub Apps use a two-step authentication process. The client generates a +// short-lived JWT signed with the App's private key to authenticate as the App, +// then exchanges it for an installation access token scoped to a specific +// organization or user account. Installation tokens provide repository access +// and are valid for one hour. +// +// # Key Types +// +// [Client] is the main entry point for GitHub API operations. Configure it via +// [ClientConfig] and create instances with [NewClient]. It provides +// [Client.GetInstallationToken] for access tokens and [Client.DownloadRepoTarball] +// for repository archives. [InstallationToken] holds the access token and its +// expiration time. 
+// +// # Webhook Verification +// +// [VerifyWebhookSignature] validates incoming webhook payloads using HMAC-SHA256. +// This ensures webhooks originate from GitHub and have not been tampered with. +// +// # Usage +// +// Create a client and download a repository tarball: +// +// client, err := github.NewClient(github.ClientConfig{ +// AppID: 12345, +// PrivateKeyPEM: privateKey, +// }, logger) +// if err != nil { +// return err +// } +// +// tarball, err := client.DownloadRepoTarball(installationID, "owner/repo", "main") +// +// Verify a webhook signature: +// +// if !github.VerifyWebhookSignature(payload, signature, webhookSecret) { +// return errors.New("invalid webhook signature") +// } +package github diff --git a/svc/ctrl/worker/routing/service.go b/svc/ctrl/worker/routing/service.go index 3eaf269b44..e82533d592 100644 --- a/svc/ctrl/worker/routing/service.go +++ b/svc/ctrl/worker/routing/service.go @@ -22,8 +22,11 @@ var _ hydrav1.RoutingServiceServer = (*Service)(nil) // Config holds the configuration for creating a [Service]. type Config struct { - Logger logging.Logger - DB db.Database + // Logger receives structured log output from routing operations. + Logger logging.Logger + // DB provides access to frontline route records. + DB db.Database + // DefaultDomain is the apex domain for generated deployment URLs. 
DefaultDomain string } diff --git a/svc/ctrl/worker/run.go b/svc/ctrl/worker/run.go index 783a6a12b6..51a761f79a 100644 --- a/svc/ctrl/worker/run.go +++ b/svc/ctrl/worker/run.go @@ -25,13 +25,12 @@ import ( restateadmin "github.com/unkeyed/unkey/pkg/restate/admin" "github.com/unkeyed/unkey/pkg/rpc/interceptor" "github.com/unkeyed/unkey/pkg/shutdown" - "github.com/unkeyed/unkey/svc/ctrl/pkg/build" - "github.com/unkeyed/unkey/svc/ctrl/pkg/s3" "github.com/unkeyed/unkey/svc/ctrl/services/acme/providers" "github.com/unkeyed/unkey/svc/ctrl/worker/certificate" "github.com/unkeyed/unkey/svc/ctrl/worker/clickhouseuser" workercustomdomain "github.com/unkeyed/unkey/svc/ctrl/worker/customdomain" "github.com/unkeyed/unkey/svc/ctrl/worker/deploy" + githubclient "github.com/unkeyed/unkey/svc/ctrl/worker/github" "github.com/unkeyed/unkey/svc/ctrl/worker/quotacheck" "github.com/unkeyed/unkey/svc/ctrl/worker/routing" "github.com/unkeyed/unkey/svc/ctrl/worker/versioning" @@ -102,16 +101,14 @@ func Run(ctx context.Context, cfg Config) error { shutdowns.Register(database.Close) - imageStore, err := s3.NewS3(s3.S3Config{ - Logger: logger, - S3URL: cfg.BuildS3.URL, - S3PresignURL: cfg.BuildS3.ExternalURL, - S3Bucket: cfg.BuildS3.Bucket, - S3AccessKeyID: cfg.BuildS3.AccessKeyID, - S3AccessKeySecret: cfg.BuildS3.AccessKeySecret, - }) + // Create GitHub client for deploy workflow + ghClient, err := githubclient.NewClient(githubclient.ClientConfig{ + AppID: cfg.GitHub.AppID, + PrivateKeyPEM: cfg.GitHub.PrivateKeyPEM, + WebhookSecret: "", + }, logger) if err != nil { - return fmt.Errorf("unable to create build storage: %w", err) + return fmt.Errorf("failed to create GitHub client: %w", err) } var ch clickhouse.ClickHouse = clickhouse.NewNoop() @@ -130,16 +127,6 @@ func Run(ctx context.Context, cfg Config) error { // Restate Server restateSrv := restateServer.NewRestate().WithLogger(logging.Handler(), false) - restateSrv.Bind(hydrav1.NewBuildServiceServer(build.New(build.Config{ - InstanceID: 
cfg.InstanceID, - DB: database, - RegistryConfig: build.RegistryConfig(cfg.GetRegistryConfig()), - BuildPlatform: build.BuildPlatform(cfg.GetBuildPlatform()), - DepotConfig: build.DepotConfig(cfg.GetDepotConfig()), - Clickhouse: ch, - Logger: logger, - }))) - restateSrv.Bind(hydrav1.NewDeploymentServiceServer(deploy.New(deploy.Config{ Logger: logger, DB: database, @@ -147,7 +134,11 @@ func Run(ctx context.Context, cfg Config) error { Vault: vaultClient, SentinelImage: cfg.SentinelImage, AvailableRegions: cfg.AvailableRegions, - BuildStorage: imageStore, + GitHub: ghClient, + RegistryConfig: deploy.RegistryConfig(cfg.GetRegistryConfig()), + BuildPlatform: deploy.BuildPlatform(cfg.GetBuildPlatform()), + DepotConfig: deploy.DepotConfig(cfg.GetDepotConfig()), + Clickhouse: ch, }))) restateSrv.Bind(hydrav1.NewRoutingServiceServer(routing.New(routing.Config{ @@ -174,7 +165,10 @@ func Run(ctx context.Context, cfg Config) error { )) // Initialize domain cache for ACME providers - clk := clock.New() + clk := cfg.Clock + if clk == nil { + clk = clock.New() + } domainCache, domainCacheErr := cache.New(cache.Config[string, db.CustomDomain]{ Fresh: 5 * time.Minute, Stale: 10 * time.Minute, diff --git a/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts b/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts index 9b1cb8cc0c..0bcf822d5c 100644 --- a/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts +++ b/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts @@ -10,7 +10,7 @@ import type { Message } from "@bufbuild/protobuf"; * Describes the file ctrl/v1/deployment.proto. 
*/ export const file_ctrl_v1_deployment: GenFile = /*@__PURE__*/ - fileDesc("ChhjdHJsL3YxL2RlcGxveW1lbnQucHJvdG8SB2N0cmwudjEiqgIKF0NyZWF0ZURlcGxveW1lbnRSZXF1ZXN0EhIKCnByb2plY3RfaWQYAiABKAkSDgoGYnJhbmNoGAMgASgJEhgKEGVudmlyb25tZW50X3NsdWcYBCABKAkSLgoNYnVpbGRfY29udGV4dBgFIAEoCzIVLmN0cmwudjEuQnVpbGRDb250ZXh0SAASFgoMZG9ja2VyX2ltYWdlGAYgASgJSAASLwoKZ2l0X2NvbW1pdBgHIAEoCzIWLmN0cmwudjEuR2l0Q29tbWl0SW5mb0gBiAEBEhgKC2tleXNwYWNlX2lkGAggASgJSAKIAQESDwoHY29tbWFuZBgJIAMoCUIICgZzb3VyY2VCDQoLX2dpdF9jb21taXRCDgoMX2tleXNwYWNlX2lkSgQIARACIlwKDEJ1aWxkQ29udGV4dBIaChJidWlsZF9jb250ZXh0X3BhdGgYASABKAkSHAoPZG9ja2VyZmlsZV9wYXRoGAIgASgJSACIAQFCEgoQX2RvY2tlcmZpbGVfcGF0aCKAAQoNR2l0Q29tbWl0SW5mbxISCgpjb21taXRfc2hhGAEgASgJEhYKDmNvbW1pdF9tZXNzYWdlGAIgASgJEhUKDWF1dGhvcl9oYW5kbGUYAyABKAkSGQoRYXV0aG9yX2F2YXRhcl91cmwYBCABKAkSEQoJdGltZXN0YW1wGAUgASgDIlwKGENyZWF0ZURlcGxveW1lbnRSZXNwb25zZRIVCg1kZXBsb3ltZW50X2lkGAEgASgJEikKBnN0YXR1cxgCIAEoDjIZLmN0cmwudjEuRGVwbG95bWVudFN0YXR1cyItChRHZXREZXBsb3ltZW50UmVxdWVzdBIVCg1kZXBsb3ltZW50X2lkGAEgASgJIkAKFUdldERlcGxveW1lbnRSZXNwb25zZRInCgpkZXBsb3ltZW50GAEgASgLMhMuY3RybC52MS5EZXBsb3ltZW50IogFCgpEZXBsb3ltZW50EgoKAmlkGAEgASgJEhQKDHdvcmtzcGFjZV9pZBgCIAEoCRISCgpwcm9qZWN0X2lkGAMgASgJEhYKDmVudmlyb25tZW50X2lkGAQgASgJEhYKDmdpdF9jb21taXRfc2hhGAUgASgJEhIKCmdpdF9icmFuY2gYBiABKAkSKQoGc3RhdHVzGAcgASgOMhkuY3RybC52MS5EZXBsb3ltZW50U3RhdHVzEhUKDWVycm9yX21lc3NhZ2UYCCABKAkSTAoVZW52aXJvbm1lbnRfdmFyaWFibGVzGAkgAygLMi0uY3RybC52MS5EZXBsb3ltZW50LkVudmlyb25tZW50VmFyaWFibGVzRW50cnkSIwoIdG9wb2xvZ3kYCiABKAsyES5jdHJsLnYxLlRvcG9sb2d5EhIKCmNyZWF0ZWRfYXQYCyABKAMSEgoKdXBkYXRlZF9hdBgMIAEoAxIRCglob3N0bmFtZXMYDSADKAkSFwoPcm9vdGZzX2ltYWdlX2lkGA4gASgJEhAKCGJ1aWxkX2lkGA8gASgJEiYKBXN0ZXBzGBAgAygLMhcuY3RybC52MS5EZXBsb3ltZW50U3RlcBIaChJnaXRfY29tbWl0X21lc3NhZ2UYESABKAkSIAoYZ2l0X2NvbW1pdF9hdXRob3JfaGFuZGxlGBIgASgJEiQKHGdpdF9jb21taXRfYXV0aG9yX2F2YXRhcl91cmwYEyABKAkSHAoUZ2l0X2NvbW1pdF90aW1lc3RhbXAYFCABKAMaOwoZRW52aXJvbm1lbnRWYXJpYWJsZXNFbnRyeRILCgNrZXkYASABKAkSDQoFdmFsdWUYAiABKAk6AjgBIlwKDkRlcGxveW1lbnRTdGVwEg4KBnN
0YXR1cxgBIAEoCRIPCgdtZXNzYWdlGAIgASgJEhUKDWVycm9yX21lc3NhZ2UYAyABKAkSEgoKY3JlYXRlZF9hdBgEIAEoAyKnAQoIVG9wb2xvZ3kSFgoOY3B1X21pbGxpY29yZXMYASABKAUSEgoKbWVtb3J5X21pYhgCIAEoBRIoCgdyZWdpb25zGAMgAygLMhcuY3RybC52MS5SZWdpb25hbENvbmZpZxIcChRpZGxlX3RpbWVvdXRfc2Vjb25kcxgEIAEoBRIZChFoZWFsdGhfY2hlY2tfcGF0aBgFIAEoCRIMCgRwb3J0GAYgASgFIk4KDlJlZ2lvbmFsQ29uZmlnEg4KBnJlZ2lvbhgBIAEoCRIVCg1taW5faW5zdGFuY2VzGAIgASgFEhUKDW1heF9pbnN0YW5jZXMYAyABKAUiTQoPUm9sbGJhY2tSZXF1ZXN0EhwKFHNvdXJjZV9kZXBsb3ltZW50X2lkGAEgASgJEhwKFHRhcmdldF9kZXBsb3ltZW50X2lkGAIgASgJIhIKEFJvbGxiYWNrUmVzcG9uc2UiLgoOUHJvbW90ZVJlcXVlc3QSHAoUdGFyZ2V0X2RlcGxveW1lbnRfaWQYASABKAkiEQoPUHJvbW90ZVJlc3BvbnNlIjQKGENyZWF0ZVMzVXBsb2FkVVJMUmVxdWVzdBIYChB1bmtleV9wcm9qZWN0X2lkGAEgASgJIksKGUNyZWF0ZVMzVXBsb2FkVVJMUmVzcG9uc2USEgoKdXBsb2FkX3VybBgBIAEoCRIaChJidWlsZF9jb250ZXh0X3BhdGgYAiABKAkq7wEKEERlcGxveW1lbnRTdGF0dXMSIQodREVQTE9ZTUVOVF9TVEFUVVNfVU5TUEVDSUZJRUQQABIdChlERVBMT1lNRU5UX1NUQVRVU19QRU5ESU5HEAESHgoaREVQTE9ZTUVOVF9TVEFUVVNfQlVJTERJTkcQAhIfChtERVBMT1lNRU5UX1NUQVRVU19ERVBMT1lJTkcQAxIdChlERVBMT1lNRU5UX1NUQVRVU19ORVRXT1JLEAQSGwoXREVQTE9ZTUVOVF9TVEFUVVNfUkVBRFkQBRIcChhERVBMT1lNRU5UX1NUQVRVU19GQUlMRUQQBipaCgpTb3VyY2VUeXBlEhsKF1NPVVJDRV9UWVBFX1VOU1BFQ0lGSUVEEAASEwoPU09VUkNFX1RZUEVfR0lUEAESGgoWU09VUkNFX1RZUEVfQ0xJX1VQTE9BRBACMqEDChFEZXBsb3ltZW50U2VydmljZRJcChFDcmVhdGVTM1VwbG9hZFVSTBIhLmN0cmwudjEuQ3JlYXRlUzNVcGxvYWRVUkxSZXF1ZXN0GiIuY3RybC52MS5DcmVhdGVTM1VwbG9hZFVSTFJlc3BvbnNlIgASWQoQQ3JlYXRlRGVwbG95bWVudBIgLmN0cmwudjEuQ3JlYXRlRGVwbG95bWVudFJlcXVlc3QaIS5jdHJsLnYxLkNyZWF0ZURlcGxveW1lbnRSZXNwb25zZSIAElAKDUdldERlcGxveW1lbnQSHS5jdHJsLnYxLkdldERlcGxveW1lbnRSZXF1ZXN0Gh4uY3RybC52MS5HZXREZXBsb3ltZW50UmVzcG9uc2UiABJBCghSb2xsYmFjaxIYLmN0cmwudjEuUm9sbGJhY2tSZXF1ZXN0GhkuY3RybC52MS5Sb2xsYmFja1Jlc3BvbnNlIgASPgoHUHJvbW90ZRIXLmN0cmwudjEuUHJvbW90ZVJlcXVlc3QaGC5jdHJsLnYxLlByb21vdGVSZXNwb25zZSIAQo4BCgtjb20uY3RybC52MUIPRGVwbG95bWVudFByb3RvUAFaMWdpdGh1Yi5jb20vdW5rZXllZC91bmtleS9nZW4vcHJvdG8vY3RybC92MTtjdHJsdjGiAgNDWFiqAgdDdHJsLlYxygIHQ3RybFxWMeICE0N0cmxcVjFcR1BCTWV
0YWRhdGHqAghDdHJsOjpWMWIGcHJvdG8z"); + fileDesc("ChhjdHJsL3YxL2RlcGxveW1lbnQucHJvdG8SB2N0cmwudjEi7gEKF0NyZWF0ZURlcGxveW1lbnRSZXF1ZXN0EhIKCnByb2plY3RfaWQYAiABKAkSDgoGYnJhbmNoGAMgASgJEhgKEGVudmlyb25tZW50X3NsdWcYBCABKAkSFAoMZG9ja2VyX2ltYWdlGAYgASgJEi8KCmdpdF9jb21taXQYByABKAsyFi5jdHJsLnYxLkdpdENvbW1pdEluZm9IAIgBARIYCgtrZXlzcGFjZV9pZBgIIAEoCUgBiAEBEg8KB2NvbW1hbmQYCSADKAlCDQoLX2dpdF9jb21taXRCDgoMX2tleXNwYWNlX2lkSgQIARACIoABCg1HaXRDb21taXRJbmZvEhIKCmNvbW1pdF9zaGEYASABKAkSFgoOY29tbWl0X21lc3NhZ2UYAiABKAkSFQoNYXV0aG9yX2hhbmRsZRgDIAEoCRIZChFhdXRob3JfYXZhdGFyX3VybBgEIAEoCRIRCgl0aW1lc3RhbXAYBSABKAMiXAoYQ3JlYXRlRGVwbG95bWVudFJlc3BvbnNlEhUKDWRlcGxveW1lbnRfaWQYASABKAkSKQoGc3RhdHVzGAIgASgOMhkuY3RybC52MS5EZXBsb3ltZW50U3RhdHVzIi0KFEdldERlcGxveW1lbnRSZXF1ZXN0EhUKDWRlcGxveW1lbnRfaWQYASABKAkiQAoVR2V0RGVwbG95bWVudFJlc3BvbnNlEicKCmRlcGxveW1lbnQYASABKAsyEy5jdHJsLnYxLkRlcGxveW1lbnQiiAUKCkRlcGxveW1lbnQSCgoCaWQYASABKAkSFAoMd29ya3NwYWNlX2lkGAIgASgJEhIKCnByb2plY3RfaWQYAyABKAkSFgoOZW52aXJvbm1lbnRfaWQYBCABKAkSFgoOZ2l0X2NvbW1pdF9zaGEYBSABKAkSEgoKZ2l0X2JyYW5jaBgGIAEoCRIpCgZzdGF0dXMYByABKA4yGS5jdHJsLnYxLkRlcGxveW1lbnRTdGF0dXMSFQoNZXJyb3JfbWVzc2FnZRgIIAEoCRJMChVlbnZpcm9ubWVudF92YXJpYWJsZXMYCSADKAsyLS5jdHJsLnYxLkRlcGxveW1lbnQuRW52aXJvbm1lbnRWYXJpYWJsZXNFbnRyeRIjCgh0b3BvbG9neRgKIAEoCzIRLmN0cmwudjEuVG9wb2xvZ3kSEgoKY3JlYXRlZF9hdBgLIAEoAxISCgp1cGRhdGVkX2F0GAwgASgDEhEKCWhvc3RuYW1lcxgNIAMoCRIXCg9yb290ZnNfaW1hZ2VfaWQYDiABKAkSEAoIYnVpbGRfaWQYDyABKAkSJgoFc3RlcHMYECADKAsyFy5jdHJsLnYxLkRlcGxveW1lbnRTdGVwEhoKEmdpdF9jb21taXRfbWVzc2FnZRgRIAEoCRIgChhnaXRfY29tbWl0X2F1dGhvcl9oYW5kbGUYEiABKAkSJAocZ2l0X2NvbW1pdF9hdXRob3JfYXZhdGFyX3VybBgTIAEoCRIcChRnaXRfY29tbWl0X3RpbWVzdGFtcBgUIAEoAxo7ChlFbnZpcm9ubWVudFZhcmlhYmxlc0VudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEiXAoORGVwbG95bWVudFN0ZXASDgoGc3RhdHVzGAEgASgJEg8KB21lc3NhZ2UYAiABKAkSFQoNZXJyb3JfbWVzc2FnZRgDIAEoCRISCgpjcmVhdGVkX2F0GAQgASgDIqcBCghUb3BvbG9neRIWCg5jcHVfbWlsbGljb3JlcxgBIAEoBRISCgptZW1vcnlfbWliGAIgASgFEigKB3JlZ2lvbnMYAyADKAsyFy5jdHJsLnYxLlJlZ2lvbmFsQ29uZmlnEhwKFGl
kbGVfdGltZW91dF9zZWNvbmRzGAQgASgFEhkKEWhlYWx0aF9jaGVja19wYXRoGAUgASgJEgwKBHBvcnQYBiABKAUiTgoOUmVnaW9uYWxDb25maWcSDgoGcmVnaW9uGAEgASgJEhUKDW1pbl9pbnN0YW5jZXMYAiABKAUSFQoNbWF4X2luc3RhbmNlcxgDIAEoBSJNCg9Sb2xsYmFja1JlcXVlc3QSHAoUc291cmNlX2RlcGxveW1lbnRfaWQYASABKAkSHAoUdGFyZ2V0X2RlcGxveW1lbnRfaWQYAiABKAkiEgoQUm9sbGJhY2tSZXNwb25zZSIuCg5Qcm9tb3RlUmVxdWVzdBIcChR0YXJnZXRfZGVwbG95bWVudF9pZBgBIAEoCSIRCg9Qcm9tb3RlUmVzcG9uc2Uq7wEKEERlcGxveW1lbnRTdGF0dXMSIQodREVQTE9ZTUVOVF9TVEFUVVNfVU5TUEVDSUZJRUQQABIdChlERVBMT1lNRU5UX1NUQVRVU19QRU5ESU5HEAESHgoaREVQTE9ZTUVOVF9TVEFUVVNfQlVJTERJTkcQAhIfChtERVBMT1lNRU5UX1NUQVRVU19ERVBMT1lJTkcQAxIdChlERVBMT1lNRU5UX1NUQVRVU19ORVRXT1JLEAQSGwoXREVQTE9ZTUVOVF9TVEFUVVNfUkVBRFkQBRIcChhERVBMT1lNRU5UX1NUQVRVU19GQUlMRUQQBipaCgpTb3VyY2VUeXBlEhsKF1NPVVJDRV9UWVBFX1VOU1BFQ0lGSUVEEAASEwoPU09VUkNFX1RZUEVfR0lUEAESGgoWU09VUkNFX1RZUEVfQ0xJX1VQTE9BRBACMsMCChFEZXBsb3ltZW50U2VydmljZRJZChBDcmVhdGVEZXBsb3ltZW50EiAuY3RybC52MS5DcmVhdGVEZXBsb3ltZW50UmVxdWVzdBohLmN0cmwudjEuQ3JlYXRlRGVwbG95bWVudFJlc3BvbnNlIgASUAoNR2V0RGVwbG95bWVudBIdLmN0cmwudjEuR2V0RGVwbG95bWVudFJlcXVlc3QaHi5jdHJsLnYxLkdldERlcGxveW1lbnRSZXNwb25zZSIAEkEKCFJvbGxiYWNrEhguY3RybC52MS5Sb2xsYmFja1JlcXVlc3QaGS5jdHJsLnYxLlJvbGxiYWNrUmVzcG9uc2UiABI+CgdQcm9tb3RlEhcuY3RybC52MS5Qcm9tb3RlUmVxdWVzdBoYLmN0cmwudjEuUHJvbW90ZVJlc3BvbnNlIgBCjgEKC2NvbS5jdHJsLnYxQg9EZXBsb3ltZW50UHJvdG9QAVoxZ2l0aHViLmNvbS91bmtleWVkL3Vua2V5L2dlbi9wcm90by9jdHJsL3YxO2N0cmx2MaICA0NYWKoCB0N0cmwuVjHKAgdDdHJsXFYx4gITQ3RybFxWMVxHUEJNZXRhZGF0YeoCCEN0cmw6OlYxYgZwcm90bzM"); /** * @generated from message ctrl.v1.CreateDeploymentRequest @@ -32,25 +32,14 @@ export type CreateDeploymentRequest = Message<"ctrl.v1.CreateDeploymentRequest"> environmentSlug: string; /** - * Build source, we can either build it from scratch or accept prebuilt image + * Build source - currently only prebuilt docker images are supported via API + * GitHub source builds are triggered automatically via webhook * - * @generated from oneof ctrl.v1.CreateDeploymentRequest.source - */ - 
source: { - /** - * @generated from field: ctrl.v1.BuildContext build_context = 5; - */ - value: BuildContext; - case: "buildContext"; - } | { - /** - * Prebuilt image reference - * - * @generated from field: string docker_image = 6; - */ - value: string; - case: "dockerImage"; - } | { case: undefined; value?: undefined }; + * Prebuilt image reference + * + * @generated from field: string docker_image = 6; + */ + dockerImage: string; /** * Git information @@ -82,32 +71,6 @@ export type CreateDeploymentRequest = Message<"ctrl.v1.CreateDeploymentRequest"> export const CreateDeploymentRequestSchema: GenMessage = /*@__PURE__*/ messageDesc(file_ctrl_v1_deployment, 0); -/** - * @generated from message ctrl.v1.BuildContext - */ -export type BuildContext = Message<"ctrl.v1.BuildContext"> & { - /** - * S3 key for uploaded build context - * - * @generated from field: string build_context_path = 1; - */ - buildContextPath: string; - - /** - * Path to Dockerfile within context (default: "Dockerfile") - * - * @generated from field: optional string dockerfile_path = 2; - */ - dockerfilePath?: string; -}; - -/** - * Describes the message ctrl.v1.BuildContext. - * Use `create(BuildContextSchema)` to create a new message. - */ -export const BuildContextSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 1); - /** * @generated from message ctrl.v1.GitCommitInfo */ @@ -145,7 +108,7 @@ export type GitCommitInfo = Message<"ctrl.v1.GitCommitInfo"> & { * Use `create(GitCommitInfoSchema)` to create a new message. 
*/ export const GitCommitInfoSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 2); + messageDesc(file_ctrl_v1_deployment, 1); /** * @generated from message ctrl.v1.CreateDeploymentResponse @@ -157,7 +120,7 @@ export type CreateDeploymentResponse = Message<"ctrl.v1.CreateDeploymentResponse deploymentId: string; /** - * Will be PENDING or BUILDING + * Will be PENDING or DEPLOYING * * @generated from field: ctrl.v1.DeploymentStatus status = 2; */ @@ -169,7 +132,7 @@ export type CreateDeploymentResponse = Message<"ctrl.v1.CreateDeploymentResponse * Use `create(CreateDeploymentResponseSchema)` to create a new message. */ export const CreateDeploymentResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 3); + messageDesc(file_ctrl_v1_deployment, 2); /** * @generated from message ctrl.v1.GetDeploymentRequest @@ -186,7 +149,7 @@ export type GetDeploymentRequest = Message<"ctrl.v1.GetDeploymentRequest"> & { * Use `create(GetDeploymentRequestSchema)` to create a new message. */ export const GetDeploymentRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 4); + messageDesc(file_ctrl_v1_deployment, 3); /** * @generated from message ctrl.v1.GetDeploymentResponse @@ -203,7 +166,7 @@ export type GetDeploymentResponse = Message<"ctrl.v1.GetDeploymentResponse"> & { * Use `create(GetDeploymentResponseSchema)` to create a new message. */ export const GetDeploymentResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 5); + messageDesc(file_ctrl_v1_deployment, 4); /** * @generated from message ctrl.v1.Deployment @@ -339,7 +302,7 @@ export type Deployment = Message<"ctrl.v1.Deployment"> & { * Use `create(DeploymentSchema)` to create a new message. 
*/ export const DeploymentSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 6); + messageDesc(file_ctrl_v1_deployment, 5); /** * @generated from message ctrl.v1.DeploymentStep @@ -371,7 +334,7 @@ export type DeploymentStep = Message<"ctrl.v1.DeploymentStep"> & { * Use `create(DeploymentStepSchema)` to create a new message. */ export const DeploymentStepSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 7); + messageDesc(file_ctrl_v1_deployment, 6); /** * @generated from message ctrl.v1.Topology @@ -419,7 +382,7 @@ export type Topology = Message<"ctrl.v1.Topology"> & { * Use `create(TopologySchema)` to create a new message. */ export const TopologySchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 8); + messageDesc(file_ctrl_v1_deployment, 7); /** * @generated from message ctrl.v1.RegionalConfig @@ -446,7 +409,7 @@ export type RegionalConfig = Message<"ctrl.v1.RegionalConfig"> & { * Use `create(RegionalConfigSchema)` to create a new message. */ export const RegionalConfigSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 9); + messageDesc(file_ctrl_v1_deployment, 8); /** * @generated from message ctrl.v1.RollbackRequest @@ -468,7 +431,7 @@ export type RollbackRequest = Message<"ctrl.v1.RollbackRequest"> & { * Use `create(RollbackRequestSchema)` to create a new message. */ export const RollbackRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 10); + messageDesc(file_ctrl_v1_deployment, 9); /** * @generated from message ctrl.v1.RollbackResponse @@ -481,7 +444,7 @@ export type RollbackResponse = Message<"ctrl.v1.RollbackResponse"> & { * Use `create(RollbackResponseSchema)` to create a new message. 
*/ export const RollbackResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 11); + messageDesc(file_ctrl_v1_deployment, 10); /** * @generated from message ctrl.v1.PromoteRequest @@ -498,7 +461,7 @@ export type PromoteRequest = Message<"ctrl.v1.PromoteRequest"> & { * Use `create(PromoteRequestSchema)` to create a new message. */ export const PromoteRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 12); + messageDesc(file_ctrl_v1_deployment, 11); /** * @generated from message ctrl.v1.PromoteResponse @@ -511,50 +474,7 @@ export type PromoteResponse = Message<"ctrl.v1.PromoteResponse"> & { * Use `create(PromoteResponseSchema)` to create a new message. */ export const PromoteResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 13); - -/** - * @generated from message ctrl.v1.CreateS3UploadURLRequest - */ -export type CreateS3UploadURLRequest = Message<"ctrl.v1.CreateS3UploadURLRequest"> & { - /** - * @generated from field: string unkey_project_id = 1; - */ - unkeyProjectId: string; -}; - -/** - * Describes the message ctrl.v1.CreateS3UploadURLRequest. - * Use `create(CreateS3UploadURLRequestSchema)` to create a new message. - */ -export const CreateS3UploadURLRequestSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 14); - -/** - * @generated from message ctrl.v1.CreateS3UploadURLResponse - */ -export type CreateS3UploadURLResponse = Message<"ctrl.v1.CreateS3UploadURLResponse"> & { - /** - * Presigned PUT URL - * - * @generated from field: string upload_url = 1; - */ - uploadUrl: string; - - /** - * S3 key to use in CreateBuild - * - * @generated from field: string build_context_path = 2; - */ - buildContextPath: string; -}; - -/** - * Describes the message ctrl.v1.CreateS3UploadURLResponse. - * Use `create(CreateS3UploadURLResponseSchema)` to create a new message. 
- */ -export const CreateS3UploadURLResponseSchema: GenMessage = /*@__PURE__*/ - messageDesc(file_ctrl_v1_deployment, 15); + messageDesc(file_ctrl_v1_deployment, 12); /** * Deployment status enum @@ -637,15 +557,7 @@ export const SourceTypeSchema: GenEnum = /*@__PURE__*/ */ export const DeploymentService: GenService<{ /** - * @generated from rpc ctrl.v1.DeploymentService.CreateS3UploadURL - */ - createS3UploadURL: { - methodKind: "unary"; - input: typeof CreateS3UploadURLRequestSchema; - output: typeof CreateS3UploadURLResponseSchema; - }, - /** - * Create a new deployment + * Create a new deployment with a prebuilt docker image * * @generated from rpc ctrl.v1.DeploymentService.CreateDeployment */ diff --git a/web/apps/dashboard/lib/env.ts b/web/apps/dashboard/lib/env.ts index 3f71a14d2d..1b2ec2b7d0 100644 --- a/web/apps/dashboard/lib/env.ts +++ b/web/apps/dashboard/lib/env.ts @@ -83,8 +83,8 @@ export const vercelIntegrationEnv = () => vercelIntegrationParsed.success ? vercelIntegrationParsed.data : null; export const githubAppSchema = z.object({ - GITHUB_APP_ID: z.string(), // needs to be a single line, with \n - GITHUB_APP_PRIVATE_KEY: z.string().transform((s) => s.replace(/\\n/g, "\n")), + GITHUB_APP_ID: z.string().transform((s) => Number.parseInt(s, 10)), + UNKEY_GITHUB_PRIVATE_KEY_PEM: z.string().transform((s) => s.replace(/\\n/g, "\n")), // needs to be a single line, with \n }); const githubAppParsed = githubAppSchema.safeParse(process.env); diff --git a/web/apps/dashboard/lib/github.ts b/web/apps/dashboard/lib/github.ts index e970e364ae..47553b7f5a 100644 --- a/web/apps/dashboard/lib/github.ts +++ b/web/apps/dashboard/lib/github.ts @@ -39,7 +39,7 @@ function generateAppJWT(): string { const sign = crypto.createSign("RSA-SHA256"); sign.update(signatureInput); - const signature = sign.sign(env.GITHUB_APP_PRIVATE_KEY, "base64url"); + const signature = sign.sign(env.UNKEY_GITHUB_PRIVATE_KEY_PEM, "base64url"); return `${signatureInput}.${signature}`; } diff --git 
a/web/apps/dashboard/lib/trpc/routers/github.ts b/web/apps/dashboard/lib/trpc/routers/github.ts index 430b120202..073a09bf39 100644 --- a/web/apps/dashboard/lib/trpc/routers/github.ts +++ b/web/apps/dashboard/lib/trpc/routers/github.ts @@ -123,7 +123,6 @@ export const githubRouter = t.router({ message: "GitHub App not configured", }); } - let parsedState: z.infer | null = null; try { const result = state.safeParse(JSON.parse(input.state)); @@ -143,7 +142,13 @@ export const githubRouter = t.router({ ctx.workspace.id, projectId, input.installationId, - ); + ).catch((err) => { + console.error(err); + throw new TRPCError({ + code: "INTERNAL_SERVER_ERROR", + message: "Failed to load project installation", + }); + }); if (!projectInstallation) { throw new TRPCError({ @@ -165,7 +170,8 @@ export const githubRouter = t.router({ updatedAt: Date.now(), }, }) - .catch(() => { + .catch((err) => { + console.error(err); throw new TRPCError({ code: "INTERNAL_SERVER_ERROR", message: "Failed to save GitHub installation", @@ -229,12 +235,15 @@ export const githubRouter = t.router({ }> = []; for (const installation of githubContext.installations) { - const repos = await getInstallationRepositories(installation.installationId).catch(() => { - throw new TRPCError({ - code: "INTERNAL_SERVER_ERROR", - message: "Failed to load GitHub repositories", - }); - }); + const repos = await getInstallationRepositories(installation.installationId).catch( + (err) => { + console.error(err); + throw new TRPCError({ + code: "INTERNAL_SERVER_ERROR", + message: "Failed to load GitHub repositories", + }); + }, + ); for (const repo of repos) { allRepos.push({ id: repo.id, diff --git a/web/apps/engineering/content/docs/architecture/workflows/github-deployments.mdx b/web/apps/engineering/content/docs/architecture/workflows/github-deployments.mdx new file mode 100644 index 0000000000..6a32fb9aeb --- /dev/null +++ b/web/apps/engineering/content/docs/architecture/workflows/github-deployments.mdx @@ -0,0 +1,366 
@@ +--- +title: GitHub Deployments +description: How GitHub push webhooks trigger deployments via BuildKit git context +--- + +# GitHub Deployments + +When users connect a GitHub repository to their Unkey project, push events automatically trigger deployments. This document explains the complete flow from webhook to running containers. + +## Architecture Overview + +``` +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ GitHub Push │────▶│ ctrl-api │────▶│ Restate │ +│ Webhook │ │ webhook handler│ │ Deploy Workflow│ +└─────────────────┘ └────────┬────────┘ └────────┬────────┘ + │ │ + ▼ ▼ + ┌─────────────────┐ ┌─────────────────┐ + │ MySQL │ │ Depot BuildKit │ + │ (deployment) │ │ (git context) │ + └─────────────────┘ └────────┬────────┘ + │ + ▼ + ┌─────────────────┐ + │ Container │ + │ Registry │ + └─────────────────┘ +``` + +BuildKit fetches the repository directly from GitHub using its native git context support. Authentication for private repositories is handled via a GitHub App installation token passed through BuildKit's secret provider. + +## Webhook Handler + +**Location:** `svc/ctrl/api/github_webhook.go` + +The webhook handler is intentionally thin—it validates, maps, creates a deployment record, and delegates to the durable workflow. + +### Request Flow + +1. **Validate HTTP request** + - Must be `POST` + - Requires `X-GitHub-Event` header + - Requires `X-Hub-Signature-256` header + +2. **Verify signature** + - Uses HMAC-SHA256 with the webhook secret + - Implemented in `githubclient.VerifyWebhookSignature()` + +3. **Parse push payload** + - Extracts branch from `refs/heads/` + - Non-branch refs (tags) are ignored + +4. **Map repository to project** + - Looks up `github_repo_connections` table using `(installation_id, repository_id)` + - Returns 200 OK if no connection exists (repo not linked to any project) + +5. **Determine environment** + - If pushed branch equals project's default branch → `production` + - Otherwise → `preview` + +6. 
**Create deployment record** + +```go +db.Query.InsertDeployment(ctx, s.db.RW(), db.InsertDeploymentParams{ + ID: deploymentID, + Status: db.DeploymentsStatusPending, + GitCommitSha: payload.After, + GitBranch: branch, + // ... other fields +}) +``` + +7. **Trigger workflow** + +```go +deployClient := hydrav1.NewDeploymentServiceIngressClient(s.restate, deploymentID) +invocation, err := deployClient.Deploy().Send(ctx, &hydrav1.DeployRequest{ + DeploymentId: deploymentID, + Source: &hydrav1.DeployRequest_Git{ + Git: &hydrav1.GitSource{ + InstallationId: repoConnection.InstallationID, + Repository: payload.Repository.FullName, + CommitSha: payload.After, + ContextPath: "./sub", + DockerfilePath: "Dockerfile", + }, + }, +}) +``` + +## Deploy Workflow + +**Location:** `svc/ctrl/worker/deploy/deploy_handler.go` + +The Restate workflow orchestrates the complete deployment lifecycle. It handles two source types: + +- `GitSource`: Build from GitHub repository (via webhook) +- `DockerImage`: Use pre-built image (via CLI/API) + +### Workflow Steps + + LoadMeta[Load Deployment/Project/Environment] + LoadMeta --> CheckSource{Source Type?} + + CheckSource -->|GitSource| BuildGit[Build from Git] + CheckSource -->|DockerImage| UseImage[Use Pre-built Image] + + BuildGit --> UpdateImage[Update Deployment Image] + UseImage --> UpdateImage + + UpdateImage --> StatusDeploying[Status: Deploying] + StatusDeploying --> CreateTopology[Create Regional Topologies] + CreateTopology --> EnsureSentinels[Ensure Sentinels Exist] + EnsureSentinels --> WaitReady[Wait for Instances Ready] + WaitReady --> CreateRoutes[Create Frontline Routes] + CreateRoutes --> AssignRoutes[Assign Routes to Deployment] + AssignRoutes --> StatusReady[Status: Ready] + StatusReady --> UpdateLive[Update Live Deployment] + UpdateLive --> End([Complete]) + + style BuildGit fill:#e1f5fe + style StatusReady fill:#c8e6c9 +`} /> + +### Failure Handling + +The workflow uses a deferred handler to ensure deployments are marked 
as failed on any error: + +```go +finishedSuccessfully := false + +defer func() { + if finishedSuccessfully { + return + } + w.updateDeploymentStatus(ctx, deployment.ID, db.DeploymentsStatusFailed) +}() + +// ... workflow steps ... + +finishedSuccessfully = true +``` + +## Git-Based Build + +**Location:** `svc/ctrl/worker/deploy/build.go` + +The `buildDockerImageFromGit()` function builds container images using BuildKit's native git context support. + +### BuildKit Git Context + +The build context is passed to BuildKit as a git URL: + +```go +// Format: https://github.com/owner/repo.git#[:] +gitContextURL := fmt.Sprintf("https://github.com/%s.git#%s", repo, sha) +if contextPath != "" { + gitContextURL = fmt.Sprintf("https://github.com/%s.git#%s:%s", repo, sha, contextPath) +} +``` + +BuildKit fetches the repository at the specified commit SHA and uses it as the build context. + +### GitHub Token Authentication + +For private repositories, BuildKit needs authentication. We use BuildKit's secret provider with the well-known secret name `GIT_AUTH_TOKEN.github.com`: + +```go +solverOptions := client.SolveOpt{ + Frontend: "dockerfile.v0", + FrontendAttrs: map[string]string{ + "platform": platform, + "context": gitContextURL, + "filename": dockerfilePath, + }, + Session: []session.Attachable{ + // Registry auth for pushing images + authprovider.NewDockerAuthProvider(...), + + // GitHub token for fetching private repos + secretsprovider.FromMap(map[string][]byte{ + "GIT_AUTH_TOKEN.github.com": []byte(githubToken), + }), + }, + Exports: []client.ExportEntry{{ + Type: "image", + Attrs: map[string]string{ + "name": imageName, + "push": "true", + }, + }}, +} +``` + +When BuildKit fetches from `github.com`, it looks for a secret named `GIT_AUTH_TOKEN.github.com` and uses it for HTTP authentication. + +### Token Acquisition + +**Location:** `svc/ctrl/worker/github/client.go` + +The GitHub App client generates installation tokens: + +1. 
Create a JWT signed with the App's private key +2. Call GitHub API: `POST /app/installations/:id/access_tokens` +3. Receive a short-lived installation token (~1 hour) + +```go +ghToken, err := w.github.GetInstallationToken(params.InstallationID) +// Token is passed to BuildKit via secretsprovider +``` + +Installation tokens are scoped to the repositories where the GitHub App is installed, providing a good security boundary. + +## Depot Integration + +**Location:** `svc/ctrl/worker/deploy/build.go` + +Depot provides remote BuildKit builders with persistent caching. + +### Build Flow + +``` +┌─────────────────┐ ┌─────────────────┐ ┌─────────────────┐ +│ Get/Create │────▶│ Create Depot │────▶│ Acquire │ +│ Depot Project │ │ Build │ │ BuildKit │ +└─────────────────┘ └─────────────────┘ └────────┬────────┘ + │ + ▼ + ┌─────────────────┐ + │ Execute │ + │ buildClient. │ + │ Solve() │ + └────────┬────────┘ + │ + ▼ + ┌─────────────────┐ + │ Push to │ + │ Registry │ + └─────────────────┘ +``` + +### Depot Project Management + +Each Unkey project gets a corresponding Depot project for caching: + +```go +func (w *Workflow) getOrCreateDepotProject(ctx context.Context, unkeyProjectID string) (string, error) { + project, _ := db.Query.FindProjectById(ctx, w.db.RO(), unkeyProjectID) + + if project.DepotProjectID.Valid { + return project.DepotProjectID.String, nil + } + + // Create new Depot project + createResp, _ := projectClient.CreateProject(ctx, connect.NewRequest(&corev1.CreateProjectRequest{ + Name: fmt.Sprintf("unkey-%s", unkeyProjectID), + RegionId: w.depotConfig.ProjectRegion, + CachePolicy: &corev1.CachePolicy{ + KeepGb: 50, + KeepDays: 14, + }, + })) + + // Store Depot project ID in database + db.Query.UpdateProjectDepotID(ctx, w.db.RW(), db.UpdateProjectDepotIDParams{ + DepotProjectID: sql.NullString{String: createResp.Msg.GetProject().GetProjectId(), Valid: true}, + ID: unkeyProjectID, + }) + + return createResp.Msg.GetProject().GetProjectId(), nil +} +``` + +### 
Build Execution + +```go +// Create Depot build session +depotBuild, _ := build.NewBuild(ctx, &cliv1.CreateBuildRequest{ + ProjectId: depotProjectID, +}, w.registryConfig.Password) +defer func() { depotBuild.Finish(err) }() + +// Acquire remote BuildKit machine +buildkit, _ := machine.Acquire(ctx, depotBuild.ID, depotBuild.Token, architecture) +defer buildkit.Release() + +// Connect to BuildKit +buildClient, _ := buildkit.Connect(ctx) +defer buildClient.Close() + +// Execute build +_, err = buildClient.Solve(ctx, nil, solverOptions, buildStatusCh) +``` + +### Build Observability + +Build status events are streamed to ClickHouse for monitoring: + +```go +buildStatusCh := make(chan *client.SolveStatus, 100) +go w.processBuildStatus(buildStatusCh, workspaceID, projectID, deploymentID) + +// Each completed vertex is logged +w.clickhouse.BufferBuildStep(schema.BuildStepV1{ + WorkspaceID: workspaceID, + ProjectID: projectID, + DeploymentID: deploymentID, + StepID: vertex.Digest.String(), + Name: vertex.Name, + Cached: vertex.Cached, + // ... +}) +``` + +## Proto Definitions + +**Location:** `proto/hydra/v1/deployment.proto` + +```protobuf +message GitSource { + int64 installation_id = 1; // GitHub App installation ID + string repository = 2; // "owner/repo" format + string commit_sha = 3; // Full 40-character SHA + string context_path = 4; // Subdirectory for build context + string dockerfile_path = 5; // Path to Dockerfile +} + +message DeployRequest { + string deployment_id = 1; + optional string key_auth_id = 2; + + oneof source { + GitSource git = 3; + DockerImage docker_image = 4; + } +} +``` + +## Important Constraints + +### Commit SHA +BuildKit requires the **full 40-character commit SHA** for reliable builds. Short SHAs may fail or fetch unexpected objects. + +### Private Submodules +Private submodules using SSH URLs won't work with this approach. The `GIT_AUTH_TOKEN` only provides HTTPS authentication. 
For SSH submodules, you'd need SSH key forwarding through BuildKit. + +### Context Path +The context path is normalized before use: +- Whitespace trimmed +- Leading `/` stripped +- `.` treated as repository root + +### External API +The external API (`ctrlv1.CreateDeploymentRequest`) only supports pre-built Docker images. Git-based builds are only triggered via the GitHub webhook integration. + +## Workflow Orchestration Patterns + +The Deploy workflow demonstrates several Restate patterns: + +1. **Idempotent state loading**: Deployment ID as stable external key +2. **Deferred failure handler**: Crashes converge to correct terminal status +3. **Side effects in `restate.Run`**: Deterministic replay + retry semantics +4. **Unique constraints for idempotency**: Sentinel creation uses DB unique index +5. **Fan-out/join**: Parallel region operations with barrier wait diff --git a/web/apps/engineering/content/docs/architecture/workflows/meta.json b/web/apps/engineering/content/docs/architecture/workflows/meta.json index 662c336dab..607e73300b 100644 --- a/web/apps/engineering/content/docs/architecture/workflows/meta.json +++ b/web/apps/engineering/content/docs/architecture/workflows/meta.json @@ -6,6 +6,7 @@ "pages": [ "index", "deployment-service", + "github-deployments", "deployment-workflow-pull-based", "routing-service", "creating-services" diff --git a/web/apps/engineering/content/docs/contributing/testing/integration-tests.mdx b/web/apps/engineering/content/docs/contributing/testing/integration-tests.mdx index 04f5c06eea..2120dd4e8a 100644 --- a/web/apps/engineering/content/docs/contributing/testing/integration-tests.mdx +++ b/web/apps/engineering/content/docs/contributing/testing/integration-tests.mdx @@ -31,16 +31,16 @@ func TestRedisCaching(t *testing.T) { For services that are expensive to start or that many tests share, we use `pkg/testutil/containers`. 
This package returns configuration for containers that are started once via docker-compose and shared across all tests. The tradeoff is that tests need to be careful about cleanup, since data written by one test is visible to the next. ```go -import "github.com/unkeyed/unkey/pkg/testutil/containers" +import "github.com/unkeyed/unkey/pkg/dockertest" func TestDatabaseQuery(t *testing.T) { - mysqlCfg := containers.MySQL(t) + mysqlCfg := dockertest.MySQL(t) - db, err := sql.Open("mysql", mysqlCfg.FormatDSN()) + db, err := sql.Open("mysql", mysqlCfg.DSN) require.NoError(t, err) defer db.Close() - // This MySQL instance is shared with other tests + // This MySQL instance is isolated per test } ``` diff --git a/web/internal/db/src/schema/github_app.ts b/web/internal/db/src/schema/github_app.ts index b4b353535b..db45c7a248 100644 --- a/web/internal/db/src/schema/github_app.ts +++ b/web/internal/db/src/schema/github_app.ts @@ -1,5 +1,5 @@ import { relations } from "drizzle-orm"; -import { bigint, mysqlTable, varchar } from "drizzle-orm/mysql-core"; +import { bigint, index, mysqlTable, varchar } from "drizzle-orm/mysql-core"; import { projects } from "./projects"; import { lifecycleDates } from "./util/lifecycle_dates"; import { workspaces } from "./workspaces"; @@ -18,18 +18,22 @@ export const githubAppInstallationsRelations = relations(githubAppInstallations, }), })); -export const githubRepoConnections = mysqlTable("github_repo_connections", { - pk: bigint("pk", { mode: "number", unsigned: true }).autoincrement().primaryKey(), - projectId: varchar("project_id", { length: 64 }).notNull().unique(), - installationId: bigint("installation_id", { - mode: "number", - }).notNull(), - repositoryId: bigint("repository_id", { mode: "number" }).notNull(), - repositoryFullName: varchar("repository_full_name", { - length: 500, - }).notNull(), - ...lifecycleDates, -}); +export const githubRepoConnections = mysqlTable( + "github_repo_connections", + { + pk: bigint("pk", { mode: 
"number", unsigned: true }).autoincrement().primaryKey(), + projectId: varchar("project_id", { length: 64 }).notNull().unique(), + installationId: bigint("installation_id", { + mode: "number", + }).notNull(), + repositoryId: bigint("repository_id", { mode: "number" }).notNull(), + repositoryFullName: varchar("repository_full_name", { + length: 500, + }).notNull(), + ...lifecycleDates, + }, + (table) => [index("installation_id_idx").on(table.installationId)], +); export const githubRepoConnectionsRelations = relations(githubRepoConnections, ({ one }) => ({ project: one(projects, { diff --git a/web/tools/migrate/v1_deprecation.ts b/web/tools/migrate/v1_deprecation.ts index 737561756d..add0262f9f 100644 --- a/web/tools/migrate/v1_deprecation.ts +++ b/web/tools/migrate/v1_deprecation.ts @@ -1,7 +1,5 @@ import { ClickHouse } from "@unkey/clickhouse"; -import { mysqlDrizzle, schema } from "@unkey/db"; -import { Resend } from "@unkey/resend"; -import { WorkOS } from "@workos-inc/node"; +import { type Workspace, mysqlDrizzle, schema } from "@unkey/db"; import mysql from "mysql2/promise"; import { z } from "zod"; async function main() { @@ -16,11 +14,6 @@ async function main() { await conn.ping(); const db = mysqlDrizzle(conn, { schema, mode: "default" }); - const workos = new WorkOS(process.env.WORKOS_API_KEY); - - const resend = new Resend({ - apiKey: process.env.RESEND_API_KEY, - }); console.log("starting"); const rows = await ch.querier.query({ @@ -32,7 +25,7 @@ async function main() { WHERE startsWith(path, '/v1/') AND workspace_id != '' AND workspace_id != 'ws_2vUFz88G6TuzMQHZaUhXADNyZWMy' // filter out special workspaces - AND time >= (now() - INTERVAL 30 DAY) + AND time >= (now() - INTERVAL 1 DAY) GROUP BY workspace_id, path`, schema: z.object({ workspace_id: z.string(), @@ -44,62 +37,31 @@ async function main() { process.exit(1); } - let emailsSent = 0; + const workspaces: Record = {}; - console.log( - `Found ${ - new Set(rows.val.map((r) => r.workspace_id)).size - 
} workspaces across ${rows.val.length} paths`, - ); - const workspaceToPaths = new Map(); for (const row of rows.val) { - if (row.workspace_id.startsWith("test_")) { - continue; - } - const paths = workspaceToPaths.get(row.workspace_id) || []; - paths.push(row.path); - workspaceToPaths.set(row.workspace_id, paths); - } - - for (const [workspaceId, paths] of workspaceToPaths.entries()) { - if (paths.includes("/v1/analytics.getVerifications")) { - console.warn( - `Skipping workspace ${workspaceId} due to analytics endpoint: ${paths.join(", ")}`, - ); + if (workspaces[row.workspace_id]) { continue; } - console.log(workspaceId, paths); const workspace = await db.query.workspaces.findFirst({ - where: (table, { eq }) => eq(table.id, workspaceId), + where: (table, { eq }) => eq(table.id, row.workspace_id), }); if (!workspace) { - console.error(`Workspace ${workspaceId} not found`); + console.error(`Workspace ${row.workspace_id} not found`); continue; } - - console.log(workspace.name); - - const members = await workos.userManagement.listOrganizationMemberships({ - organizationId: workspace.orgId, - limit: 100, - }); - - for (const member of members.data) { - await new Promise((resolve) => setTimeout(resolve, 500)); - const user = await workos.userManagement.getUser(member.userId); - console.log(`User: ${user.email}`); - await resend.sendApiV1MigrationEmail({ - email: user.email, - name: user.firstName, - workspace: workspace.name, - deprecatedEndpoints: paths, - }); - emailsSent++; - } + workspaces[workspace.id] = workspace; } - console.info(`Emails sent: ${emailsSent}`); + console.table( + Object.values(workspaces).map((ws) => ({ + id: ws.id, + name: ws.name, + org: ws.orgId, + sub: ws.stripeSubscriptionId, + })), + ); } main();