diff --git a/.github/workflows/restate_scheduled_tasks.yml b/.github/workflows/restate_scheduled_tasks.yml index 46b1f3c7d7..500770f248 100644 --- a/.github/workflows/restate_scheduled_tasks.yml +++ b/.github/workflows/restate_scheduled_tasks.yml @@ -82,5 +82,5 @@ jobs: -H "Content-Type: application/json" \ -H "Authorization: Bearer ${RESTATE_API_KEY}" \ -H "idempotency-key: scale-down-idle-$(date -u +'%Y-%m-%dT%H:%M')" \ - "${RESTATE_INGRESS_URL}/hydra.v1.DeploymentService/global/ScaleDownIdleDeployments/send" \ + "${RESTATE_INGRESS_URL}/hydra.v1.DeployService/global/ScaleDownIdlePreviewDeployments/send" \ -d '{}' diff --git a/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go b/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go index 726d3bdfe7..1a6bfbd3e8 100644 --- a/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go +++ b/gen/proto/ctrl/v1/ctrlv1connect/deployment.connect.go @@ -21,8 +21,8 @@ import ( const _ = connect.IsAtLeastVersion1_13_0 const ( - // DeploymentServiceName is the fully-qualified name of the DeploymentService service. - DeploymentServiceName = "ctrl.v1.DeploymentService" + // DeployServiceName is the fully-qualified name of the DeployService service. + DeployServiceName = "ctrl.v1.DeployService" ) // These constants are the fully-qualified names of the RPCs defined in this package. They're @@ -33,22 +33,20 @@ const ( // reflection-formatted method names, remove the leading slash and convert the remaining slash to a // period. const ( - // DeploymentServiceCreateDeploymentProcedure is the fully-qualified name of the DeploymentService's + // DeployServiceCreateDeploymentProcedure is the fully-qualified name of the DeployService's // CreateDeployment RPC. 
- DeploymentServiceCreateDeploymentProcedure = "/ctrl.v1.DeploymentService/CreateDeployment" - // DeploymentServiceGetDeploymentProcedure is the fully-qualified name of the DeploymentService's + DeployServiceCreateDeploymentProcedure = "/ctrl.v1.DeployService/CreateDeployment" + // DeployServiceGetDeploymentProcedure is the fully-qualified name of the DeployService's // GetDeployment RPC. - DeploymentServiceGetDeploymentProcedure = "/ctrl.v1.DeploymentService/GetDeployment" - // DeploymentServiceRollbackProcedure is the fully-qualified name of the DeploymentService's - // Rollback RPC. - DeploymentServiceRollbackProcedure = "/ctrl.v1.DeploymentService/Rollback" - // DeploymentServicePromoteProcedure is the fully-qualified name of the DeploymentService's Promote - // RPC. - DeploymentServicePromoteProcedure = "/ctrl.v1.DeploymentService/Promote" + DeployServiceGetDeploymentProcedure = "/ctrl.v1.DeployService/GetDeployment" + // DeployServiceRollbackProcedure is the fully-qualified name of the DeployService's Rollback RPC. + DeployServiceRollbackProcedure = "/ctrl.v1.DeployService/Rollback" + // DeployServicePromoteProcedure is the fully-qualified name of the DeployService's Promote RPC. + DeployServicePromoteProcedure = "/ctrl.v1.DeployService/Promote" ) -// DeploymentServiceClient is a client for the ctrl.v1.DeploymentService service. -type DeploymentServiceClient interface { +// DeployServiceClient is a client for the ctrl.v1.DeployService service. +type DeployServiceClient interface { // Create a new deployment with a prebuilt docker image CreateDeployment(context.Context, *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) // Get deployment details @@ -59,74 +57,74 @@ type DeploymentServiceClient interface { Promote(context.Context, *connect.Request[v1.PromoteRequest]) (*connect.Response[v1.PromoteResponse], error) } -// NewDeploymentServiceClient constructs a client for the ctrl.v1.DeploymentService service. 
By -// default, it uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, -// and sends uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the -// connect.WithGRPC() or connect.WithGRPCWeb() options. +// NewDeployServiceClient constructs a client for the ctrl.v1.DeployService service. By default, it +// uses the Connect protocol with the binary Protobuf Codec, asks for gzipped responses, and sends +// uncompressed requests. To use the gRPC or gRPC-Web protocols, supply the connect.WithGRPC() or +// connect.WithGRPCWeb() options. // // The URL supplied here should be the base URL for the Connect or gRPC server (for example, // http://api.acme.com or https://acme.com/grpc). -func NewDeploymentServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) DeploymentServiceClient { +func NewDeployServiceClient(httpClient connect.HTTPClient, baseURL string, opts ...connect.ClientOption) DeployServiceClient { baseURL = strings.TrimRight(baseURL, "/") - deploymentServiceMethods := v1.File_ctrl_v1_deployment_proto.Services().ByName("DeploymentService").Methods() - return &deploymentServiceClient{ + deployServiceMethods := v1.File_ctrl_v1_deployment_proto.Services().ByName("DeployService").Methods() + return &deployServiceClient{ createDeployment: connect.NewClient[v1.CreateDeploymentRequest, v1.CreateDeploymentResponse]( httpClient, - baseURL+DeploymentServiceCreateDeploymentProcedure, - connect.WithSchema(deploymentServiceMethods.ByName("CreateDeployment")), + baseURL+DeployServiceCreateDeploymentProcedure, + connect.WithSchema(deployServiceMethods.ByName("CreateDeployment")), connect.WithClientOptions(opts...), ), getDeployment: connect.NewClient[v1.GetDeploymentRequest, v1.GetDeploymentResponse]( httpClient, - baseURL+DeploymentServiceGetDeploymentProcedure, - connect.WithSchema(deploymentServiceMethods.ByName("GetDeployment")), + baseURL+DeployServiceGetDeploymentProcedure, + 
connect.WithSchema(deployServiceMethods.ByName("GetDeployment")), connect.WithClientOptions(opts...), ), rollback: connect.NewClient[v1.RollbackRequest, v1.RollbackResponse]( httpClient, - baseURL+DeploymentServiceRollbackProcedure, - connect.WithSchema(deploymentServiceMethods.ByName("Rollback")), + baseURL+DeployServiceRollbackProcedure, + connect.WithSchema(deployServiceMethods.ByName("Rollback")), connect.WithClientOptions(opts...), ), promote: connect.NewClient[v1.PromoteRequest, v1.PromoteResponse]( httpClient, - baseURL+DeploymentServicePromoteProcedure, - connect.WithSchema(deploymentServiceMethods.ByName("Promote")), + baseURL+DeployServicePromoteProcedure, + connect.WithSchema(deployServiceMethods.ByName("Promote")), connect.WithClientOptions(opts...), ), } } -// deploymentServiceClient implements DeploymentServiceClient. -type deploymentServiceClient struct { +// deployServiceClient implements DeployServiceClient. +type deployServiceClient struct { createDeployment *connect.Client[v1.CreateDeploymentRequest, v1.CreateDeploymentResponse] getDeployment *connect.Client[v1.GetDeploymentRequest, v1.GetDeploymentResponse] rollback *connect.Client[v1.RollbackRequest, v1.RollbackResponse] promote *connect.Client[v1.PromoteRequest, v1.PromoteResponse] } -// CreateDeployment calls ctrl.v1.DeploymentService.CreateDeployment. -func (c *deploymentServiceClient) CreateDeployment(ctx context.Context, req *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) { +// CreateDeployment calls ctrl.v1.DeployService.CreateDeployment. +func (c *deployServiceClient) CreateDeployment(ctx context.Context, req *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) { return c.createDeployment.CallUnary(ctx, req) } -// GetDeployment calls ctrl.v1.DeploymentService.GetDeployment. 
-func (c *deploymentServiceClient) GetDeployment(ctx context.Context, req *connect.Request[v1.GetDeploymentRequest]) (*connect.Response[v1.GetDeploymentResponse], error) { +// GetDeployment calls ctrl.v1.DeployService.GetDeployment. +func (c *deployServiceClient) GetDeployment(ctx context.Context, req *connect.Request[v1.GetDeploymentRequest]) (*connect.Response[v1.GetDeploymentResponse], error) { return c.getDeployment.CallUnary(ctx, req) } -// Rollback calls ctrl.v1.DeploymentService.Rollback. -func (c *deploymentServiceClient) Rollback(ctx context.Context, req *connect.Request[v1.RollbackRequest]) (*connect.Response[v1.RollbackResponse], error) { +// Rollback calls ctrl.v1.DeployService.Rollback. +func (c *deployServiceClient) Rollback(ctx context.Context, req *connect.Request[v1.RollbackRequest]) (*connect.Response[v1.RollbackResponse], error) { return c.rollback.CallUnary(ctx, req) } -// Promote calls ctrl.v1.DeploymentService.Promote. -func (c *deploymentServiceClient) Promote(ctx context.Context, req *connect.Request[v1.PromoteRequest]) (*connect.Response[v1.PromoteResponse], error) { +// Promote calls ctrl.v1.DeployService.Promote. +func (c *deployServiceClient) Promote(ctx context.Context, req *connect.Request[v1.PromoteRequest]) (*connect.Response[v1.PromoteResponse], error) { return c.promote.CallUnary(ctx, req) } -// DeploymentServiceHandler is an implementation of the ctrl.v1.DeploymentService service. -type DeploymentServiceHandler interface { +// DeployServiceHandler is an implementation of the ctrl.v1.DeployService service. 
+type DeployServiceHandler interface { // Create a new deployment with a prebuilt docker image CreateDeployment(context.Context, *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) // Get deployment details @@ -137,68 +135,68 @@ type DeploymentServiceHandler interface { Promote(context.Context, *connect.Request[v1.PromoteRequest]) (*connect.Response[v1.PromoteResponse], error) } -// NewDeploymentServiceHandler builds an HTTP handler from the service implementation. It returns -// the path on which to mount the handler and the handler itself. +// NewDeployServiceHandler builds an HTTP handler from the service implementation. It returns the +// path on which to mount the handler and the handler itself. // // By default, handlers support the Connect, gRPC, and gRPC-Web protocols with the binary Protobuf // and JSON codecs. They also support gzip compression. -func NewDeploymentServiceHandler(svc DeploymentServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { - deploymentServiceMethods := v1.File_ctrl_v1_deployment_proto.Services().ByName("DeploymentService").Methods() - deploymentServiceCreateDeploymentHandler := connect.NewUnaryHandler( - DeploymentServiceCreateDeploymentProcedure, +func NewDeployServiceHandler(svc DeployServiceHandler, opts ...connect.HandlerOption) (string, http.Handler) { + deployServiceMethods := v1.File_ctrl_v1_deployment_proto.Services().ByName("DeployService").Methods() + deployServiceCreateDeploymentHandler := connect.NewUnaryHandler( + DeployServiceCreateDeploymentProcedure, svc.CreateDeployment, - connect.WithSchema(deploymentServiceMethods.ByName("CreateDeployment")), + connect.WithSchema(deployServiceMethods.ByName("CreateDeployment")), connect.WithHandlerOptions(opts...), ) - deploymentServiceGetDeploymentHandler := connect.NewUnaryHandler( - DeploymentServiceGetDeploymentProcedure, + deployServiceGetDeploymentHandler := connect.NewUnaryHandler( + 
DeployServiceGetDeploymentProcedure, svc.GetDeployment, - connect.WithSchema(deploymentServiceMethods.ByName("GetDeployment")), + connect.WithSchema(deployServiceMethods.ByName("GetDeployment")), connect.WithHandlerOptions(opts...), ) - deploymentServiceRollbackHandler := connect.NewUnaryHandler( - DeploymentServiceRollbackProcedure, + deployServiceRollbackHandler := connect.NewUnaryHandler( + DeployServiceRollbackProcedure, svc.Rollback, - connect.WithSchema(deploymentServiceMethods.ByName("Rollback")), + connect.WithSchema(deployServiceMethods.ByName("Rollback")), connect.WithHandlerOptions(opts...), ) - deploymentServicePromoteHandler := connect.NewUnaryHandler( - DeploymentServicePromoteProcedure, + deployServicePromoteHandler := connect.NewUnaryHandler( + DeployServicePromoteProcedure, svc.Promote, - connect.WithSchema(deploymentServiceMethods.ByName("Promote")), + connect.WithSchema(deployServiceMethods.ByName("Promote")), connect.WithHandlerOptions(opts...), ) - return "/ctrl.v1.DeploymentService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + return "/ctrl.v1.DeployService/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { switch r.URL.Path { - case DeploymentServiceCreateDeploymentProcedure: - deploymentServiceCreateDeploymentHandler.ServeHTTP(w, r) - case DeploymentServiceGetDeploymentProcedure: - deploymentServiceGetDeploymentHandler.ServeHTTP(w, r) - case DeploymentServiceRollbackProcedure: - deploymentServiceRollbackHandler.ServeHTTP(w, r) - case DeploymentServicePromoteProcedure: - deploymentServicePromoteHandler.ServeHTTP(w, r) + case DeployServiceCreateDeploymentProcedure: + deployServiceCreateDeploymentHandler.ServeHTTP(w, r) + case DeployServiceGetDeploymentProcedure: + deployServiceGetDeploymentHandler.ServeHTTP(w, r) + case DeployServiceRollbackProcedure: + deployServiceRollbackHandler.ServeHTTP(w, r) + case DeployServicePromoteProcedure: + deployServicePromoteHandler.ServeHTTP(w, r) default: http.NotFound(w, 
r) } }) } -// UnimplementedDeploymentServiceHandler returns CodeUnimplemented from all methods. -type UnimplementedDeploymentServiceHandler struct{} +// UnimplementedDeployServiceHandler returns CodeUnimplemented from all methods. +type UnimplementedDeployServiceHandler struct{} -func (UnimplementedDeploymentServiceHandler) CreateDeployment(context.Context, *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeploymentService.CreateDeployment is not implemented")) +func (UnimplementedDeployServiceHandler) CreateDeployment(context.Context, *connect.Request[v1.CreateDeploymentRequest]) (*connect.Response[v1.CreateDeploymentResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeployService.CreateDeployment is not implemented")) } -func (UnimplementedDeploymentServiceHandler) GetDeployment(context.Context, *connect.Request[v1.GetDeploymentRequest]) (*connect.Response[v1.GetDeploymentResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeploymentService.GetDeployment is not implemented")) +func (UnimplementedDeployServiceHandler) GetDeployment(context.Context, *connect.Request[v1.GetDeploymentRequest]) (*connect.Response[v1.GetDeploymentResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeployService.GetDeployment is not implemented")) } -func (UnimplementedDeploymentServiceHandler) Rollback(context.Context, *connect.Request[v1.RollbackRequest]) (*connect.Response[v1.RollbackResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeploymentService.Rollback is not implemented")) +func (UnimplementedDeployServiceHandler) Rollback(context.Context, *connect.Request[v1.RollbackRequest]) (*connect.Response[v1.RollbackResponse], error) { + return nil, 
connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeployService.Rollback is not implemented")) } -func (UnimplementedDeploymentServiceHandler) Promote(context.Context, *connect.Request[v1.PromoteRequest]) (*connect.Response[v1.PromoteResponse], error) { - return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeploymentService.Promote is not implemented")) +func (UnimplementedDeployServiceHandler) Promote(context.Context, *connect.Request[v1.PromoteRequest]) (*connect.Response[v1.PromoteResponse], error) { + return nil, connect.NewError(connect.CodeUnimplemented, errors.New("ctrl.v1.DeployService.Promote is not implemented")) } diff --git a/gen/proto/ctrl/v1/deployment.pb.go b/gen/proto/ctrl/v1/deployment.pb.go index 48e04bb24f..5d77a4ea82 100644 --- a/gen/proto/ctrl/v1/deployment.pb.go +++ b/gen/proto/ctrl/v1/deployment.pb.go @@ -1136,8 +1136,8 @@ const file_ctrl_v1_deployment_proto_rawDesc = "" + "SourceType\x12\x1b\n" + "\x17SOURCE_TYPE_UNSPECIFIED\x10\x00\x12\x13\n" + "\x0fSOURCE_TYPE_GIT\x10\x01\x12\x1a\n" + - "\x16SOURCE_TYPE_CLI_UPLOAD\x10\x022\xc3\x02\n" + - "\x11DeploymentService\x12Y\n" + + "\x16SOURCE_TYPE_CLI_UPLOAD\x10\x022\xbf\x02\n" + + "\rDeployService\x12Y\n" + "\x10CreateDeployment\x12 .ctrl.v1.CreateDeploymentRequest\x1a!.ctrl.v1.CreateDeploymentResponse\"\x00\x12P\n" + "\rGetDeployment\x12\x1d.ctrl.v1.GetDeploymentRequest\x1a\x1e.ctrl.v1.GetDeploymentResponse\"\x00\x12A\n" + "\bRollback\x12\x18.ctrl.v1.RollbackRequest\x1a\x19.ctrl.v1.RollbackResponse\"\x00\x12>\n" + @@ -1185,14 +1185,14 @@ var file_ctrl_v1_deployment_proto_depIdxs = []int32{ 9, // 5: ctrl.v1.Deployment.topology:type_name -> ctrl.v1.Topology 8, // 6: ctrl.v1.Deployment.steps:type_name -> ctrl.v1.DeploymentStep 10, // 7: ctrl.v1.Topology.regions:type_name -> ctrl.v1.RegionalConfig - 2, // 8: ctrl.v1.DeploymentService.CreateDeployment:input_type -> ctrl.v1.CreateDeploymentRequest - 5, // 9: ctrl.v1.DeploymentService.GetDeployment:input_type -> 
ctrl.v1.GetDeploymentRequest - 11, // 10: ctrl.v1.DeploymentService.Rollback:input_type -> ctrl.v1.RollbackRequest - 13, // 11: ctrl.v1.DeploymentService.Promote:input_type -> ctrl.v1.PromoteRequest - 4, // 12: ctrl.v1.DeploymentService.CreateDeployment:output_type -> ctrl.v1.CreateDeploymentResponse - 6, // 13: ctrl.v1.DeploymentService.GetDeployment:output_type -> ctrl.v1.GetDeploymentResponse - 12, // 14: ctrl.v1.DeploymentService.Rollback:output_type -> ctrl.v1.RollbackResponse - 14, // 15: ctrl.v1.DeploymentService.Promote:output_type -> ctrl.v1.PromoteResponse + 2, // 8: ctrl.v1.DeployService.CreateDeployment:input_type -> ctrl.v1.CreateDeploymentRequest + 5, // 9: ctrl.v1.DeployService.GetDeployment:input_type -> ctrl.v1.GetDeploymentRequest + 11, // 10: ctrl.v1.DeployService.Rollback:input_type -> ctrl.v1.RollbackRequest + 13, // 11: ctrl.v1.DeployService.Promote:input_type -> ctrl.v1.PromoteRequest + 4, // 12: ctrl.v1.DeployService.CreateDeployment:output_type -> ctrl.v1.CreateDeploymentResponse + 6, // 13: ctrl.v1.DeployService.GetDeployment:output_type -> ctrl.v1.GetDeploymentResponse + 12, // 14: ctrl.v1.DeployService.Rollback:output_type -> ctrl.v1.RollbackResponse + 14, // 15: ctrl.v1.DeployService.Promote:output_type -> ctrl.v1.PromoteResponse 12, // [12:16] is the sub-list for method output_type 8, // [8:12] is the sub-list for method input_type 8, // [8:8] is the sub-list for extension type_name diff --git a/gen/proto/hydra/v1/BUILD.bazel b/gen/proto/hydra/v1/BUILD.bazel index 536c24b6d5..d065fcbfcd 100644 --- a/gen/proto/hydra/v1/BUILD.bazel +++ b/gen/proto/hydra/v1/BUILD.bazel @@ -9,6 +9,8 @@ go_library( "clickhouse_user_restate.pb.go", "custom_domain.pb.go", "custom_domain_restate.pb.go", + "deploy.pb.go", + "deploy_restate.pb.go", "deployment.pb.go", "deployment_restate.pb.go", "quota_check.pb.go", diff --git a/gen/proto/hydra/v1/deploy.pb.go b/gen/proto/hydra/v1/deploy.pb.go new file mode 100644 index 0000000000..760b0de81f --- /dev/null +++ 
b/gen/proto/hydra/v1/deploy.pb.go @@ -0,0 +1,651 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.36.8 +// protoc (unknown) +// source: hydra/v1/deploy.proto + +package hydrav1 + +import ( + _ "github.com/restatedev/sdk-go/generated/dev/restate/sdk" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" + unsafe "unsafe" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +type ScaleDownIdlePreviewDeploymentsRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScaleDownIdlePreviewDeploymentsRequest) Reset() { + *x = ScaleDownIdlePreviewDeploymentsRequest{} + mi := &file_hydra_v1_deploy_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScaleDownIdlePreviewDeploymentsRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleDownIdlePreviewDeploymentsRequest) ProtoMessage() {} + +func (x *ScaleDownIdlePreviewDeploymentsRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[0] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaleDownIdlePreviewDeploymentsRequest.ProtoReflect.Descriptor instead. 
+func (*ScaleDownIdlePreviewDeploymentsRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{0} +} + +type ScaleDownIdlePreviewDeploymentsResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *ScaleDownIdlePreviewDeploymentsResponse) Reset() { + *x = ScaleDownIdlePreviewDeploymentsResponse{} + mi := &file_hydra_v1_deploy_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *ScaleDownIdlePreviewDeploymentsResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ScaleDownIdlePreviewDeploymentsResponse) ProtoMessage() {} + +func (x *ScaleDownIdlePreviewDeploymentsResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[1] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ScaleDownIdlePreviewDeploymentsResponse.ProtoReflect.Descriptor instead. +func (*ScaleDownIdlePreviewDeploymentsResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{1} +} + +// DockerImage references a pre-built container image to deploy directly, +// skipping the build step. 
+type DockerImage struct { + state protoimpl.MessageState `protogen:"open.v1"` + Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DockerImage) Reset() { + *x = DockerImage{} + mi := &file_hydra_v1_deploy_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DockerImage) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DockerImage) ProtoMessage() {} + +func (x *DockerImage) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[2] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DockerImage.ProtoReflect.Descriptor instead. +func (*DockerImage) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{2} +} + +func (x *DockerImage) GetImage() string { + if x != nil { + return x.Image + } + return "" +} + +// GitSource specifies a repository and commit to build a Docker image from. +type GitSource struct { + state protoimpl.MessageState `protogen:"open.v1"` + // GitHub App installation ID used to clone the repository. + InstallationId int64 `protobuf:"varint,1,opt,name=installation_id,json=installationId,proto3" json:"installation_id,omitempty"` + // Full repository identifier (e.g., "owner/repo"). + Repository string `protobuf:"bytes,2,opt,name=repository,proto3" json:"repository,omitempty"` + CommitSha string `protobuf:"bytes,3,opt,name=commit_sha,json=commitSha,proto3" json:"commit_sha,omitempty"` + // Subdirectory within the repository to use as the Docker build context. + ContextPath string `protobuf:"bytes,4,opt,name=context_path,json=contextPath,proto3" json:"context_path,omitempty"` + // Path to the Dockerfile, relative to context_path. 
+ DockerfilePath string `protobuf:"bytes,5,opt,name=dockerfile_path,json=dockerfilePath,proto3" json:"dockerfile_path,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *GitSource) Reset() { + *x = GitSource{} + mi := &file_hydra_v1_deploy_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *GitSource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*GitSource) ProtoMessage() {} + +func (x *GitSource) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[3] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use GitSource.ProtoReflect.Descriptor instead. +func (*GitSource) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{3} +} + +func (x *GitSource) GetInstallationId() int64 { + if x != nil { + return x.InstallationId + } + return 0 +} + +func (x *GitSource) GetRepository() string { + if x != nil { + return x.Repository + } + return "" +} + +func (x *GitSource) GetCommitSha() string { + if x != nil { + return x.CommitSha + } + return "" +} + +func (x *GitSource) GetContextPath() string { + if x != nil { + return x.ContextPath + } + return "" +} + +func (x *GitSource) GetDockerfilePath() string { + if x != nil { + return x.DockerfilePath + } + return "" +} + +type DeployRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + DeploymentId string `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"deployment_id,omitempty"` + // TODO: remove this field, it is unused. 
+ KeyAuthId *string `protobuf:"bytes,2,opt,name=key_auth_id,json=keyAuthId,proto3,oneof" json:"key_auth_id,omitempty"` + // Types that are valid to be assigned to Source: + // + // *DeployRequest_Git + // *DeployRequest_DockerImage + Source isDeployRequest_Source `protobuf_oneof:"source"` + // Container command override (e.g., ["./app", "serve"]) + Command []string `protobuf:"bytes,5,rep,name=command,proto3" json:"command,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeployRequest) Reset() { + *x = DeployRequest{} + mi := &file_hydra_v1_deploy_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeployRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeployRequest) ProtoMessage() {} + +func (x *DeployRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[4] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeployRequest.ProtoReflect.Descriptor instead. 
+func (*DeployRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{4} +} + +func (x *DeployRequest) GetDeploymentId() string { + if x != nil { + return x.DeploymentId + } + return "" +} + +func (x *DeployRequest) GetKeyAuthId() string { + if x != nil && x.KeyAuthId != nil { + return *x.KeyAuthId + } + return "" +} + +func (x *DeployRequest) GetSource() isDeployRequest_Source { + if x != nil { + return x.Source + } + return nil +} + +func (x *DeployRequest) GetGit() *GitSource { + if x != nil { + if x, ok := x.Source.(*DeployRequest_Git); ok { + return x.Git + } + } + return nil +} + +func (x *DeployRequest) GetDockerImage() *DockerImage { + if x != nil { + if x, ok := x.Source.(*DeployRequest_DockerImage); ok { + return x.DockerImage + } + } + return nil +} + +func (x *DeployRequest) GetCommand() []string { + if x != nil { + return x.Command + } + return nil +} + +type isDeployRequest_Source interface { + isDeployRequest_Source() +} + +type DeployRequest_Git struct { + Git *GitSource `protobuf:"bytes,3,opt,name=git,proto3,oneof"` +} + +type DeployRequest_DockerImage struct { + DockerImage *DockerImage `protobuf:"bytes,4,opt,name=docker_image,json=dockerImage,proto3,oneof"` +} + +func (*DeployRequest_Git) isDeployRequest_Source() {} + +func (*DeployRequest_DockerImage) isDeployRequest_Source() {} + +type DeployResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *DeployResponse) Reset() { + *x = DeployResponse{} + mi := &file_hydra_v1_deploy_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *DeployResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DeployResponse) ProtoMessage() {} + +func (x *DeployResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[5] + if x != nil { + ms := 
protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use DeployResponse.ProtoReflect.Descriptor instead. +func (*DeployResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{5} +} + +// RollbackRequest identifies the deployment to roll back from and the +// deployment to restore. Both must belong to the same project and environment. +type RollbackRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // The current live deployment to roll back from. + SourceDeploymentId string `protobuf:"bytes,1,opt,name=source_deployment_id,json=sourceDeploymentId,proto3" json:"source_deployment_id,omitempty"` + // A previous deployment to restore traffic to. + TargetDeploymentId string `protobuf:"bytes,2,opt,name=target_deployment_id,json=targetDeploymentId,proto3" json:"target_deployment_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RollbackRequest) Reset() { + *x = RollbackRequest{} + mi := &file_hydra_v1_deploy_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RollbackRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RollbackRequest) ProtoMessage() {} + +func (x *RollbackRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[6] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead. 
+func (*RollbackRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{6} +} + +func (x *RollbackRequest) GetSourceDeploymentId() string { + if x != nil { + return x.SourceDeploymentId + } + return "" +} + +func (x *RollbackRequest) GetTargetDeploymentId() string { + if x != nil { + return x.TargetDeploymentId + } + return "" +} + +type RollbackResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *RollbackResponse) Reset() { + *x = RollbackResponse{} + mi := &file_hydra_v1_deploy_proto_msgTypes[7] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *RollbackResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RollbackResponse) ProtoMessage() {} + +func (x *RollbackResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[7] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RollbackResponse.ProtoReflect.Descriptor instead. +func (*RollbackResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{7} +} + +// PromoteRequest identifies a ready deployment to promote to live. 
+type PromoteRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + TargetDeploymentId string `protobuf:"bytes,1,opt,name=target_deployment_id,json=targetDeploymentId,proto3" json:"target_deployment_id,omitempty"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PromoteRequest) Reset() { + *x = PromoteRequest{} + mi := &file_hydra_v1_deploy_proto_msgTypes[8] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PromoteRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PromoteRequest) ProtoMessage() {} + +func (x *PromoteRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[8] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PromoteRequest.ProtoReflect.Descriptor instead. +func (*PromoteRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{8} +} + +func (x *PromoteRequest) GetTargetDeploymentId() string { + if x != nil { + return x.TargetDeploymentId + } + return "" +} + +type PromoteResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache +} + +func (x *PromoteResponse) Reset() { + *x = PromoteResponse{} + mi := &file_hydra_v1_deploy_proto_msgTypes[9] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) +} + +func (x *PromoteResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*PromoteResponse) ProtoMessage() {} + +func (x *PromoteResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deploy_proto_msgTypes[9] + if x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return 
ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use PromoteResponse.ProtoReflect.Descriptor instead. +func (*PromoteResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deploy_proto_rawDescGZIP(), []int{9} +} + +var File_hydra_v1_deploy_proto protoreflect.FileDescriptor + +const file_hydra_v1_deploy_proto_rawDesc = "" + + "\n" + + "\x15hydra/v1/deploy.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"(\n" + + "&ScaleDownIdlePreviewDeploymentsRequest\")\n" + + "'ScaleDownIdlePreviewDeploymentsResponse\"#\n" + + "\vDockerImage\x12\x14\n" + + "\x05image\x18\x01 \x01(\tR\x05image\"\xbf\x01\n" + + "\tGitSource\x12'\n" + + "\x0finstallation_id\x18\x01 \x01(\x03R\x0einstallationId\x12\x1e\n" + + "\n" + + "repository\x18\x02 \x01(\tR\n" + + "repository\x12\x1d\n" + + "\n" + + "commit_sha\x18\x03 \x01(\tR\tcommitSha\x12!\n" + + "\fcontext_path\x18\x04 \x01(\tR\vcontextPath\x12'\n" + + "\x0fdockerfile_path\x18\x05 \x01(\tR\x0edockerfilePath\"\xf2\x01\n" + + "\rDeployRequest\x12#\n" + + "\rdeployment_id\x18\x01 \x01(\tR\fdeploymentId\x12#\n" + + "\vkey_auth_id\x18\x02 \x01(\tH\x01R\tkeyAuthId\x88\x01\x01\x12'\n" + + "\x03git\x18\x03 \x01(\v2\x13.hydra.v1.GitSourceH\x00R\x03git\x12:\n" + + "\fdocker_image\x18\x04 \x01(\v2\x15.hydra.v1.DockerImageH\x00R\vdockerImage\x12\x18\n" + + "\acommand\x18\x05 \x03(\tR\acommandB\b\n" + + "\x06sourceB\x0e\n" + + "\f_key_auth_id\"\x10\n" + + "\x0eDeployResponse\"u\n" + + "\x0fRollbackRequest\x120\n" + + "\x14source_deployment_id\x18\x01 \x01(\tR\x12sourceDeploymentId\x120\n" + + "\x14target_deployment_id\x18\x02 \x01(\tR\x12targetDeploymentId\"\x12\n" + + "\x10RollbackResponse\"B\n" + + "\x0ePromoteRequest\x120\n" + + "\x14target_deployment_id\x18\x01 \x01(\tR\x12targetDeploymentId\"\x11\n" + + "\x0fPromoteResponse2\xe6\x02\n" + + "\rDeployService\x12=\n" + + "\x06Deploy\x12\x17.hydra.v1.DeployRequest\x1a\x18.hydra.v1.DeployResponse\"\x00\x12C\n" + + 
"\bRollback\x12\x19.hydra.v1.RollbackRequest\x1a\x1a.hydra.v1.RollbackResponse\"\x00\x12@\n" + + "\aPromote\x12\x18.hydra.v1.PromoteRequest\x1a\x19.hydra.v1.PromoteResponse\"\x00\x12\x88\x01\n" + + "\x1fScaleDownIdlePreviewDeployments\x120.hydra.v1.ScaleDownIdlePreviewDeploymentsRequest\x1a1.hydra.v1.ScaleDownIdlePreviewDeploymentsResponse\"\x00\x1a\x04\x98\x80\x01\x02B\x91\x01\n" + + "\fcom.hydra.v1B\vDeployProtoP\x01Z3github.com/unkeyed/unkey/gen/proto/hydra/v1;hydrav1\xa2\x02\x03HXX\xaa\x02\bHydra.V1\xca\x02\bHydra\\V1\xe2\x02\x14Hydra\\V1\\GPBMetadata\xea\x02\tHydra::V1b\x06proto3" + +var ( + file_hydra_v1_deploy_proto_rawDescOnce sync.Once + file_hydra_v1_deploy_proto_rawDescData []byte +) + +func file_hydra_v1_deploy_proto_rawDescGZIP() []byte { + file_hydra_v1_deploy_proto_rawDescOnce.Do(func() { + file_hydra_v1_deploy_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_hydra_v1_deploy_proto_rawDesc), len(file_hydra_v1_deploy_proto_rawDesc))) + }) + return file_hydra_v1_deploy_proto_rawDescData +} + +var file_hydra_v1_deploy_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_hydra_v1_deploy_proto_goTypes = []any{ + (*ScaleDownIdlePreviewDeploymentsRequest)(nil), // 0: hydra.v1.ScaleDownIdlePreviewDeploymentsRequest + (*ScaleDownIdlePreviewDeploymentsResponse)(nil), // 1: hydra.v1.ScaleDownIdlePreviewDeploymentsResponse + (*DockerImage)(nil), // 2: hydra.v1.DockerImage + (*GitSource)(nil), // 3: hydra.v1.GitSource + (*DeployRequest)(nil), // 4: hydra.v1.DeployRequest + (*DeployResponse)(nil), // 5: hydra.v1.DeployResponse + (*RollbackRequest)(nil), // 6: hydra.v1.RollbackRequest + (*RollbackResponse)(nil), // 7: hydra.v1.RollbackResponse + (*PromoteRequest)(nil), // 8: hydra.v1.PromoteRequest + (*PromoteResponse)(nil), // 9: hydra.v1.PromoteResponse +} +var file_hydra_v1_deploy_proto_depIdxs = []int32{ + 3, // 0: hydra.v1.DeployRequest.git:type_name -> hydra.v1.GitSource + 2, // 1: 
hydra.v1.DeployRequest.docker_image:type_name -> hydra.v1.DockerImage + 4, // 2: hydra.v1.DeployService.Deploy:input_type -> hydra.v1.DeployRequest + 6, // 3: hydra.v1.DeployService.Rollback:input_type -> hydra.v1.RollbackRequest + 8, // 4: hydra.v1.DeployService.Promote:input_type -> hydra.v1.PromoteRequest + 0, // 5: hydra.v1.DeployService.ScaleDownIdlePreviewDeployments:input_type -> hydra.v1.ScaleDownIdlePreviewDeploymentsRequest + 5, // 6: hydra.v1.DeployService.Deploy:output_type -> hydra.v1.DeployResponse + 7, // 7: hydra.v1.DeployService.Rollback:output_type -> hydra.v1.RollbackResponse + 9, // 8: hydra.v1.DeployService.Promote:output_type -> hydra.v1.PromoteResponse + 1, // 9: hydra.v1.DeployService.ScaleDownIdlePreviewDeployments:output_type -> hydra.v1.ScaleDownIdlePreviewDeploymentsResponse + 6, // [6:10] is the sub-list for method output_type + 2, // [2:6] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_hydra_v1_deploy_proto_init() } +func file_hydra_v1_deploy_proto_init() { + if File_hydra_v1_deploy_proto != nil { + return + } + file_hydra_v1_deploy_proto_msgTypes[4].OneofWrappers = []any{ + (*DeployRequest_Git)(nil), + (*DeployRequest_DockerImage)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: unsafe.Slice(unsafe.StringData(file_hydra_v1_deploy_proto_rawDesc), len(file_hydra_v1_deploy_proto_rawDesc)), + NumEnums: 0, + NumMessages: 10, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_hydra_v1_deploy_proto_goTypes, + DependencyIndexes: file_hydra_v1_deploy_proto_depIdxs, + MessageInfos: file_hydra_v1_deploy_proto_msgTypes, + }.Build() + File_hydra_v1_deploy_proto = out.File + file_hydra_v1_deploy_proto_goTypes = nil + file_hydra_v1_deploy_proto_depIdxs = nil +} diff 
--git a/gen/proto/hydra/v1/deploy_restate.pb.go b/gen/proto/hydra/v1/deploy_restate.pb.go new file mode 100644 index 0000000000..6d35440574 --- /dev/null +++ b/gen/proto/hydra/v1/deploy_restate.pb.go @@ -0,0 +1,240 @@ +// Code generated by protoc-gen-go-restate. DO NOT EDIT. +// versions: +// - protoc-gen-go-restate v0.1 +// - protoc (unknown) +// source: hydra/v1/deploy.proto + +package hydrav1 + +import ( + fmt "fmt" + sdk_go "github.com/restatedev/sdk-go" + encoding "github.com/restatedev/sdk-go/encoding" + ingress "github.com/restatedev/sdk-go/ingress" +) + +// DeployServiceClient is the client API for hydra.v1.DeployService service. +// +// DeployService orchestrates the lifecycle of application deployments as +// durable Restate workflows. Each RPC is idempotent and can safely resume from +// any step after a crash. +// +// Deploy handles the full pipeline from building Docker images through +// provisioning containers and configuring domain routing. Rollback and Promote +// manage traffic switching between deployments by reassigning sticky frontline +// routes atomically through the routing service. +// +// ScaleDownIdlePreviewDeployments runs on a cron to find preview deployments that have +// received no traffic for 6 hours and sets them to standby. SetDeploymentDesiredState +// targets a single deployment, typically sent with a 30-minute delay after a new +// deployment replaces it. +type DeployServiceClient interface { + // Deploy executes the full deployment workflow: build (if git source), provision + // containers across regions, wait for health, configure domain routing, and + // update the project's live deployment pointer for production environments. + // Sets deployment status to failed on any error. + Deploy(opts ...sdk_go.ClientOption) sdk_go.Client[*DeployRequest, *DeployResponse] + // Rollback switches sticky frontline routes (environment and live) from the + // current live deployment back to a previous one. 
Marks the project as rolled + // back so future deploys don't automatically reclaim live routes. + // Source must be the current live deployment; both must share the same project + // and environment. + Rollback(opts ...sdk_go.ClientOption) sdk_go.Client[*RollbackRequest, *RollbackResponse] + // Promote reassigns sticky frontline routes to a target deployment and clears + // the rolled-back flag, restoring normal deployment flow. + // Target must be in ready status and not already the live deployment. + Promote(opts ...sdk_go.ClientOption) sdk_go.Client[*PromoteRequest, *PromoteResponse] + // ScaleDownIdlePreviewDeployments iterates all preview environments and sets any + // deployment to standby that has received zero requests in the last 6 hours. + // Intended to be called by a cron job. + ScaleDownIdlePreviewDeployments(opts ...sdk_go.ClientOption) sdk_go.Client[*ScaleDownIdlePreviewDeploymentsRequest, *ScaleDownIdlePreviewDeploymentsResponse] +} + +type deployServiceClient struct { + ctx sdk_go.Context + workflowID string + options []sdk_go.ClientOption +} + +func NewDeployServiceClient(ctx sdk_go.Context, workflowID string, opts ...sdk_go.ClientOption) DeployServiceClient { + cOpts := append([]sdk_go.ClientOption{sdk_go.WithProtoJSON}, opts...) + return &deployServiceClient{ + ctx, + workflowID, + cOpts, + } +} +func (c *deployServiceClient) Deploy(opts ...sdk_go.ClientOption) sdk_go.Client[*DeployRequest, *DeployResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*DeployRequest](sdk_go.Workflow[*DeployResponse](c.ctx, "hydra.v1.DeployService", c.workflowID, "Deploy", cOpts...)) +} + +func (c *deployServiceClient) Rollback(opts ...sdk_go.ClientOption) sdk_go.Client[*RollbackRequest, *RollbackResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) 
+ } + return sdk_go.WithRequestType[*RollbackRequest](sdk_go.Workflow[*RollbackResponse](c.ctx, "hydra.v1.DeployService", c.workflowID, "Rollback", cOpts...)) +} + +func (c *deployServiceClient) Promote(opts ...sdk_go.ClientOption) sdk_go.Client[*PromoteRequest, *PromoteResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*PromoteRequest](sdk_go.Workflow[*PromoteResponse](c.ctx, "hydra.v1.DeployService", c.workflowID, "Promote", cOpts...)) +} + +func (c *deployServiceClient) ScaleDownIdlePreviewDeployments(opts ...sdk_go.ClientOption) sdk_go.Client[*ScaleDownIdlePreviewDeploymentsRequest, *ScaleDownIdlePreviewDeploymentsResponse] { + cOpts := c.options + if len(opts) > 0 { + cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) + } + return sdk_go.WithRequestType[*ScaleDownIdlePreviewDeploymentsRequest](sdk_go.Workflow[*ScaleDownIdlePreviewDeploymentsResponse](c.ctx, "hydra.v1.DeployService", c.workflowID, "ScaleDownIdlePreviewDeployments", cOpts...)) +} + +// DeployServiceIngressClient is the ingress client API for hydra.v1.DeployService service. +// +// This client is used to call the service from outside of a Restate context. +type DeployServiceIngressClient interface { + // Deploy executes the full deployment workflow: build (if git source), provision + // containers across regions, wait for health, configure domain routing, and + // update the project's live deployment pointer for production environments. + // Sets deployment status to failed on any error. + Deploy() ingress.Requester[*DeployRequest, *DeployResponse] + // Rollback switches sticky frontline routes (environment and live) from the + // current live deployment back to a previous one. Marks the project as rolled + // back so future deploys don't automatically reclaim live routes. + // Source must be the current live deployment; both must share the same project + // and environment. 
+ Rollback() ingress.Requester[*RollbackRequest, *RollbackResponse] + // Promote reassigns sticky frontline routes to a target deployment and clears + // the rolled-back flag, restoring normal deployment flow. + // Target must be in ready status and not already the live deployment. + Promote() ingress.Requester[*PromoteRequest, *PromoteResponse] + // ScaleDownIdlePreviewDeployments iterates all preview environments and sets any + // deployment to standby that has received zero requests in the last 6 hours. + // Intended to be called by a cron job. + ScaleDownIdlePreviewDeployments() ingress.Requester[*ScaleDownIdlePreviewDeploymentsRequest, *ScaleDownIdlePreviewDeploymentsResponse] +} + +type deployServiceIngressClient struct { + client *ingress.Client + serviceName string + workflowID string +} + +func NewDeployServiceIngressClient(client *ingress.Client, workflowID string) DeployServiceIngressClient { + return &deployServiceIngressClient{ + client, + "hydra.v1.DeployService", + workflowID, + } +} + +func (c *deployServiceIngressClient) Deploy() ingress.Requester[*DeployRequest, *DeployResponse] { + codec := encoding.ProtoJSONCodec + return ingress.NewRequester[*DeployRequest, *DeployResponse](c.client, c.serviceName, "Deploy", &c.workflowID, &codec) +} + +func (c *deployServiceIngressClient) Rollback() ingress.Requester[*RollbackRequest, *RollbackResponse] { + codec := encoding.ProtoJSONCodec + return ingress.NewRequester[*RollbackRequest, *RollbackResponse](c.client, c.serviceName, "Rollback", &c.workflowID, &codec) +} + +func (c *deployServiceIngressClient) Promote() ingress.Requester[*PromoteRequest, *PromoteResponse] { + codec := encoding.ProtoJSONCodec + return ingress.NewRequester[*PromoteRequest, *PromoteResponse](c.client, c.serviceName, "Promote", &c.workflowID, &codec) +} + +func (c *deployServiceIngressClient) ScaleDownIdlePreviewDeployments() ingress.Requester[*ScaleDownIdlePreviewDeploymentsRequest, *ScaleDownIdlePreviewDeploymentsResponse] { + codec 
:= encoding.ProtoJSONCodec + return ingress.NewRequester[*ScaleDownIdlePreviewDeploymentsRequest, *ScaleDownIdlePreviewDeploymentsResponse](c.client, c.serviceName, "ScaleDownIdlePreviewDeployments", &c.workflowID, &codec) +} + +// DeployServiceServer is the server API for hydra.v1.DeployService service. +// All implementations should embed UnimplementedDeployServiceServer +// for forward compatibility. +// +// DeployService orchestrates the lifecycle of application deployments as +// durable Restate workflows. Each RPC is idempotent and can safely resume from +// any step after a crash. +// +// Deploy handles the full pipeline from building Docker images through +// provisioning containers and configuring domain routing. Rollback and Promote +// manage traffic switching between deployments by reassigning sticky frontline +// routes atomically through the routing service. +// +// ScaleDownIdlePreviewDeployments runs on a cron to find preview deployments that have +// received no traffic for 6 hours and sets them to standby. SetDeploymentDesiredState +// targets a single deployment, typically sent with a 30-minute delay after a new +// deployment replaces it. +type DeployServiceServer interface { + // Deploy executes the full deployment workflow: build (if git source), provision + // containers across regions, wait for health, configure domain routing, and + // update the project's live deployment pointer for production environments. + // Sets deployment status to failed on any error. + Deploy(ctx sdk_go.WorkflowSharedContext, req *DeployRequest) (*DeployResponse, error) + // Rollback switches sticky frontline routes (environment and live) from the + // current live deployment back to a previous one. Marks the project as rolled + // back so future deploys don't automatically reclaim live routes. + // Source must be the current live deployment; both must share the same project + // and environment. 
+ Rollback(ctx sdk_go.WorkflowSharedContext, req *RollbackRequest) (*RollbackResponse, error) + // Promote reassigns sticky frontline routes to a target deployment and clears + // the rolled-back flag, restoring normal deployment flow. + // Target must be in ready status and not already the live deployment. + Promote(ctx sdk_go.WorkflowSharedContext, req *PromoteRequest) (*PromoteResponse, error) + // ScaleDownIdlePreviewDeployments iterates all preview environments and sets any + // deployment to standby that has received zero requests in the last 6 hours. + // Intended to be called by a cron job. + ScaleDownIdlePreviewDeployments(ctx sdk_go.WorkflowSharedContext, req *ScaleDownIdlePreviewDeploymentsRequest) (*ScaleDownIdlePreviewDeploymentsResponse, error) +} + +// UnimplementedDeployServiceServer should be embedded to have +// forward compatible implementations. +// +// NOTE: this should be embedded by value instead of pointer to avoid a nil +// pointer dereference when methods are called. 
+type UnimplementedDeployServiceServer struct{} + +func (UnimplementedDeployServiceServer) Deploy(ctx sdk_go.WorkflowSharedContext, req *DeployRequest) (*DeployResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method Deploy not implemented"), 501) +} +func (UnimplementedDeployServiceServer) Rollback(ctx sdk_go.WorkflowSharedContext, req *RollbackRequest) (*RollbackResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method Rollback not implemented"), 501) +} +func (UnimplementedDeployServiceServer) Promote(ctx sdk_go.WorkflowSharedContext, req *PromoteRequest) (*PromoteResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method Promote not implemented"), 501) +} +func (UnimplementedDeployServiceServer) ScaleDownIdlePreviewDeployments(ctx sdk_go.WorkflowSharedContext, req *ScaleDownIdlePreviewDeploymentsRequest) (*ScaleDownIdlePreviewDeploymentsResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method ScaleDownIdlePreviewDeployments not implemented"), 501) +} +func (UnimplementedDeployServiceServer) testEmbeddedByValue() {} + +// UnsafeDeployServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to DeployServiceServer will +// result in compilation errors. +type UnsafeDeployServiceServer interface { + mustEmbedUnimplementedDeployServiceServer() +} + +func NewDeployServiceServer(srv DeployServiceServer, opts ...sdk_go.ServiceDefinitionOption) sdk_go.ServiceDefinition { + // If the following call panics, it indicates UnimplementedDeployServiceServer was + // embedded by pointer and is nil. This will cause panics if an + // unimplemented method is ever invoked, so we test this at initialization + // time to prevent it from happening at runtime later due to I/O. 
+ if t, ok := srv.(interface{ testEmbeddedByValue() }); ok { + t.testEmbeddedByValue() + } + sOpts := append([]sdk_go.ServiceDefinitionOption{sdk_go.WithProtoJSON}, opts...) + router := sdk_go.NewWorkflow("hydra.v1.DeployService", sOpts...) + router = router.Handler("Deploy", sdk_go.NewWorkflowSharedHandler(srv.Deploy)) + router = router.Handler("Rollback", sdk_go.NewWorkflowSharedHandler(srv.Rollback)) + router = router.Handler("Promote", sdk_go.NewWorkflowSharedHandler(srv.Promote)) + router = router.Handler("ScaleDownIdlePreviewDeployments", sdk_go.NewWorkflowSharedHandler(srv.ScaleDownIdlePreviewDeployments)) + return router +} diff --git a/gen/proto/hydra/v1/deployment.pb.go b/gen/proto/hydra/v1/deployment.pb.go index 2fcc7ff399..891c2cde7d 100644 --- a/gen/proto/hydra/v1/deployment.pb.go +++ b/gen/proto/hydra/v1/deployment.pb.go @@ -22,100 +22,86 @@ const ( _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) -type ScaleDownIdleDeploymentsRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} +// DeploymentDesiredState represents the target lifecycle state for a deployment. +// UNSPECIFIED is treated as an error and causes a terminal failure in +// ChangeDesiredState. +type DeploymentDesiredState int32 -func (x *ScaleDownIdleDeploymentsRequest) Reset() { - *x = ScaleDownIdleDeploymentsRequest{} - mi := &file_hydra_v1_deployment_proto_msgTypes[0] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *ScaleDownIdleDeploymentsRequest) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*ScaleDownIdleDeploymentsRequest) ProtoMessage() {} +const ( + DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_UNSPECIFIED DeploymentDesiredState = 0 + // RUNNING means the deployment should have active containers serving traffic. 
+ DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_RUNNING DeploymentDesiredState = 1 + // STANDBY means the deployment's containers are scaled down but the + // deployment can be resumed without a full rebuild. + DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_STANDBY DeploymentDesiredState = 2 + // ARCHIVED means the deployment is permanently decommissioned. + DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_ARCHIVED DeploymentDesiredState = 3 +) -func (x *ScaleDownIdleDeploymentsRequest) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[0] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms +// Enum value maps for DeploymentDesiredState. +var ( + DeploymentDesiredState_name = map[int32]string{ + 0: "DEPLOYMENT_DESIRED_STATE_UNSPECIFIED", + 1: "DEPLOYMENT_DESIRED_STATE_RUNNING", + 2: "DEPLOYMENT_DESIRED_STATE_STANDBY", + 3: "DEPLOYMENT_DESIRED_STATE_ARCHIVED", } - return mi.MessageOf(x) -} + DeploymentDesiredState_value = map[string]int32{ + "DEPLOYMENT_DESIRED_STATE_UNSPECIFIED": 0, + "DEPLOYMENT_DESIRED_STATE_RUNNING": 1, + "DEPLOYMENT_DESIRED_STATE_STANDBY": 2, + "DEPLOYMENT_DESIRED_STATE_ARCHIVED": 3, + } +) -// Deprecated: Use ScaleDownIdleDeploymentsRequest.ProtoReflect.Descriptor instead. 
-func (*ScaleDownIdleDeploymentsRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{0} +func (x DeploymentDesiredState) Enum() *DeploymentDesiredState { + p := new(DeploymentDesiredState) + *p = x + return p } -type ScaleDownIdleDeploymentsResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache +func (x DeploymentDesiredState) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) } -func (x *ScaleDownIdleDeploymentsResponse) Reset() { - *x = ScaleDownIdleDeploymentsResponse{} - mi := &file_hydra_v1_deployment_proto_msgTypes[1] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) +func (DeploymentDesiredState) Descriptor() protoreflect.EnumDescriptor { + return file_hydra_v1_deployment_proto_enumTypes[0].Descriptor() } -func (x *ScaleDownIdleDeploymentsResponse) String() string { - return protoimpl.X.MessageStringOf(x) +func (DeploymentDesiredState) Type() protoreflect.EnumType { + return &file_hydra_v1_deployment_proto_enumTypes[0] } -func (*ScaleDownIdleDeploymentsResponse) ProtoMessage() {} - -func (x *ScaleDownIdleDeploymentsResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[1] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) +func (x DeploymentDesiredState) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) } -// Deprecated: Use ScaleDownIdleDeploymentsResponse.ProtoReflect.Descriptor instead. -func (*ScaleDownIdleDeploymentsResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{1} +// Deprecated: Use DeploymentDesiredState.Descriptor instead. 
+func (DeploymentDesiredState) EnumDescriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{0} } -type DockerImage struct { +type ClearScheduledStateChangesRequest struct { state protoimpl.MessageState `protogen:"open.v1"` - Image string `protobuf:"bytes,1,opt,name=image,proto3" json:"image,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *DockerImage) Reset() { - *x = DockerImage{} - mi := &file_hydra_v1_deployment_proto_msgTypes[2] +func (x *ClearScheduledStateChangesRequest) Reset() { + *x = ClearScheduledStateChangesRequest{} + mi := &file_hydra_v1_deployment_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *DockerImage) String() string { +func (x *ClearScheduledStateChangesRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DockerImage) ProtoMessage() {} +func (*ClearScheduledStateChangesRequest) ProtoMessage() {} -func (x *DockerImage) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[2] +func (x *ClearScheduledStateChangesRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[0] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -126,44 +112,32 @@ func (x *DockerImage) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DockerImage.ProtoReflect.Descriptor instead. -func (*DockerImage) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{2} -} - -func (x *DockerImage) GetImage() string { - if x != nil { - return x.Image - } - return "" +// Deprecated: Use ClearScheduledStateChangesRequest.ProtoReflect.Descriptor instead. 
+func (*ClearScheduledStateChangesRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{0} } -type GitSource struct { - state protoimpl.MessageState `protogen:"open.v1"` - InstallationId int64 `protobuf:"varint,1,opt,name=installation_id,json=installationId,proto3" json:"installation_id,omitempty"` - Repository string `protobuf:"bytes,2,opt,name=repository,proto3" json:"repository,omitempty"` - CommitSha string `protobuf:"bytes,3,opt,name=commit_sha,json=commitSha,proto3" json:"commit_sha,omitempty"` - ContextPath string `protobuf:"bytes,4,opt,name=context_path,json=contextPath,proto3" json:"context_path,omitempty"` - DockerfilePath string `protobuf:"bytes,5,opt,name=dockerfile_path,json=dockerfilePath,proto3" json:"dockerfile_path,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache +type ClearScheduledStateChangesResponse struct { + state protoimpl.MessageState `protogen:"open.v1"` + unknownFields protoimpl.UnknownFields + sizeCache protoimpl.SizeCache } -func (x *GitSource) Reset() { - *x = GitSource{} - mi := &file_hydra_v1_deployment_proto_msgTypes[3] +func (x *ClearScheduledStateChangesResponse) Reset() { + *x = ClearScheduledStateChangesResponse{} + mi := &file_hydra_v1_deployment_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *GitSource) String() string { +func (x *ClearScheduledStateChangesResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*GitSource) ProtoMessage() {} +func (*ClearScheduledStateChangesResponse) ProtoMessage() {} -func (x *GitSource) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[3] +func (x *ClearScheduledStateChangesResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[1] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -174,76 +148,36 @@ 
func (x *GitSource) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use GitSource.ProtoReflect.Descriptor instead. -func (*GitSource) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{3} -} - -func (x *GitSource) GetInstallationId() int64 { - if x != nil { - return x.InstallationId - } - return 0 -} - -func (x *GitSource) GetRepository() string { - if x != nil { - return x.Repository - } - return "" -} - -func (x *GitSource) GetCommitSha() string { - if x != nil { - return x.CommitSha - } - return "" -} - -func (x *GitSource) GetContextPath() string { - if x != nil { - return x.ContextPath - } - return "" -} - -func (x *GitSource) GetDockerfilePath() string { - if x != nil { - return x.DockerfilePath - } - return "" +// Deprecated: Use ClearScheduledStateChangesResponse.ProtoReflect.Descriptor instead. +func (*ClearScheduledStateChangesResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{1} } -type DeployRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - DeploymentId string `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"deployment_id,omitempty"` - KeyAuthId *string `protobuf:"bytes,2,opt,name=key_auth_id,json=keyAuthId,proto3,oneof" json:"key_auth_id,omitempty"` - // Types that are valid to be assigned to Source: - // - // *DeployRequest_Git - // *DeployRequest_DockerImage - Source isDeployRequest_Source `protobuf_oneof:"source"` - // Container command override (e.g., ["./app", "serve"]) - Command []string `protobuf:"bytes,5,rep,name=command,proto3" json:"command,omitempty"` +type ScheduleDesiredStateChangeRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Relative delay in milliseconds before the state change should take effect. + // Set to 0 to execute immediately. 
+ DelayMillis int64 `protobuf:"varint,1,opt,name=delay_millis,json=delayMillis,proto3" json:"delay_millis,omitempty"` + State DeploymentDesiredState `protobuf:"varint,2,opt,name=state,proto3,enum=hydra.v1.DeploymentDesiredState" json:"state,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *DeployRequest) Reset() { - *x = DeployRequest{} - mi := &file_hydra_v1_deployment_proto_msgTypes[4] +func (x *ScheduleDesiredStateChangeRequest) Reset() { + *x = ScheduleDesiredStateChangeRequest{} + mi := &file_hydra_v1_deployment_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *DeployRequest) String() string { +func (x *ScheduleDesiredStateChangeRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*DeployRequest) ProtoMessage() {} +func (*ScheduleDesiredStateChangeRequest) ProtoMessage() {} -func (x *DeployRequest) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[4] +func (x *ScheduleDesiredStateChangeRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[2] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -254,132 +188,46 @@ func (x *DeployRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use DeployRequest.ProtoReflect.Descriptor instead. 
-func (*DeployRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{4} -} - -func (x *DeployRequest) GetDeploymentId() string { - if x != nil { - return x.DeploymentId - } - return "" -} - -func (x *DeployRequest) GetKeyAuthId() string { - if x != nil && x.KeyAuthId != nil { - return *x.KeyAuthId - } - return "" -} - -func (x *DeployRequest) GetSource() isDeployRequest_Source { - if x != nil { - return x.Source - } - return nil -} - -func (x *DeployRequest) GetGit() *GitSource { - if x != nil { - if x, ok := x.Source.(*DeployRequest_Git); ok { - return x.Git - } - } - return nil +// Deprecated: Use ScheduleDesiredStateChangeRequest.ProtoReflect.Descriptor instead. +func (*ScheduleDesiredStateChangeRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{2} } -func (x *DeployRequest) GetDockerImage() *DockerImage { +func (x *ScheduleDesiredStateChangeRequest) GetDelayMillis() int64 { if x != nil { - if x, ok := x.Source.(*DeployRequest_DockerImage); ok { - return x.DockerImage - } + return x.DelayMillis } - return nil + return 0 } -func (x *DeployRequest) GetCommand() []string { +func (x *ScheduleDesiredStateChangeRequest) GetState() DeploymentDesiredState { if x != nil { - return x.Command + return x.State } - return nil -} - -type isDeployRequest_Source interface { - isDeployRequest_Source() -} - -type DeployRequest_Git struct { - Git *GitSource `protobuf:"bytes,3,opt,name=git,proto3,oneof"` + return DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_UNSPECIFIED } -type DeployRequest_DockerImage struct { - DockerImage *DockerImage `protobuf:"bytes,4,opt,name=docker_image,json=dockerImage,proto3,oneof"` -} - -func (*DeployRequest_Git) isDeployRequest_Source() {} - -func (*DeployRequest_DockerImage) isDeployRequest_Source() {} - -type DeployResponse struct { +type ScheduleDesiredStateChangeResponse struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields 
protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *DeployResponse) Reset() { - *x = DeployResponse{} - mi := &file_hydra_v1_deployment_proto_msgTypes[5] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *DeployResponse) String() string { - return protoimpl.X.MessageStringOf(x) -} - -func (*DeployResponse) ProtoMessage() {} - -func (x *DeployResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[5] - if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms - } - return mi.MessageOf(x) -} - -// Deprecated: Use DeployResponse.ProtoReflect.Descriptor instead. -func (*DeployResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{5} -} - -type RollbackRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - SourceDeploymentId string `protobuf:"bytes,1,opt,name=source_deployment_id,json=sourceDeploymentId,proto3" json:"source_deployment_id,omitempty"` - TargetDeploymentId string `protobuf:"bytes,2,opt,name=target_deployment_id,json=targetDeploymentId,proto3" json:"target_deployment_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *RollbackRequest) Reset() { - *x = RollbackRequest{} - mi := &file_hydra_v1_deployment_proto_msgTypes[6] +func (x *ScheduleDesiredStateChangeResponse) Reset() { + *x = ScheduleDesiredStateChangeResponse{} + mi := &file_hydra_v1_deployment_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *RollbackRequest) String() string { +func (x *ScheduleDesiredStateChangeResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RollbackRequest) ProtoMessage() {} +func (*ScheduleDesiredStateChangeResponse) ProtoMessage() {} -func (x *RollbackRequest) ProtoReflect() 
protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[6] +func (x *ScheduleDesiredStateChangeResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[3] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -390,46 +238,39 @@ func (x *RollbackRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RollbackRequest.ProtoReflect.Descriptor instead. -func (*RollbackRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{6} -} - -func (x *RollbackRequest) GetSourceDeploymentId() string { - if x != nil { - return x.SourceDeploymentId - } - return "" -} - -func (x *RollbackRequest) GetTargetDeploymentId() string { - if x != nil { - return x.TargetDeploymentId - } - return "" +// Deprecated: Use ScheduleDesiredStateChangeResponse.ProtoReflect.Descriptor instead. +func (*ScheduleDesiredStateChangeResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{3} } -type RollbackResponse struct { - state protoimpl.MessageState `protogen:"open.v1"` +// ChangeDesiredStateRequest is sent internally by ScheduleDesiredStateChange +// via a delayed Restate call. Callers should not invoke this directly. +type ChangeDesiredStateRequest struct { + state protoimpl.MessageState `protogen:"open.v1"` + // Nonce generated by the originating ScheduleDesiredStateChange call. Used + // to implement last-writer-wins: if this nonce doesn't match the stored + // transition record, the call is a stale superseded transition and no-ops. 
+ Nonce string `protobuf:"bytes,1,opt,name=nonce,proto3" json:"nonce,omitempty"` + State DeploymentDesiredState `protobuf:"varint,2,opt,name=state,proto3,enum=hydra.v1.DeploymentDesiredState" json:"state,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *RollbackResponse) Reset() { - *x = RollbackResponse{} - mi := &file_hydra_v1_deployment_proto_msgTypes[7] +func (x *ChangeDesiredStateRequest) Reset() { + *x = ChangeDesiredStateRequest{} + mi := &file_hydra_v1_deployment_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *RollbackResponse) String() string { +func (x *ChangeDesiredStateRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*RollbackResponse) ProtoMessage() {} +func (*ChangeDesiredStateRequest) ProtoMessage() {} -func (x *RollbackResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[7] +func (x *ChangeDesiredStateRequest) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[4] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -440,76 +281,46 @@ func (x *RollbackResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use RollbackResponse.ProtoReflect.Descriptor instead. 
-func (*RollbackResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{7} -} - -type PromoteRequest struct { - state protoimpl.MessageState `protogen:"open.v1"` - TargetDeploymentId string `protobuf:"bytes,1,opt,name=target_deployment_id,json=targetDeploymentId,proto3" json:"target_deployment_id,omitempty"` - unknownFields protoimpl.UnknownFields - sizeCache protoimpl.SizeCache -} - -func (x *PromoteRequest) Reset() { - *x = PromoteRequest{} - mi := &file_hydra_v1_deployment_proto_msgTypes[8] - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - ms.StoreMessageInfo(mi) -} - -func (x *PromoteRequest) String() string { - return protoimpl.X.MessageStringOf(x) +// Deprecated: Use ChangeDesiredStateRequest.ProtoReflect.Descriptor instead. +func (*ChangeDesiredStateRequest) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{4} } -func (*PromoteRequest) ProtoMessage() {} - -func (x *PromoteRequest) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[8] +func (x *ChangeDesiredStateRequest) GetNonce() string { if x != nil { - ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) - if ms.LoadMessageInfo() == nil { - ms.StoreMessageInfo(mi) - } - return ms + return x.Nonce } - return mi.MessageOf(x) -} - -// Deprecated: Use PromoteRequest.ProtoReflect.Descriptor instead. 
-func (*PromoteRequest) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{8} + return "" } -func (x *PromoteRequest) GetTargetDeploymentId() string { +func (x *ChangeDesiredStateRequest) GetState() DeploymentDesiredState { if x != nil { - return x.TargetDeploymentId + return x.State } - return "" + return DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_UNSPECIFIED } -type PromoteResponse struct { +type ChangeDesiredStateResponse struct { state protoimpl.MessageState `protogen:"open.v1"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } -func (x *PromoteResponse) Reset() { - *x = PromoteResponse{} - mi := &file_hydra_v1_deployment_proto_msgTypes[9] +func (x *ChangeDesiredStateResponse) Reset() { + *x = ChangeDesiredStateResponse{} + mi := &file_hydra_v1_deployment_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } -func (x *PromoteResponse) String() string { +func (x *ChangeDesiredStateResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PromoteResponse) ProtoMessage() {} +func (*ChangeDesiredStateResponse) ProtoMessage() {} -func (x *PromoteResponse) ProtoReflect() protoreflect.Message { - mi := &file_hydra_v1_deployment_proto_msgTypes[9] +func (x *ChangeDesiredStateResponse) ProtoReflect() protoreflect.Message { + mi := &file_hydra_v1_deployment_proto_msgTypes[5] if x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -520,50 +331,35 @@ func (x *PromoteResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PromoteResponse.ProtoReflect.Descriptor instead. -func (*PromoteResponse) Descriptor() ([]byte, []int) { - return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{9} +// Deprecated: Use ChangeDesiredStateResponse.ProtoReflect.Descriptor instead. 
+func (*ChangeDesiredStateResponse) Descriptor() ([]byte, []int) { + return file_hydra_v1_deployment_proto_rawDescGZIP(), []int{5} } var File_hydra_v1_deployment_proto protoreflect.FileDescriptor const file_hydra_v1_deployment_proto_rawDesc = "" + "\n" + - "\x19hydra/v1/deployment.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"!\n" + - "\x1fScaleDownIdleDeploymentsRequest\"\"\n" + - " ScaleDownIdleDeploymentsResponse\"#\n" + - "\vDockerImage\x12\x14\n" + - "\x05image\x18\x01 \x01(\tR\x05image\"\xbf\x01\n" + - "\tGitSource\x12'\n" + - "\x0finstallation_id\x18\x01 \x01(\x03R\x0einstallationId\x12\x1e\n" + - "\n" + - "repository\x18\x02 \x01(\tR\n" + - "repository\x12\x1d\n" + - "\n" + - "commit_sha\x18\x03 \x01(\tR\tcommitSha\x12!\n" + - "\fcontext_path\x18\x04 \x01(\tR\vcontextPath\x12'\n" + - "\x0fdockerfile_path\x18\x05 \x01(\tR\x0edockerfilePath\"\xf2\x01\n" + - "\rDeployRequest\x12#\n" + - "\rdeployment_id\x18\x01 \x01(\tR\fdeploymentId\x12#\n" + - "\vkey_auth_id\x18\x02 \x01(\tH\x01R\tkeyAuthId\x88\x01\x01\x12'\n" + - "\x03git\x18\x03 \x01(\v2\x13.hydra.v1.GitSourceH\x00R\x03git\x12:\n" + - "\fdocker_image\x18\x04 \x01(\v2\x15.hydra.v1.DockerImageH\x00R\vdockerImage\x12\x18\n" + - "\acommand\x18\x05 \x03(\tR\acommandB\b\n" + - "\x06sourceB\x0e\n" + - "\f_key_auth_id\"\x10\n" + - "\x0eDeployResponse\"u\n" + - "\x0fRollbackRequest\x120\n" + - "\x14source_deployment_id\x18\x01 \x01(\tR\x12sourceDeploymentId\x120\n" + - "\x14target_deployment_id\x18\x02 \x01(\tR\x12targetDeploymentId\"\x12\n" + - "\x10RollbackResponse\"B\n" + - "\x0ePromoteRequest\x120\n" + - "\x14target_deployment_id\x18\x01 \x01(\tR\x12targetDeploymentId\"\x11\n" + - "\x0fPromoteResponse2\xd4\x02\n" + - "\x11DeploymentService\x12=\n" + - "\x06Deploy\x12\x17.hydra.v1.DeployRequest\x1a\x18.hydra.v1.DeployResponse\"\x00\x12C\n" + - "\bRollback\x12\x19.hydra.v1.RollbackRequest\x1a\x1a.hydra.v1.RollbackResponse\"\x00\x12@\n" + - 
"\aPromote\x12\x18.hydra.v1.PromoteRequest\x1a\x19.hydra.v1.PromoteResponse\"\x00\x12s\n" + - "\x18ScaleDownIdleDeployments\x12).hydra.v1.ScaleDownIdleDeploymentsRequest\x1a*.hydra.v1.ScaleDownIdleDeploymentsResponse\"\x00\x1a\x04\x98\x80\x01\x02B\x95\x01\n" + + "\x19hydra/v1/deployment.proto\x12\bhydra.v1\x1a\x18dev/restate/sdk/go.proto\"#\n" + + "!ClearScheduledStateChangesRequest\"$\n" + + "\"ClearScheduledStateChangesResponse\"~\n" + + "!ScheduleDesiredStateChangeRequest\x12!\n" + + "\fdelay_millis\x18\x01 \x01(\x03R\vdelayMillis\x126\n" + + "\x05state\x18\x02 \x01(\x0e2 .hydra.v1.DeploymentDesiredStateR\x05state\"$\n" + + "\"ScheduleDesiredStateChangeResponse\"i\n" + + "\x19ChangeDesiredStateRequest\x12\x14\n" + + "\x05nonce\x18\x01 \x01(\tR\x05nonce\x126\n" + + "\x05state\x18\x02 \x01(\x0e2 .hydra.v1.DeploymentDesiredStateR\x05state\"\x1c\n" + + "\x1aChangeDesiredStateResponse*\xb5\x01\n" + + "\x16DeploymentDesiredState\x12(\n" + + "$DEPLOYMENT_DESIRED_STATE_UNSPECIFIED\x10\x00\x12$\n" + + " DEPLOYMENT_DESIRED_STATE_RUNNING\x10\x01\x12$\n" + + " DEPLOYMENT_DESIRED_STATE_STANDBY\x10\x02\x12%\n" + + "!DEPLOYMENT_DESIRED_STATE_ARCHIVED\x10\x032\xf2\x02\n" + + "\x11DeploymentService\x12y\n" + + "\x1aScheduleDesiredStateChange\x12+.hydra.v1.ScheduleDesiredStateChangeRequest\x1a,.hydra.v1.ScheduleDesiredStateChangeResponse\"\x00\x12a\n" + + "\x12ChangeDesiredState\x12#.hydra.v1.ChangeDesiredStateRequest\x1a$.hydra.v1.ChangeDesiredStateResponse\"\x00\x12y\n" + + "\x1aClearScheduledStateChanges\x12+.hydra.v1.ClearScheduledStateChangesRequest\x1a,.hydra.v1.ClearScheduledStateChangesResponse\"\x00\x1a\x04\x98\x80\x01\x01B\x95\x01\n" + "\fcom.hydra.v1B\x0fDeploymentProtoP\x01Z3github.com/unkeyed/unkey/gen/proto/hydra/v1;hydrav1\xa2\x02\x03HXX\xaa\x02\bHydra.V1\xca\x02\bHydra\\V1\xe2\x02\x14Hydra\\V1\\GPBMetadata\xea\x02\tHydra::V1b\x06proto3" var ( @@ -578,32 +374,28 @@ func file_hydra_v1_deployment_proto_rawDescGZIP() []byte { return 
file_hydra_v1_deployment_proto_rawDescData } -var file_hydra_v1_deployment_proto_msgTypes = make([]protoimpl.MessageInfo, 10) +var file_hydra_v1_deployment_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_hydra_v1_deployment_proto_msgTypes = make([]protoimpl.MessageInfo, 6) var file_hydra_v1_deployment_proto_goTypes = []any{ - (*ScaleDownIdleDeploymentsRequest)(nil), // 0: hydra.v1.ScaleDownIdleDeploymentsRequest - (*ScaleDownIdleDeploymentsResponse)(nil), // 1: hydra.v1.ScaleDownIdleDeploymentsResponse - (*DockerImage)(nil), // 2: hydra.v1.DockerImage - (*GitSource)(nil), // 3: hydra.v1.GitSource - (*DeployRequest)(nil), // 4: hydra.v1.DeployRequest - (*DeployResponse)(nil), // 5: hydra.v1.DeployResponse - (*RollbackRequest)(nil), // 6: hydra.v1.RollbackRequest - (*RollbackResponse)(nil), // 7: hydra.v1.RollbackResponse - (*PromoteRequest)(nil), // 8: hydra.v1.PromoteRequest - (*PromoteResponse)(nil), // 9: hydra.v1.PromoteResponse + (DeploymentDesiredState)(0), // 0: hydra.v1.DeploymentDesiredState + (*ClearScheduledStateChangesRequest)(nil), // 1: hydra.v1.ClearScheduledStateChangesRequest + (*ClearScheduledStateChangesResponse)(nil), // 2: hydra.v1.ClearScheduledStateChangesResponse + (*ScheduleDesiredStateChangeRequest)(nil), // 3: hydra.v1.ScheduleDesiredStateChangeRequest + (*ScheduleDesiredStateChangeResponse)(nil), // 4: hydra.v1.ScheduleDesiredStateChangeResponse + (*ChangeDesiredStateRequest)(nil), // 5: hydra.v1.ChangeDesiredStateRequest + (*ChangeDesiredStateResponse)(nil), // 6: hydra.v1.ChangeDesiredStateResponse } var file_hydra_v1_deployment_proto_depIdxs = []int32{ - 3, // 0: hydra.v1.DeployRequest.git:type_name -> hydra.v1.GitSource - 2, // 1: hydra.v1.DeployRequest.docker_image:type_name -> hydra.v1.DockerImage - 4, // 2: hydra.v1.DeploymentService.Deploy:input_type -> hydra.v1.DeployRequest - 6, // 3: hydra.v1.DeploymentService.Rollback:input_type -> hydra.v1.RollbackRequest - 8, // 4: hydra.v1.DeploymentService.Promote:input_type -> 
hydra.v1.PromoteRequest - 0, // 5: hydra.v1.DeploymentService.ScaleDownIdleDeployments:input_type -> hydra.v1.ScaleDownIdleDeploymentsRequest - 5, // 6: hydra.v1.DeploymentService.Deploy:output_type -> hydra.v1.DeployResponse - 7, // 7: hydra.v1.DeploymentService.Rollback:output_type -> hydra.v1.RollbackResponse - 9, // 8: hydra.v1.DeploymentService.Promote:output_type -> hydra.v1.PromoteResponse - 1, // 9: hydra.v1.DeploymentService.ScaleDownIdleDeployments:output_type -> hydra.v1.ScaleDownIdleDeploymentsResponse - 6, // [6:10] is the sub-list for method output_type - 2, // [2:6] is the sub-list for method input_type + 0, // 0: hydra.v1.ScheduleDesiredStateChangeRequest.state:type_name -> hydra.v1.DeploymentDesiredState + 0, // 1: hydra.v1.ChangeDesiredStateRequest.state:type_name -> hydra.v1.DeploymentDesiredState + 3, // 2: hydra.v1.DeploymentService.ScheduleDesiredStateChange:input_type -> hydra.v1.ScheduleDesiredStateChangeRequest + 5, // 3: hydra.v1.DeploymentService.ChangeDesiredState:input_type -> hydra.v1.ChangeDesiredStateRequest + 1, // 4: hydra.v1.DeploymentService.ClearScheduledStateChanges:input_type -> hydra.v1.ClearScheduledStateChangesRequest + 4, // 5: hydra.v1.DeploymentService.ScheduleDesiredStateChange:output_type -> hydra.v1.ScheduleDesiredStateChangeResponse + 6, // 6: hydra.v1.DeploymentService.ChangeDesiredState:output_type -> hydra.v1.ChangeDesiredStateResponse + 2, // 7: hydra.v1.DeploymentService.ClearScheduledStateChanges:output_type -> hydra.v1.ClearScheduledStateChangesResponse + 5, // [5:8] is the sub-list for method output_type + 2, // [2:5] is the sub-list for method input_type 2, // [2:2] is the sub-list for extension type_name 2, // [2:2] is the sub-list for extension extendee 0, // [0:2] is the sub-list for field type_name @@ -614,22 +406,19 @@ func file_hydra_v1_deployment_proto_init() { if File_hydra_v1_deployment_proto != nil { return } - file_hydra_v1_deployment_proto_msgTypes[4].OneofWrappers = []any{ - 
(*DeployRequest_Git)(nil), - (*DeployRequest_DockerImage)(nil), - } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: unsafe.Slice(unsafe.StringData(file_hydra_v1_deployment_proto_rawDesc), len(file_hydra_v1_deployment_proto_rawDesc)), - NumEnums: 0, - NumMessages: 10, + NumEnums: 1, + NumMessages: 6, NumExtensions: 0, NumServices: 1, }, GoTypes: file_hydra_v1_deployment_proto_goTypes, DependencyIndexes: file_hydra_v1_deployment_proto_depIdxs, + EnumInfos: file_hydra_v1_deployment_proto_enumTypes, MessageInfos: file_hydra_v1_deployment_proto_msgTypes, }.Build() File_hydra_v1_deployment_proto = out.File diff --git a/gen/proto/hydra/v1/deployment_restate.pb.go b/gen/proto/hydra/v1/deployment_restate.pb.go index 22c2539536..c6dfb35535 100644 --- a/gen/proto/hydra/v1/deployment_restate.pb.go +++ b/gen/proto/hydra/v1/deployment_restate.pb.go @@ -14,111 +14,177 @@ import ( ) // DeploymentServiceClient is the client API for hydra.v1.DeploymentService service. +// +// DeploymentService manages desired-state transitions for a single deployment +// as a Restate virtual object keyed by deployment ID. The virtual object key +// guarantees that only one state change operation executes per deployment at a +// time, preventing conflicting concurrent transitions. +// +// State transitions use last-writer-wins semantics: calling +// ScheduleDesiredStateChange while a previous transition is still pending +// overwrites it. This is implemented via a nonce — each schedule generates a +// unique nonce stored in Restate state, and ChangeDesiredState silently no-ops +// if its nonce doesn't match the stored one, meaning the transition was +// superseded by a newer schedule. +// +// Typical flow: +// 1. Caller invokes ScheduleDesiredStateChange with a target state and a +// relative delay in milliseconds for when the change should take effect. +// 2. 
The handler stores the transition record and sends a delayed +// ChangeDesiredState call to itself. +// 3. When the delay elapses, ChangeDesiredState verifies the nonce still +// matches and persists the new desired state to the database. type DeploymentServiceClient interface { - Deploy(opts ...sdk_go.ClientOption) sdk_go.Client[*DeployRequest, *DeployResponse] - Rollback(opts ...sdk_go.ClientOption) sdk_go.Client[*RollbackRequest, *RollbackResponse] - Promote(opts ...sdk_go.ClientOption) sdk_go.Client[*PromoteRequest, *PromoteResponse] - ScaleDownIdleDeployments(opts ...sdk_go.ClientOption) sdk_go.Client[*ScaleDownIdleDeploymentsRequest, *ScaleDownIdleDeploymentsResponse] + // ScheduleDesiredStateChange registers a future desired-state transition for + // this deployment. It generates a nonce, stores a transition record in Restate + // state, and sends a delayed ChangeDesiredState call to itself after the + // requested delay. Calling this again before the previous transition fires + // replaces it (last-writer-wins via nonce). + ScheduleDesiredStateChange(opts ...sdk_go.ClientOption) sdk_go.Client[*ScheduleDesiredStateChangeRequest, *ScheduleDesiredStateChangeResponse] + // ChangeDesiredState is an internal handler invoked by the delayed call from + // ScheduleDesiredStateChange. It verifies the nonce matches the stored + // transition record — if it doesn't, the transition was superseded and the + // call no-ops. On match, it persists the new desired state to the database. + // The deployment ID is derived from the virtual object key. + ChangeDesiredState(opts ...sdk_go.ClientOption) sdk_go.Client[*ChangeDesiredStateRequest, *ChangeDesiredStateResponse] + // ClearScheduledStateChanges removes the pending transition record from + // Restate state, effectively cancelling any scheduled ChangeDesiredState + // call. The delayed call may still fire, but it will no-op because + // ChangeDesiredState requires a stored transition to exist. 
+ ClearScheduledStateChanges(opts ...sdk_go.ClientOption) sdk_go.Client[*ClearScheduledStateChangesRequest, *ClearScheduledStateChangesResponse] } type deploymentServiceClient struct { - ctx sdk_go.Context - workflowID string - options []sdk_go.ClientOption + ctx sdk_go.Context + key string + options []sdk_go.ClientOption } -func NewDeploymentServiceClient(ctx sdk_go.Context, workflowID string, opts ...sdk_go.ClientOption) DeploymentServiceClient { +func NewDeploymentServiceClient(ctx sdk_go.Context, key string, opts ...sdk_go.ClientOption) DeploymentServiceClient { cOpts := append([]sdk_go.ClientOption{sdk_go.WithProtoJSON}, opts...) return &deploymentServiceClient{ ctx, - workflowID, + key, cOpts, } } -func (c *deploymentServiceClient) Deploy(opts ...sdk_go.ClientOption) sdk_go.Client[*DeployRequest, *DeployResponse] { - cOpts := c.options - if len(opts) > 0 { - cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) - } - return sdk_go.WithRequestType[*DeployRequest](sdk_go.Workflow[*DeployResponse](c.ctx, "hydra.v1.DeploymentService", c.workflowID, "Deploy", cOpts...)) -} - -func (c *deploymentServiceClient) Rollback(opts ...sdk_go.ClientOption) sdk_go.Client[*RollbackRequest, *RollbackResponse] { +func (c *deploymentServiceClient) ScheduleDesiredStateChange(opts ...sdk_go.ClientOption) sdk_go.Client[*ScheduleDesiredStateChangeRequest, *ScheduleDesiredStateChangeResponse] { cOpts := c.options if len(opts) > 0 { cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) 
} - return sdk_go.WithRequestType[*RollbackRequest](sdk_go.Workflow[*RollbackResponse](c.ctx, "hydra.v1.DeploymentService", c.workflowID, "Rollback", cOpts...)) + return sdk_go.WithRequestType[*ScheduleDesiredStateChangeRequest](sdk_go.Object[*ScheduleDesiredStateChangeResponse](c.ctx, "hydra.v1.DeploymentService", c.key, "ScheduleDesiredStateChange", cOpts...)) } -func (c *deploymentServiceClient) Promote(opts ...sdk_go.ClientOption) sdk_go.Client[*PromoteRequest, *PromoteResponse] { +func (c *deploymentServiceClient) ChangeDesiredState(opts ...sdk_go.ClientOption) sdk_go.Client[*ChangeDesiredStateRequest, *ChangeDesiredStateResponse] { cOpts := c.options if len(opts) > 0 { cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) } - return sdk_go.WithRequestType[*PromoteRequest](sdk_go.Workflow[*PromoteResponse](c.ctx, "hydra.v1.DeploymentService", c.workflowID, "Promote", cOpts...)) + return sdk_go.WithRequestType[*ChangeDesiredStateRequest](sdk_go.Object[*ChangeDesiredStateResponse](c.ctx, "hydra.v1.DeploymentService", c.key, "ChangeDesiredState", cOpts...)) } -func (c *deploymentServiceClient) ScaleDownIdleDeployments(opts ...sdk_go.ClientOption) sdk_go.Client[*ScaleDownIdleDeploymentsRequest, *ScaleDownIdleDeploymentsResponse] { +func (c *deploymentServiceClient) ClearScheduledStateChanges(opts ...sdk_go.ClientOption) sdk_go.Client[*ClearScheduledStateChangesRequest, *ClearScheduledStateChangesResponse] { cOpts := c.options if len(opts) > 0 { cOpts = append(append([]sdk_go.ClientOption{}, cOpts...), opts...) 
} - return sdk_go.WithRequestType[*ScaleDownIdleDeploymentsRequest](sdk_go.Workflow[*ScaleDownIdleDeploymentsResponse](c.ctx, "hydra.v1.DeploymentService", c.workflowID, "ScaleDownIdleDeployments", cOpts...)) + return sdk_go.WithRequestType[*ClearScheduledStateChangesRequest](sdk_go.Object[*ClearScheduledStateChangesResponse](c.ctx, "hydra.v1.DeploymentService", c.key, "ClearScheduledStateChanges", cOpts...)) } // DeploymentServiceIngressClient is the ingress client API for hydra.v1.DeploymentService service. // // This client is used to call the service from outside of a Restate context. type DeploymentServiceIngressClient interface { - Deploy() ingress.Requester[*DeployRequest, *DeployResponse] - Rollback() ingress.Requester[*RollbackRequest, *RollbackResponse] - Promote() ingress.Requester[*PromoteRequest, *PromoteResponse] - ScaleDownIdleDeployments() ingress.Requester[*ScaleDownIdleDeploymentsRequest, *ScaleDownIdleDeploymentsResponse] + // ScheduleDesiredStateChange registers a future desired-state transition for + // this deployment. It generates a nonce, stores a transition record in Restate + // state, and sends a delayed ChangeDesiredState call to itself after the + // requested delay. Calling this again before the previous transition fires + // replaces it (last-writer-wins via nonce). + ScheduleDesiredStateChange() ingress.Requester[*ScheduleDesiredStateChangeRequest, *ScheduleDesiredStateChangeResponse] + // ChangeDesiredState is an internal handler invoked by the delayed call from + // ScheduleDesiredStateChange. It verifies the nonce matches the stored + // transition record — if it doesn't, the transition was superseded and the + // call no-ops. On match, it persists the new desired state to the database. + // The deployment ID is derived from the virtual object key. 
+ ChangeDesiredState() ingress.Requester[*ChangeDesiredStateRequest, *ChangeDesiredStateResponse] + // ClearScheduledStateChanges removes the pending transition record from + // Restate state, effectively cancelling any scheduled ChangeDesiredState + // call. The delayed call may still fire, but it will no-op because + // ChangeDesiredState requires a stored transition to exist. + ClearScheduledStateChanges() ingress.Requester[*ClearScheduledStateChangesRequest, *ClearScheduledStateChangesResponse] } type deploymentServiceIngressClient struct { client *ingress.Client serviceName string - workflowID string + key string } -func NewDeploymentServiceIngressClient(client *ingress.Client, workflowID string) DeploymentServiceIngressClient { +func NewDeploymentServiceIngressClient(client *ingress.Client, key string) DeploymentServiceIngressClient { return &deploymentServiceIngressClient{ client, "hydra.v1.DeploymentService", - workflowID, + key, } } -func (c *deploymentServiceIngressClient) Deploy() ingress.Requester[*DeployRequest, *DeployResponse] { - codec := encoding.ProtoJSONCodec - return ingress.NewRequester[*DeployRequest, *DeployResponse](c.client, c.serviceName, "Deploy", &c.workflowID, &codec) -} - -func (c *deploymentServiceIngressClient) Rollback() ingress.Requester[*RollbackRequest, *RollbackResponse] { +func (c *deploymentServiceIngressClient) ScheduleDesiredStateChange() ingress.Requester[*ScheduleDesiredStateChangeRequest, *ScheduleDesiredStateChangeResponse] { codec := encoding.ProtoJSONCodec - return ingress.NewRequester[*RollbackRequest, *RollbackResponse](c.client, c.serviceName, "Rollback", &c.workflowID, &codec) + return ingress.NewRequester[*ScheduleDesiredStateChangeRequest, *ScheduleDesiredStateChangeResponse](c.client, c.serviceName, "ScheduleDesiredStateChange", &c.key, &codec) } -func (c *deploymentServiceIngressClient) Promote() ingress.Requester[*PromoteRequest, *PromoteResponse] { +func (c *deploymentServiceIngressClient) 
ChangeDesiredState() ingress.Requester[*ChangeDesiredStateRequest, *ChangeDesiredStateResponse] { codec := encoding.ProtoJSONCodec - return ingress.NewRequester[*PromoteRequest, *PromoteResponse](c.client, c.serviceName, "Promote", &c.workflowID, &codec) + return ingress.NewRequester[*ChangeDesiredStateRequest, *ChangeDesiredStateResponse](c.client, c.serviceName, "ChangeDesiredState", &c.key, &codec) } -func (c *deploymentServiceIngressClient) ScaleDownIdleDeployments() ingress.Requester[*ScaleDownIdleDeploymentsRequest, *ScaleDownIdleDeploymentsResponse] { +func (c *deploymentServiceIngressClient) ClearScheduledStateChanges() ingress.Requester[*ClearScheduledStateChangesRequest, *ClearScheduledStateChangesResponse] { codec := encoding.ProtoJSONCodec - return ingress.NewRequester[*ScaleDownIdleDeploymentsRequest, *ScaleDownIdleDeploymentsResponse](c.client, c.serviceName, "ScaleDownIdleDeployments", &c.workflowID, &codec) + return ingress.NewRequester[*ClearScheduledStateChangesRequest, *ClearScheduledStateChangesResponse](c.client, c.serviceName, "ClearScheduledStateChanges", &c.key, &codec) } // DeploymentServiceServer is the server API for hydra.v1.DeploymentService service. // All implementations should embed UnimplementedDeploymentServiceServer // for forward compatibility. +// +// DeploymentService manages desired-state transitions for a single deployment +// as a Restate virtual object keyed by deployment ID. The virtual object key +// guarantees that only one state change operation executes per deployment at a +// time, preventing conflicting concurrent transitions. +// +// State transitions use last-writer-wins semantics: calling +// ScheduleDesiredStateChange while a previous transition is still pending +// overwrites it. 
This is implemented via a nonce — each schedule generates a +// unique nonce stored in Restate state, and ChangeDesiredState silently no-ops +// if its nonce doesn't match the stored one, meaning the transition was +// superseded by a newer schedule. +// +// Typical flow: +// 1. Caller invokes ScheduleDesiredStateChange with a target state and a +// relative delay in milliseconds for when the change should take effect. +// 2. The handler stores the transition record and sends a delayed +// ChangeDesiredState call to itself. +// 3. When the delay elapses, ChangeDesiredState verifies the nonce still +// matches and persists the new desired state to the database. type DeploymentServiceServer interface { - Deploy(ctx sdk_go.WorkflowSharedContext, req *DeployRequest) (*DeployResponse, error) - Rollback(ctx sdk_go.WorkflowSharedContext, req *RollbackRequest) (*RollbackResponse, error) - Promote(ctx sdk_go.WorkflowSharedContext, req *PromoteRequest) (*PromoteResponse, error) - ScaleDownIdleDeployments(ctx sdk_go.WorkflowSharedContext, req *ScaleDownIdleDeploymentsRequest) (*ScaleDownIdleDeploymentsResponse, error) + // ScheduleDesiredStateChange registers a future desired-state transition for + // this deployment. It generates a nonce, stores a transition record in Restate + // state, and sends a delayed ChangeDesiredState call to itself after the + // requested delay. Calling this again before the previous transition fires + // replaces it (last-writer-wins via nonce). + ScheduleDesiredStateChange(ctx sdk_go.ObjectContext, req *ScheduleDesiredStateChangeRequest) (*ScheduleDesiredStateChangeResponse, error) + // ChangeDesiredState is an internal handler invoked by the delayed call from + // ScheduleDesiredStateChange. It verifies the nonce matches the stored + // transition record — if it doesn't, the transition was superseded and the + // call no-ops. On match, it persists the new desired state to the database. 
+ // The deployment ID is derived from the virtual object key. + ChangeDesiredState(ctx sdk_go.ObjectContext, req *ChangeDesiredStateRequest) (*ChangeDesiredStateResponse, error) + // ClearScheduledStateChanges removes the pending transition record from + // Restate state, effectively cancelling any scheduled ChangeDesiredState + // call. The delayed call may still fire, but it will no-op because + // ChangeDesiredState requires a stored transition to exist. + ClearScheduledStateChanges(ctx sdk_go.ObjectContext, req *ClearScheduledStateChangesRequest) (*ClearScheduledStateChangesResponse, error) } // UnimplementedDeploymentServiceServer should be embedded to have @@ -128,17 +194,14 @@ type DeploymentServiceServer interface { // pointer dereference when methods are called. type UnimplementedDeploymentServiceServer struct{} -func (UnimplementedDeploymentServiceServer) Deploy(ctx sdk_go.WorkflowSharedContext, req *DeployRequest) (*DeployResponse, error) { - return nil, sdk_go.TerminalError(fmt.Errorf("method Deploy not implemented"), 501) -} -func (UnimplementedDeploymentServiceServer) Rollback(ctx sdk_go.WorkflowSharedContext, req *RollbackRequest) (*RollbackResponse, error) { - return nil, sdk_go.TerminalError(fmt.Errorf("method Rollback not implemented"), 501) +func (UnimplementedDeploymentServiceServer) ScheduleDesiredStateChange(ctx sdk_go.ObjectContext, req *ScheduleDesiredStateChangeRequest) (*ScheduleDesiredStateChangeResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method ScheduleDesiredStateChange not implemented"), 501) } -func (UnimplementedDeploymentServiceServer) Promote(ctx sdk_go.WorkflowSharedContext, req *PromoteRequest) (*PromoteResponse, error) { - return nil, sdk_go.TerminalError(fmt.Errorf("method Promote not implemented"), 501) +func (UnimplementedDeploymentServiceServer) ChangeDesiredState(ctx sdk_go.ObjectContext, req *ChangeDesiredStateRequest) (*ChangeDesiredStateResponse, error) { + return nil, 
sdk_go.TerminalError(fmt.Errorf("method ChangeDesiredState not implemented"), 501) } -func (UnimplementedDeploymentServiceServer) ScaleDownIdleDeployments(ctx sdk_go.WorkflowSharedContext, req *ScaleDownIdleDeploymentsRequest) (*ScaleDownIdleDeploymentsResponse, error) { - return nil, sdk_go.TerminalError(fmt.Errorf("method ScaleDownIdleDeployments not implemented"), 501) +func (UnimplementedDeploymentServiceServer) ClearScheduledStateChanges(ctx sdk_go.ObjectContext, req *ClearScheduledStateChangesRequest) (*ClearScheduledStateChangesResponse, error) { + return nil, sdk_go.TerminalError(fmt.Errorf("method ClearScheduledStateChanges not implemented"), 501) } func (UnimplementedDeploymentServiceServer) testEmbeddedByValue() {} @@ -158,10 +221,9 @@ func NewDeploymentServiceServer(srv DeploymentServiceServer, opts ...sdk_go.Serv t.testEmbeddedByValue() } sOpts := append([]sdk_go.ServiceDefinitionOption{sdk_go.WithProtoJSON}, opts...) - router := sdk_go.NewWorkflow("hydra.v1.DeploymentService", sOpts...) - router = router.Handler("Deploy", sdk_go.NewWorkflowSharedHandler(srv.Deploy)) - router = router.Handler("Rollback", sdk_go.NewWorkflowSharedHandler(srv.Rollback)) - router = router.Handler("Promote", sdk_go.NewWorkflowSharedHandler(srv.Promote)) - router = router.Handler("ScaleDownIdleDeployments", sdk_go.NewWorkflowSharedHandler(srv.ScaleDownIdleDeployments)) + router := sdk_go.NewObject("hydra.v1.DeploymentService", sOpts...) 
+ router = router.Handler("ScheduleDesiredStateChange", sdk_go.NewObjectHandler(srv.ScheduleDesiredStateChange)) + router = router.Handler("ChangeDesiredState", sdk_go.NewObjectHandler(srv.ChangeDesiredState)) + router = router.Handler("ClearScheduledStateChanges", sdk_go.NewObjectHandler(srv.ClearScheduledStateChanges)) return router } diff --git a/pkg/db/BUILD.bazel b/pkg/db/BUILD.bazel index 7cf02a4eac..c2e5845e10 100644 --- a/pkg/db/BUILD.bazel +++ b/pkg/db/BUILD.bazel @@ -108,6 +108,7 @@ go_library( "deployment_topology_insert.sql_generated.go", "deployment_topology_list_by_versions.sql_generated.go", "deployment_topology_list_desired.sql_generated.go", + "deployment_topology_update_desired_status.sql_generated.go", "deployment_update_build_id.sql_generated.go", "deployment_update_desired_state.sql_generated.go", "deployment_update_image.sql_generated.go", diff --git a/pkg/db/deployment_topology_update_desired_status.sql_generated.go b/pkg/db/deployment_topology_update_desired_status.sql_generated.go new file mode 100644 index 0000000000..f172fd2363 --- /dev/null +++ b/pkg/db/deployment_topology_update_desired_status.sql_generated.go @@ -0,0 +1,36 @@ +// Code generated by sqlc. DO NOT EDIT. +// versions: +// sqlc v1.30.0 +// source: deployment_topology_update_desired_status.sql + +package db + +import ( + "context" + "database/sql" +) + +const updateDeploymentTopologyDesiredStatus = `-- name: UpdateDeploymentTopologyDesiredStatus :exec +UPDATE ` + "`" + `deployment_topology` + "`" + ` +SET desired_status = ?, version = ?, updated_at = ? +WHERE deployment_id = ? AND region = ? +` + +type UpdateDeploymentTopologyDesiredStatusParams struct { + DesiredStatus DeploymentTopologyDesiredStatus `db:"desired_status"` + Version uint64 `db:"version"` + UpdatedAt sql.NullInt64 `db:"updated_at"` + DeploymentID string `db:"deployment_id"` + Region string `db:"region"` +} + +// UpdateDeploymentTopologyDesiredStatus updates the desired_status and version of a topology entry. 
+// A new version is required so that WatchDeployments picks up the change. +// +// UPDATE `deployment_topology` +// SET desired_status = ?, version = ?, updated_at = ? +// WHERE deployment_id = ? AND region = ? +func (q *Queries) UpdateDeploymentTopologyDesiredStatus(ctx context.Context, db DBTX, arg UpdateDeploymentTopologyDesiredStatusParams) error { + _, err := db.ExecContext(ctx, updateDeploymentTopologyDesiredStatus, arg.DesiredStatus, arg.Version, arg.UpdatedAt, arg.DeploymentID, arg.Region) + return err +} diff --git a/pkg/db/querier_generated.go b/pkg/db/querier_generated.go index cef7a1dba4..93685f4e94 100644 --- a/pkg/db/querier_generated.go +++ b/pkg/db/querier_generated.go @@ -2375,6 +2375,13 @@ type Querier interface { // SET desired_state = ?, updated_at = ? // WHERE id = ? UpdateDeploymentDesiredState(ctx context.Context, db DBTX, arg UpdateDeploymentDesiredStateParams) error + //UpdateDeploymentTopologyDesiredStatus updates the desired_status and version of a topology entry. + // A new version is required so that WatchDeployments picks up the change. + // + // UPDATE `deployment_topology` + // SET desired_status = ?, version = ?, updated_at = ? + // WHERE deployment_id = ? AND region = ? + UpdateDeploymentTopologyDesiredStatus(ctx context.Context, db DBTX, arg UpdateDeploymentTopologyDesiredStatusParams) error //UpdateDeploymentImage // // UPDATE deployments diff --git a/pkg/db/queries/deployment_topology_update_desired_status.sql b/pkg/db/queries/deployment_topology_update_desired_status.sql new file mode 100644 index 0000000000..92dd85907e --- /dev/null +++ b/pkg/db/queries/deployment_topology_update_desired_status.sql @@ -0,0 +1,6 @@ +-- name: UpdateDeploymentTopologyDesiredStatus :exec +-- UpdateDeploymentTopologyDesiredStatus updates the desired_status and version of a topology entry. +-- A new version is required so that WatchDeployments picks up the change. 
+UPDATE `deployment_topology` +SET desired_status = sqlc.arg(desired_status), version = sqlc.arg(version), updated_at = sqlc.arg(updated_at) +WHERE deployment_id = sqlc.arg(deployment_id) AND region = sqlc.arg(region); diff --git a/svc/api/internal/testutil/mock_deployment_client.go b/svc/api/internal/testutil/mock_deployment_client.go index 2080b4c1cd..8af1416bb2 100644 --- a/svc/api/internal/testutil/mock_deployment_client.go +++ b/svc/api/internal/testutil/mock_deployment_client.go @@ -9,7 +9,7 @@ import ( "github.com/unkeyed/unkey/gen/proto/ctrl/v1/ctrlv1connect" ) -var _ ctrlv1connect.DeploymentServiceClient = (*MockDeploymentClient)(nil) +var _ ctrlv1connect.DeployServiceClient = (*MockDeploymentClient)(nil) // MockDeploymentClient is a test double for the control plane's deployment service. // diff --git a/svc/api/routes/services.go b/svc/api/routes/services.go index c18ddb8352..92d0486b40 100644 --- a/svc/api/routes/services.go +++ b/svc/api/routes/services.go @@ -55,7 +55,7 @@ type Services struct { // CtrlDeploymentClient communicates with the control plane for deployment // operations like creating and managing deployments. - CtrlDeploymentClient ctrlv1connect.DeploymentServiceClient + CtrlDeploymentClient ctrlv1connect.DeployServiceClient // PprofEnabled controls whether pprof profiling endpoints are registered. 
PprofEnabled bool diff --git a/svc/api/routes/v2_deploy_create_deployment/handler.go b/svc/api/routes/v2_deploy_create_deployment/handler.go index 919acb73fe..2663079e00 100644 --- a/svc/api/routes/v2_deploy_create_deployment/handler.go +++ b/svc/api/routes/v2_deploy_create_deployment/handler.go @@ -25,7 +25,7 @@ type ( type Handler struct { DB db.Database Keys keys.KeyService - CtrlClient ctrlv1connect.DeploymentServiceClient + CtrlClient ctrlv1connect.DeployServiceClient } func (h *Handler) Path() string { diff --git a/svc/api/run.go b/svc/api/run.go index d0c75e5415..1a14d0b473 100644 --- a/svc/api/run.go +++ b/svc/api/run.go @@ -268,7 +268,7 @@ func Run(ctx context.Context, cfg Config) error { } // Initialize CTRL deployment client using bufconnect - ctrlDeploymentClient := ctrlv1connect.NewDeploymentServiceClient( + ctrlDeploymentClient := ctrlv1connect.NewDeployServiceClient( &http.Client{}, cfg.CtrlURL, connect.WithInterceptors(interceptor.NewHeaderInjector(map[string]string{ diff --git a/svc/ctrl/api/deployment_integration_test.go b/svc/ctrl/api/deployment_integration_test.go index cafa344763..14d745fbf2 100644 --- a/svc/ctrl/api/deployment_integration_test.go +++ b/svc/ctrl/api/deployment_integration_test.go @@ -15,20 +15,27 @@ import ( "github.com/unkeyed/unkey/svc/ctrl/integration/seed" ) -type mockDeploymentService struct { - hydrav1.UnimplementedDeploymentServiceServer +type mockDeployService struct { + hydrav1.UnimplementedDeployServiceServer requests chan *hydrav1.DeployRequest } -func (m *mockDeploymentService) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.DeployRequest) (*hydrav1.DeployResponse, error) { +func (m *mockDeployService) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.DeployRequest) (*hydrav1.DeployResponse, error) { m.requests <- req return &hydrav1.DeployResponse{}, nil } +// mockDeploymentService stubs the DeploymentService (state-change RPCs) for tests +// that need the service registered in Restate but don't 
exercise its methods. +type mockDeploymentService struct { + hydrav1.UnimplementedDeploymentServiceServer + requests chan *hydrav1.DeployRequest +} + func TestDeployment_Create_TriggersWorkflow(t *testing.T) { requests := make(chan *hydrav1.DeployRequest, 1) harness := newWebhookHarness(t, webhookHarnessConfig{ - Services: []restate.ServiceDefinition{hydrav1.NewDeploymentServiceServer(&mockDeploymentService{requests: requests})}, + Services: []restate.ServiceDefinition{hydrav1.NewDeployServiceServer(&mockDeployService{requests: requests})}, }) ctx := harness.RequestContext() @@ -51,7 +58,7 @@ func TestDeployment_Create_TriggersWorkflow(t *testing.T) { DeleteProtection: false, }) - client := ctrlv1connect.NewDeploymentServiceClient(harness.ConnectClient(), harness.CtrlURL, harness.ConnectOptions()...) + client := ctrlv1connect.NewDeployServiceClient(harness.ConnectClient(), harness.CtrlURL, harness.ConnectOptions()...) resp, err := client.CreateDeployment(ctx, connect.NewRequest(&ctrlv1.CreateDeploymentRequest{ ProjectId: project.ID, EnvironmentSlug: environment.Slug, diff --git a/svc/ctrl/api/github_webhook.go b/svc/ctrl/api/github_webhook.go index aa978e7601..e27858f657 100644 --- a/svc/ctrl/api/github_webhook.go +++ b/svc/ctrl/api/github_webhook.go @@ -229,7 +229,7 @@ func (s *GitHubWebhook) handlePush(ctx context.Context, w http.ResponseWriter, b ) // Start deploy workflow with GitSource - deployClient := hydrav1.NewDeploymentServiceIngressClient(s.restate, deploymentID) + deployClient := hydrav1.NewDeployServiceIngressClient(s.restate, deploymentID) invocation, err := deployClient.Deploy().Send(ctx, &hydrav1.DeployRequest{ DeploymentId: deploymentID, Source: &hydrav1.DeployRequest_Git{ diff --git a/svc/ctrl/api/github_webhook_integration_test.go b/svc/ctrl/api/github_webhook_integration_test.go index 520d2ceec4..06c9f2ad3d 100644 --- a/svc/ctrl/api/github_webhook_integration_test.go +++ b/svc/ctrl/api/github_webhook_integration_test.go @@ -25,7 +25,7 @@ const 
testRepoFullName = "acme/repo" func TestGitHubWebhook_Push_TriggersDeployWorkflow(t *testing.T) { deployRequests := make(chan *hydrav1.DeployRequest, 1) harness := newWebhookHarness(t, webhookHarnessConfig{ - Services: []restate.ServiceDefinition{hydrav1.NewDeploymentServiceServer(&mockDeploymentService{requests: deployRequests})}, + Services: []restate.ServiceDefinition{hydrav1.NewDeployServiceServer(&mockDeployService{requests: deployRequests})}, }) projectID := insertRepoConnection(t, harness, testRepoFullName, 101, 202) @@ -70,7 +70,7 @@ func TestGitHubWebhook_Push_IgnoresFork(t *testing.T) { func TestGitHubWebhook_InvalidSignature(t *testing.T) { deployRequests := make(chan *hydrav1.DeployRequest, 1) harness := newWebhookHarness(t, webhookHarnessConfig{ - Services: []restate.ServiceDefinition{hydrav1.NewDeploymentServiceServer(&mockDeploymentService{requests: deployRequests})}, + Services: []restate.ServiceDefinition{hydrav1.NewDeployServiceServer(&mockDeployService{requests: deployRequests})}, }) _ = insertRepoConnection(t, harness, testRepoFullName, 101, 202) diff --git a/svc/ctrl/api/run.go b/svc/ctrl/api/run.go index ccb89aa407..3af4bb75a3 100644 --- a/svc/ctrl/api/run.go +++ b/svc/ctrl/api/run.go @@ -143,7 +143,7 @@ func Run(ctx context.Context, cfg Config) error { r.RegisterHealth(mux) mux.Handle(ctrlv1connect.NewCtrlServiceHandler(ctrl.New(cfg.InstanceID, database))) - mux.Handle(ctrlv1connect.NewDeploymentServiceHandler(deployment.New(deployment.Config{ + mux.Handle(ctrlv1connect.NewDeployServiceHandler(deployment.New(deployment.Config{ Database: database, Restate: restateClient, AvailableRegions: cfg.AvailableRegions, diff --git a/svc/ctrl/integration/harness/BUILD.bazel b/svc/ctrl/integration/harness/BUILD.bazel index abf9ecb0f4..20b4bb25be 100644 --- a/svc/ctrl/integration/harness/BUILD.bazel +++ b/svc/ctrl/integration/harness/BUILD.bazel @@ -16,6 +16,7 @@ go_library( "//svc/ctrl/integration/seed", "//svc/ctrl/worker/clickhouseuser", 
"//svc/ctrl/worker/deploy", + "//svc/ctrl/worker/deployment", "//svc/ctrl/worker/quotacheck", "//svc/vault/testutil", "@com_github_clickhouse_clickhouse_go_v2//:clickhouse-go", diff --git a/svc/ctrl/integration/harness/harness.go b/svc/ctrl/integration/harness/harness.go index e7549a07a5..c314d24bed 100644 --- a/svc/ctrl/integration/harness/harness.go +++ b/svc/ctrl/integration/harness/harness.go @@ -28,6 +28,7 @@ import ( "github.com/unkeyed/unkey/svc/ctrl/integration/seed" "github.com/unkeyed/unkey/svc/ctrl/worker/clickhouseuser" "github.com/unkeyed/unkey/svc/ctrl/worker/deploy" + "github.com/unkeyed/unkey/svc/ctrl/worker/deployment" "github.com/unkeyed/unkey/svc/ctrl/worker/quotacheck" vaulttestutil "github.com/unkeyed/unkey/svc/vault/testutil" "golang.org/x/net/http2" @@ -179,12 +180,17 @@ func New(t *testing.T) *Harness { AllowUnauthenticatedDeployments: false, }) + deploymentSvc := deployment.New(deployment.Config{ + DB: database, + }) + // Set up Restate server with all services // Use the proto-generated wrappers (same as run.go) to get correct service names restateSrv := restateServer.NewRestate() restateSrv.Bind(hydrav1.NewQuotaCheckServiceServer(quotaCheckSvc)) restateSrv.Bind(hydrav1.NewClickhouseUserServiceServer(clickhouseUserSvc)) - restateSrv.Bind(hydrav1.NewDeploymentServiceServer(deploySvc)) + restateSrv.Bind(hydrav1.NewDeployServiceServer(deploySvc)) + restateSrv.Bind(hydrav1.NewDeploymentServiceServer(deploymentSvc)) restateHandler, err := restateSrv.Handler() require.NoError(t, err) diff --git a/svc/ctrl/proto/ctrl/v1/deployment.proto b/svc/ctrl/proto/ctrl/v1/deployment.proto index 946264d535..2cb461ad80 100644 --- a/svc/ctrl/proto/ctrl/v1/deployment.proto +++ b/svc/ctrl/proto/ctrl/v1/deployment.proto @@ -144,7 +144,7 @@ message PromoteRequest { message PromoteResponse {} -service DeploymentService { +service DeployService { // Create a new deployment with a prebuilt docker image rpc CreateDeployment(CreateDeploymentRequest) returns 
(CreateDeploymentResponse) {}
diff --git a/svc/ctrl/proto/hydra/v1/deploy.proto b/svc/ctrl/proto/hydra/v1/deploy.proto
new file mode 100644
index 0000000000..d7484dc862
--- /dev/null
+++ b/svc/ctrl/proto/hydra/v1/deploy.proto
@@ -0,0 +1,108 @@
+syntax = "proto3";
+package hydra.v1;
+
+import "dev/restate/sdk/go.proto";
+
+option go_package = "github.com/unkeyed/unkey/gen/proto/hydra/v1;hydrav1";
+
+// DeployService orchestrates the lifecycle of application deployments as
+// durable Restate workflows. Each RPC is idempotent and can safely resume from
+// any step after a crash.
+//
+// Deploy handles the full pipeline from building Docker images through
+// provisioning containers and configuring domain routing. Rollback and Promote
+// manage traffic switching between deployments by reassigning sticky frontline
+// routes atomically through the routing service.
+//
+// ScaleDownIdlePreviewDeployments runs on a cron to find preview deployments that have
+// received no traffic for 6 hours and sets them to standby. Per-deployment scheduled
+// state changes are handled by hydra.v1.DeploymentService.ScheduleDesiredStateChange,
+// typically sent with a 30-minute delay after a new deployment replaces it.
+service DeployService {
+  option (dev.restate.sdk.go.service_type) = WORKFLOW;
+
+  // Deploy executes the full deployment workflow: build (if git source), provision
+  // containers across regions, wait for health, configure domain routing, and
+  // update the project's live deployment pointer for production environments.
+  // Sets deployment status to failed on any error.
+  rpc Deploy(DeployRequest) returns (DeployResponse) {}
+
+  // Rollback switches sticky frontline routes (environment and live) from the
+  // current live deployment back to a previous one. Marks the project as rolled
+  // back so future deploys don't automatically reclaim live routes.
+  // Source must be the current live deployment; both must share the same project
+  // and environment.
+ rpc Rollback(RollbackRequest) returns (RollbackResponse) {} + + // Promote reassigns sticky frontline routes to a target deployment and clears + // the rolled-back flag, restoring normal deployment flow. + // Target must be in ready status and not already the live deployment. + rpc Promote(PromoteRequest) returns (PromoteResponse) {} + + // ScaleDownIdlePreviewDeployments iterates all preview environments and sets any + // deployment to standby that has received zero requests in the last 6 hours. + // Intended to be called by a cron job. + rpc ScaleDownIdlePreviewDeployments(ScaleDownIdlePreviewDeploymentsRequest) returns (ScaleDownIdlePreviewDeploymentsResponse) {} +} + +message ScaleDownIdlePreviewDeploymentsRequest {} +message ScaleDownIdlePreviewDeploymentsResponse {} + +// DockerImage references a pre-built container image to deploy directly, +// skipping the build step. +message DockerImage { + string image = 1; +} + +// GitSource specifies a repository and commit to build a Docker image from. +message GitSource { + // GitHub App installation ID used to clone the repository. + int64 installation_id = 1; + + // Full repository identifier (e.g., "owner/repo"). + string repository = 2; + + string commit_sha = 3; + + // Subdirectory within the repository to use as the Docker build context. + string context_path = 4; + + // Path to the Dockerfile, relative to context_path. + string dockerfile_path = 5; +} + +message DeployRequest { + string deployment_id = 1; + + // TODO: remove this field, it is unused. + optional string key_auth_id = 2; + + oneof source { + GitSource git = 3; + DockerImage docker_image = 4; + } + + // Container command override (e.g., ["./app", "serve"]) + repeated string command = 5; +} + +message DeployResponse {} + +// RollbackRequest identifies the deployment to roll back from and the +// deployment to restore. Both must belong to the same project and environment. 
+message RollbackRequest { + // The current live deployment to roll back from. + string source_deployment_id = 1; + + // A previous deployment to restore traffic to. + string target_deployment_id = 2; +} + +message RollbackResponse {} + +// PromoteRequest identifies a ready deployment to promote to live. +message PromoteRequest { + string target_deployment_id = 1; +} + +message PromoteResponse {} diff --git a/svc/ctrl/proto/hydra/v1/deployment.proto b/svc/ctrl/proto/hydra/v1/deployment.proto index 1b97e701a5..6a6d2ab54b 100644 --- a/svc/ctrl/proto/hydra/v1/deployment.proto +++ b/svc/ctrl/proto/hydra/v1/deployment.proto @@ -1,57 +1,93 @@ syntax = "proto3"; + package hydra.v1; import "dev/restate/sdk/go.proto"; option go_package = "github.com/unkeyed/unkey/gen/proto/hydra/v1;hydrav1"; +// DeploymentService manages desired-state transitions for a single deployment +// as a Restate virtual object keyed by deployment ID. The virtual object key +// guarantees that only one state change operation executes per deployment at a +// time, preventing conflicting concurrent transitions. +// +// State transitions use last-writer-wins semantics: calling +// ScheduleDesiredStateChange while a previous transition is still pending +// overwrites it. This is implemented via a nonce — each schedule generates a +// unique nonce stored in Restate state, and ChangeDesiredState silently no-ops +// if its nonce doesn't match the stored one, meaning the transition was +// superseded by a newer schedule. +// +// Typical flow: +// 1. Caller invokes ScheduleDesiredStateChange with a target state and a +// relative delay in milliseconds for when the change should take effect. +// 2. The handler stores the transition record and sends a delayed +// ChangeDesiredState call to itself. +// 3. When the delay elapses, ChangeDesiredState verifies the nonce still +// matches and persists the new desired state to the database. 
service DeploymentService { - option (dev.restate.sdk.go.service_type) = WORKFLOW; - rpc Deploy(DeployRequest) returns (DeployResponse) {} - rpc Rollback(RollbackRequest) returns (RollbackResponse) {} - rpc Promote(PromoteRequest) returns (PromoteResponse) {} - rpc ScaleDownIdleDeployments(ScaleDownIdleDeploymentsRequest) returns (ScaleDownIdleDeploymentsResponse) {} -} + option (dev.restate.sdk.go.service_type) = VIRTUAL_OBJECT; -message ScaleDownIdleDeploymentsRequest {} -message ScaleDownIdleDeploymentsResponse {} + // ScheduleDesiredStateChange registers a future desired-state transition for + // this deployment. It generates a nonce, stores a transition record in Restate + // state, and sends a delayed ChangeDesiredState call to itself after the + // requested delay. Calling this again before the previous transition fires + // replaces it (last-writer-wins via nonce). + rpc ScheduleDesiredStateChange(ScheduleDesiredStateChangeRequest) returns (ScheduleDesiredStateChangeResponse) {} -message DockerImage { - string image = 1; -} + // ChangeDesiredState is an internal handler invoked by the delayed call from + // ScheduleDesiredStateChange. It verifies the nonce matches the stored + // transition record — if it doesn't, the transition was superseded and the + // call no-ops. On match, it persists the new desired state to the database. + // The deployment ID is derived from the virtual object key. + rpc ChangeDesiredState(ChangeDesiredStateRequest) returns (ChangeDesiredStateResponse) {} -message GitSource { - int64 installation_id = 1; - string repository = 2; - string commit_sha = 3; - string context_path = 4; - string dockerfile_path = 5; + // ClearScheduledStateChanges removes the pending transition record from + // Restate state, effectively cancelling any scheduled ChangeDesiredState + // call. The delayed call may still fire, but it will no-op because + // ChangeDesiredState requires a stored transition to exist. 
+ rpc ClearScheduledStateChanges(ClearScheduledStateChangesRequest) returns (ClearScheduledStateChangesResponse) {} } -message DeployRequest { - string deployment_id = 1; - optional string key_auth_id = 2; +message ClearScheduledStateChangesRequest {} +message ClearScheduledStateChangesResponse {} - oneof source { - GitSource git = 3; - DockerImage docker_image = 4; - } +// DeploymentDesiredState represents the target lifecycle state for a deployment. +// UNSPECIFIED is treated as an error and causes a terminal failure in +// ChangeDesiredState. +enum DeploymentDesiredState { + DEPLOYMENT_DESIRED_STATE_UNSPECIFIED = 0; - // Container command override (e.g., ["./app", "serve"]) - repeated string command = 5; + // RUNNING means the deployment should have active containers serving traffic. + DEPLOYMENT_DESIRED_STATE_RUNNING = 1; + + // STANDBY means the deployment's containers are scaled down but the + // deployment can be resumed without a full rebuild. + DEPLOYMENT_DESIRED_STATE_STANDBY = 2; + + // ARCHIVED means the deployment is permanently decommissioned. + DEPLOYMENT_DESIRED_STATE_ARCHIVED = 3; } -message DeployResponse {} +message ScheduleDesiredStateChangeRequest { + // Relative delay in milliseconds before the state change should take effect. + // Set to 0 to execute immediately. + int64 delay_millis = 1; -message RollbackRequest { - string source_deployment_id = 1; - string target_deployment_id = 2; + DeploymentDesiredState state = 2; } -message RollbackResponse {} +message ScheduleDesiredStateChangeResponse {} + +// ChangeDesiredStateRequest is sent internally by ScheduleDesiredStateChange +// via a delayed Restate call. Callers should not invoke this directly. +message ChangeDesiredStateRequest { + // Nonce generated by the originating ScheduleDesiredStateChange call. Used + // to implement last-writer-wins: if this nonce doesn't match the stored + // transition record, the call is a stale superseded transition and no-ops. 
+ string nonce = 1; -message PromoteRequest { - string target_deployment_id = 1; + DeploymentDesiredState state = 2; } -message PromoteResponse {} +message ChangeDesiredStateResponse {} diff --git a/svc/ctrl/services/deployment/service.go b/svc/ctrl/services/deployment/service.go index 3a0c438d7d..8726616307 100644 --- a/svc/ctrl/services/deployment/service.go +++ b/svc/ctrl/services/deployment/service.go @@ -7,20 +7,20 @@ import ( "github.com/unkeyed/unkey/pkg/db" ) -// Service implements the DeploymentService ConnectRPC API. It coordinates +// Service implements the DeployService ConnectRPC API. It coordinates // deployment operations by persisting state to the database and delegating // workflow execution to Restate. type Service struct { - ctrlv1connect.UnimplementedDeploymentServiceHandler + ctrlv1connect.UnimplementedDeployServiceHandler db db.Database restate *restateingress.Client availableRegions []string } -// deploymentClient creates a typed Restate ingress client for the DeploymentService +// deploymentClient creates a typed Restate ingress client for the DeployService // keyed by the given project ID to ensure only one operation per project runs at a time. -func (s *Service) deploymentClient(projectID string) hydrav1.DeploymentServiceIngressClient { - return hydrav1.NewDeploymentServiceIngressClient(s.restate, projectID) +func (s *Service) deploymentClient(projectID string) hydrav1.DeployServiceIngressClient { + return hydrav1.NewDeployServiceIngressClient(s.restate, projectID) } // Config holds the configuration for creating a new [Service]. @@ -37,9 +37,9 @@ type Config struct { // [Config] are required. 
func New(cfg Config) *Service { return &Service{ - UnimplementedDeploymentServiceHandler: ctrlv1connect.UnimplementedDeploymentServiceHandler{}, - db: cfg.Database, - restate: cfg.Restate, - availableRegions: cfg.AvailableRegions, + UnimplementedDeployServiceHandler: ctrlv1connect.UnimplementedDeployServiceHandler{}, + db: cfg.Database, + restate: cfg.Restate, + availableRegions: cfg.AvailableRegions, } } diff --git a/svc/ctrl/services/doc.go b/svc/ctrl/services/doc.go index 165ed42ff2..1f3f7d34b5 100644 --- a/svc/ctrl/services/doc.go +++ b/svc/ctrl/services/doc.go @@ -50,7 +50,7 @@ // }) // // // Register with Connect server -// mux.Handle(ctrlv1connect.NewDeploymentServiceHandler(deploymentSvc)) +// mux.Handle(ctrlv1connect.NewDeployServiceHandler(deploymentSvc)) // // # Error Handling // diff --git a/svc/ctrl/worker/BUILD.bazel b/svc/ctrl/worker/BUILD.bazel index bad150779d..7c6fffac38 100644 --- a/svc/ctrl/worker/BUILD.bazel +++ b/svc/ctrl/worker/BUILD.bazel @@ -30,6 +30,7 @@ go_library( "//svc/ctrl/worker/clickhouseuser", "//svc/ctrl/worker/customdomain", "//svc/ctrl/worker/deploy", + "//svc/ctrl/worker/deployment", "//svc/ctrl/worker/github", "//svc/ctrl/worker/quotacheck", "//svc/ctrl/worker/routing", diff --git a/svc/ctrl/worker/deploy/BUILD.bazel b/svc/ctrl/worker/deploy/BUILD.bazel index 2315c01b6c..a0ec7ef5df 100644 --- a/svc/ctrl/worker/deploy/BUILD.bazel +++ b/svc/ctrl/worker/deploy/BUILD.bazel @@ -11,7 +11,7 @@ go_library( "helpers.go", "promote_handler.go", "rollback_handler.go", - "scale_down_idle_deployments.go", + "scale_down_idle_preview_deployments.go", "service.go", ], importpath = "github.com/unkeyed/unkey/svc/ctrl/worker/deploy", @@ -52,7 +52,7 @@ go_library( go_test( name = "deploy_test", - srcs = ["scale_down_idle_deployments_test.go"], + srcs = ["scale_down_idle_preview_deployments_test.go"], deps = [ "//gen/proto/hydra/v1:hydra", "//pkg/db", diff --git a/svc/ctrl/worker/deploy/deploy_handler.go b/svc/ctrl/worker/deploy/deploy_handler.go 
index 838201c056..65b24c43bf 100644 --- a/svc/ctrl/worker/deploy/deploy_handler.go +++ b/svc/ctrl/worker/deploy/deploy_handler.go @@ -29,30 +29,32 @@ const ( // Deploy executes a full deployment workflow for a new application version. // // This durable workflow orchestrates the complete deployment lifecycle: building -// Docker images (if source is provided), provisioning containers across regions, -// waiting for instances to become healthy, and configuring domain routing. The -// workflow is idempotent and can safely resume from any step after a crash. +// Docker images (if a GitSource is provided via Depot), provisioning containers +// across regions, waiting for instances to become healthy, and configuring domain +// routing. The workflow is idempotent and can safely resume from any step after +// a crash. // -// The deployment request must specify either a build context path (to build from -// source) or a pre-built Docker image. If BuildContextPath is set, the workflow -// triggers a Docker build through the build service before deployment. Otherwise, -// the provided DockerImage is deployed directly. +// The deployment request specifies a source as a oneof: either a GitSource (which +// triggers a Docker build through Depot) or a DockerImage (which is deployed +// directly). // // The workflow creates deployment topologies for all configured regions, each with -// its own version number for independent scaling and rollback. Sentinel containers -// are automatically provisioned for environments that don't already have them, -// with production environments getting 3 replicas and others getting 1. +// a version obtained from VersioningService and 1 desired replica. Sentinel +// containers are automatically provisioned for environments that don't already +// have them, with production sentinels getting 3 replicas and others getting 1. 
// -// Domain routing is configured through frontline routes, with sticky domains -// (branch and environment) automatically updating to point to the new deployment. -// For production deployments, the project's live deployment pointer is updated -// unless the project is in a rolled-back state. +// Domain routing is configured through frontline routes. Sticky routes +// (environment, and live for non-rolled-back production) are reassigned to the +// new deployment. For production deployments, the project's live deployment +// pointer is updated unless the project is in a rolled-back state. After a +// successful deploy, the previous live deployment is scheduled for standby after +// 30 minutes via DeploymentService.ScheduleDesiredStateChange. // // If any step fails, the deployment status is automatically set to failed via a // deferred cleanup handler, ensuring the database reflects the true deployment state. // -// Returns terminal errors for validation failures (missing image/context) and -// retryable errors for transient system failures. +// Returns terminal errors for validation failures and retryable errors for +// transient system failures. 
func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.DeployRequest) (*hydrav1.DeployResponse, error) { finishedSuccessfully := false @@ -81,6 +83,7 @@ func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.Deploy logger.Error("deployment failed but we can not set the status", "error", err.Error()) } }() + workspace, err := restate.Run(ctx, func(runCtx restate.RunContext) (db.Workspace, error) { var ws db.Workspace @@ -119,6 +122,7 @@ func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.Deploy if err != nil { return nil, err } + environment, err := restate.Run(ctx, func(runCtx restate.RunContext) (db.FindEnvironmentByIdRow, error) { return db.Query.FindEnvironmentById(runCtx, w.db.RW(), deployment.EnvironmentID) }, restate.WithName("finding environment")) @@ -354,6 +358,7 @@ func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.Deploy deployment.GitCommitSha.String, deployment.GitBranch.String, w.defaultDomain, + // TODO: source type is hardcoded to CLI_UPLOAD regardless of actual source type ctrlv1.SourceType_SOURCE_TYPE_CLI_UPLOAD, ) @@ -427,17 +432,46 @@ func (w *Workflow) Deploy(ctx restate.WorkflowSharedContext, req *hydrav1.Deploy } if !project.IsRolledBack && environment.Slug == "production" { - _, err = restate.Run(ctx, func(runCtx restate.RunContext) (restate.Void, error) { - return restate.Void{}, db.Query.UpdateProjectDeployments(runCtx, w.db.RW(), db.UpdateProjectDeploymentsParams{ - IsRolledBack: false, - ID: deployment.ProjectID, - LiveDeploymentID: sql.NullString{Valid: true, String: deployment.ID}, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + // Atomically read the current live deployment and swap it to the new one. + // This prevents a race where two concurrent deploys both capture the same + // previousLiveDeploymentID and one of them never gets scheduled for standby. 
+ previousLiveDeploymentID, err := restate.Run(ctx, func(runCtx restate.RunContext) (sql.NullString, error) { + return db.TxWithResult(runCtx, w.db.RW(), func(txCtx context.Context, tx db.DBTX) (sql.NullString, error) { + currentProject, findErr := db.Query.FindProjectById(txCtx, tx, deployment.ProjectID) + if findErr != nil { + return sql.NullString{}, findErr + } + + updateErr := db.Query.UpdateProjectDeployments(txCtx, tx, db.UpdateProjectDeploymentsParams{ + IsRolledBack: false, + ID: deployment.ProjectID, + LiveDeploymentID: sql.NullString{Valid: true, String: deployment.ID}, + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + }) + if updateErr != nil { + return sql.NullString{}, updateErr + } + + return currentProject.LiveDeploymentID, nil }) - }, restate.WithName("updating project live deployment")) + }, restate.WithName("swapping project live deployment")) if err != nil { return nil, err } + + if previousLiveDeploymentID.Valid { + _, err = hydrav1.NewDeploymentServiceClient(ctx, previousLiveDeploymentID.String). + ScheduleDesiredStateChange().Request( + &hydrav1.ScheduleDesiredStateChangeRequest{ + DelayMillis: (30 * time.Minute).Milliseconds(), + State: hydrav1.DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_STANDBY, + }, + restate.WithIdempotencyKey(deployment.ID), + ) + if err != nil { + return nil, err + } + } } logger.Info("deployment workflow completed", diff --git a/svc/ctrl/worker/deploy/doc.go b/svc/ctrl/worker/deploy/doc.go index 4817276fec..2505878600 100644 --- a/svc/ctrl/worker/deploy/doc.go +++ b/svc/ctrl/worker/deploy/doc.go @@ -1,122 +1,80 @@ -// Package deploy implements deployment lifecycle orchestration workflows. -// -// This package manages the complete deployment lifecycle including deploying new versions, -// rolling back to previous versions, and promoting deployments. 
It coordinates between -// container orchestration (Krane), database updates, domain routing, and sentinel configuration -// to ensure consistent deployment state. -// -// # Built on Restate -// -// All workflows in this package are built on top of Restate (restate.dev) for durable -// execution. This provides critical guarantees: -// -// - Automatic retries on transient failures -// - Exactly-once execution semantics for each workflow step -// - Durable state that survives process crashes and restarts -// - Virtual object concurrency control keyed by project ID -// -// The virtual object model ensures that only one deployment operation runs per project -// at any given time, preventing race conditions during concurrent deploy/rollback/promote -// operations that could leave the system in an inconsistent state. -// -// # Key Types -// -// [Workflow] is the main entry point that implements deployment orchestration. -// It provides three primary operations: -// -// - [Workflow.Deploy] - Deploy a new Docker image and configure routing -// - [Workflow.Rollback] - Roll back to a previous deployment -// - [Workflow.Promote] - Promote a deployment to live and clear rollback state -// -// # Usage -// -// The workflow is typically initialized with database connections, a Krane client, -// and configuration: -// -// workflow := deploy.New(deploy.Config{ -// DB: mainDB, -// Krane: kraneClient, -// DefaultDomain: "unkey.app", -// }) -// -// Deploy a new version: -// -// _, err := workflow.Deploy(ctx, &hydrav1.DeployRequest{ -// DeploymentId: "dep_123", -// DockerImage: "myapp:v1.2.3", -// KeyAuthId: "key_auth_456", // optional -// }) -// -// Rollback to previous version: -// -// _, err := workflow.Rollback(ctx, &hydrav1.RollbackRequest{ -// SourceDeploymentId: "dep_current", -// TargetDeploymentId: "dep_previous", -// }) -// -// Promote a deployment to live: -// -// _, err := workflow.Promote(ctx, &hydrav1.PromoteRequest{ -// TargetDeploymentId: "dep_123", -// }) -// -// # 
Deployment Flow -// -// The deployment process follows these steps: -// -// 1. Deployment lookup - Find and validate deployment record -// 2. Context gathering - Load workspace, project, and environment data -// 3. Status update to building - Mark deployment as in-progress -// 4. Container deployment - Create deployment in Krane -// 5. Polling for readiness - Wait for all instances to be running -// 6. VM registration - Register running instances in DB -// 7. OpenAPI scraping - Fetch API spec from running instances (if available) -// 8. Domain assignment - Create/update domains and sentinel configs via routing service -// 9. Status update to ready - Mark deployment as live -// 10. Project update - Update live deployment pointer (if production) -// -// Each step is wrapped in a restate.Run call, making it durable and retryable. If the -// workflow crashes at any point, Restate will resume from the last completed step rather -// than restarting from the beginning. The deferred error handler ensures that failed -// deployments are properly marked in the database even if the workflow is interrupted. -// -// # Rollback and Promote -// -// Rollbacks switch sticky domains (environment and live domains) from the current deployment -// to a previous deployment. This is done atomically through the routing service to prevent -// partial updates. The project is marked as rolled back to prevent new deployments from -// automatically taking over live domains. -// -// Promotion reverses a rollback by switching domains to a new deployment and clearing the -// rolled back flag. This allows normal deployment flow to resume. +// Package deploy orchestrates the deployment lifecycle for user applications. 
+// +// Deployments move through a multi-step pipeline that builds container images, +// provisions infrastructure across regions, waits for health, and configures +// domain routing — all durably, so a crash at any point resumes from the last +// completed step rather than restarting from scratch. +// +// # Why Restate Workflows +// +// Every handler in this package runs as a Restate Workflow (restate.dev), keyed +// by a caller-supplied workflow ID. Restate provides automatic retries, durable +// execution across process restarts, and exactly-once semantics per step. Each +// step is wrapped in restate.Run (or RunAsync for parallel work), making the +// entire pipeline resumable. If the process dies mid-deploy, Restate picks up +// from the last committed step without re-executing earlier side effects. +// +// # Operations +// +// [Workflow.Deploy] is the primary entrypoint. It validates the deployment +// record, loads workspace/project/environment context, then either builds a +// Docker image from a Git repository via Depot or accepts a pre-built image. +// It creates deployment topologies for every configured region (each with its +// own version from VersioningService), ensures sentinel containers and Cilium +// network policies exist per region, and polls in parallel until all instances +// are running. Once healthy, it generates frontline routes for per-commit, +// per-branch, and per-environment domains, reassigns sticky routes through +// RoutingService, marks the deployment ready, and — for non-rolled-back +// production environments — updates the project's live deployment pointer. +// The previous live deployment is scheduled for standby after 30 minutes via +// DeploymentService.ScheduleDesiredStateChange. +// +// [Workflow.Rollback] switches sticky frontline routes (environment and live) +// from the current live deployment to a previous one, atomically through +// RoutingService. 
It sets the project's isRolledBack flag, which prevents
+// subsequent deploys from automatically claiming the live routes.
+//
+// [Workflow.Promote] reverses a rollback by reassigning sticky routes to a new
+// target deployment and clearing the isRolledBack flag so normal deploy flow
+// resumes.
+//
+// [Workflow.ScaleDownIdlePreviewDeployments] paginates through preview
+// environments and sets any deployment to standby that has been idle (zero
+// requests in ClickHouse) for longer than six hours.
+//
+// # Image Builds
+//
+// When a deploy request carries a Git source, the workflow builds a container
+// image remotely through Depot. It retrieves or creates a Depot project per
+// Unkey project, acquires a BuildKit machine, fetches the repository via a
+// GitHub installation token, and streams build-step telemetry to ClickHouse.
 //
 // # Domain Generation
 //
-// The package generates multiple domain patterns per deployment:
-//
-// - Per-commit: `-git--.` (never reassigned)
-// - Per-branch: `-git--.` (sticky to branch)
-// - Per-environment: `--.` (sticky to environment)
-//
-// The sticky behavior ensures that branch and environment domains follow the latest
-// deployment for that branch/environment, while commit domains remain immutable.
+// [buildDomains] produces three domain patterns per deployment:
 //
-// # Sentinel Configuration
+// - Per-commit: -git--. (non-sticky, immutable)
+// - Per-branch: -git--. (sticky to branch)
+// - Per-environment: --. (sticky to environment)
 //
-// Sentinel configs are created for all domains (except localhost and .local/.test TLDs)
-// and stored as JSON in the database. Each config includes:
+// Sticky domains automatically follow the latest deployment matching their
+// criteria; commit domains never move. CLI uploads add a random numeric suffix
+// to the commit domain to avoid collisions from repeated pushes of the same
+// SHA.
Live-traffic routing is handled separately by sticky route reassignment +// in RoutingService, not by a dedicated "live" domain type. // -// - Deployment ID and enabled status -// - VM addresses for load balancing -// - Optional auth configuration (key auth ID) -// - Optional validation configuration (OpenAPI spec) +// # Network Policy // -// Sentinel configs use protojson encoding for easier debugging and direct database inspection. +// [Workflow.ensureCiliumNetworkPolicy] persists Cilium network policies in the +// database for each region that lacks one. Each policy allows ingress from the +// sentinel namespace to deployment pods on port 8080 and is applied by regional +// reconcilers. // // # Error Handling // -// The package uses Restate's error handling model with deferred cleanup. If any step fails, -// the deployment status is automatically updated to "failed". Terminal errors with appropriate -// HTTP status codes are returned for client errors (invalid input, not found, etc.). System -// errors are returned for unexpected failures that may be retried by Restate. +// Deploy defers a cleanup handler that marks the deployment as failed if the +// workflow does not finish successfully. Terminal errors (invalid input, not +// found) are returned with appropriate HTTP status codes so Restate does not +// retry them; transient failures are returned as regular errors for automatic +// retry. package deploy diff --git a/svc/ctrl/worker/deploy/promote_handler.go b/svc/ctrl/worker/deploy/promote_handler.go index 7fb2f145e9..66f56a817e 100644 --- a/svc/ctrl/worker/deploy/promote_handler.go +++ b/svc/ctrl/worker/deploy/promote_handler.go @@ -13,19 +13,19 @@ import ( // Promote reassigns all sticky domains to a deployment and clears the rolled back state. // -// This durable workflow moves sticky domains (environment and live domains) from the -// current live deployment to a new target deployment. 
It reverses a previous rollback -// and allows normal deployment flow to resume. +// This durable workflow moves sticky domains (environment and live) from the +// current live deployment to a new target deployment. It reverses a previous +// rollback and allows normal deployment flow to resume. // -// The workflow validates that: -// - Target deployment is ready (not building, deploying, or failed) -// - Target deployment has running VMs -// - Target deployment is not already the live deployment -// - Project has sticky domains to promote +// The workflow validates that the target deployment is ready, the project has a +// live deployment, the target is not already the live deployment, and there are +// sticky domains to promote. // // After switching domains atomically through the routing service, the project's live // deployment pointer is updated and the rolled back flag is cleared, allowing future -// deployments to automatically take over sticky domains. +// deployments to automatically take over sticky domains. Any pending scheduled +// state changes on the promoted deployment are cleared (so it won't be spun down), +// and the previous live deployment is scheduled for standby after 30 minutes. // // Returns terminal errors (400/404) for validation failures and retryable errors // for system failures. 
@@ -119,6 +119,18 @@ func (w *Workflow) Promote(ctx restate.WorkflowSharedContext, req *hydrav1.Promo return nil, err } + // ensure the new promoted deployment does not get spun down from existing scheduled actions + _, err = hydrav1.NewDeploymentServiceClient(ctx, targetDeployment.ID).ClearScheduledStateChanges().Request(&hydrav1.ClearScheduledStateChangesRequest{}) + if err != nil { + return nil, err + } + + // schedule old deployment to be spun down + hydrav1.NewDeploymentServiceClient(ctx, project.LiveDeploymentID.String).ScheduleDesiredStateChange().Send(&hydrav1.ScheduleDesiredStateChangeRequest{ + State: hydrav1.DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_STANDBY, + DelayMillis: (30 * time.Minute).Milliseconds(), + }) + logger.Info("promotion completed successfully", "target", req.GetTargetDeploymentId(), "domains_promoted", len(routeIDs)) diff --git a/svc/ctrl/worker/deploy/rollback_handler.go b/svc/ctrl/worker/deploy/rollback_handler.go index b3fed0ffee..22fc42ed0b 100644 --- a/svc/ctrl/worker/deploy/rollback_handler.go +++ b/svc/ctrl/worker/deploy/rollback_handler.go @@ -13,19 +13,20 @@ import ( // Rollback performs a rollback to a previous deployment. // -// This durable workflow switches sticky frontlineRoutes (environment and live frontlineRoutes) from the -// current live deployment back to a previous deployment. The operation is performed -// atomically through the routing service to prevent partial updates that could leave -// the system in an inconsistent state. +// This durable workflow switches sticky frontline routes (environment and live) +// from the current live deployment back to a previous deployment. The operation +// is performed atomically through the routing service to prevent partial updates +// that could leave the system in an inconsistent state. 
// -// The workflow validates that: -// - Source deployment is the current live deployment -// - Target deployment has running VMs -// - Both deployments are in the same project and environment -// - There are sticky frontlineRoutes to rollback +// The workflow validates that source and target are different deployments, that +// the source deployment is the current live deployment, that both deployments +// belong to the same project and environment, and that there are sticky frontline +// routes to rollback. // -// After switching frontlineRoutes, the project is marked as rolled back to prevent new -// deployments from automatically taking over the live frontlineRoutes. +// Before switching routes, any pending scheduled state changes on the target +// deployment are cleared so it won't be spun down while serving live traffic. +// After switching routes, the project is marked as rolled back to prevent new +// deployments from automatically taking over the live routes. // // Returns terminal errors (400/404) for validation failures and retryable errors // for system failures. 
@@ -87,6 +88,12 @@ func (w *Workflow) Rollback(ctx restate.WorkflowSharedContext, req *hydrav1.Roll
 		return nil, restate.TerminalError(fmt.Errorf("source deployment is not the current live deployment"), 400)
 	}
 
+	// ensure the rolled back deployment does not get spun down from existing scheduled actions
+	_, err = hydrav1.NewDeploymentServiceClient(ctx, targetDeployment.ID).ClearScheduledStateChanges().Request(&hydrav1.ClearScheduledStateChangesRequest{})
+	if err != nil {
+		return nil, err
+	}
+
 	// Get all frontlineRoutes on the live deployment that are sticky
 	frontlineRoutes, err := restate.Run(ctx, func(stepCtx restate.RunContext) ([]db.FindFrontlineRoutesForRollbackRow, error) {
 		return db.Query.FindFrontlineRoutesForRollback(stepCtx, w.db.RO(), db.FindFrontlineRoutesForRollbackParams{
diff --git a/svc/ctrl/worker/deploy/scale_down_idle_deployments.go b/svc/ctrl/worker/deploy/scale_down_idle_preview_deployments.go
similarity index 68%
rename from svc/ctrl/worker/deploy/scale_down_idle_deployments.go
rename to svc/ctrl/worker/deploy/scale_down_idle_preview_deployments.go
index 0c2845879f..bc0f07510e 100644
--- a/svc/ctrl/worker/deploy/scale_down_idle_deployments.go
+++ b/svc/ctrl/worker/deploy/scale_down_idle_preview_deployments.go
@@ -14,7 +14,13 @@ import (
 // how long a deployment must be idle for before we scale it down to 0
 var idleTime = 6 * time.Hour
 
-func (w *Workflow) ScaleDownIdleDeployments(ctx restate.WorkflowSharedContext, req *hydrav1.ScaleDownIdleDeploymentsRequest) (*hydrav1.ScaleDownIdleDeploymentsResponse, error) {
+// ScaleDownIdlePreviewDeployments reclaims resources from preview deployments
+// that have received no traffic within the idle window defined by idleTime.
+// Preview environments can accumulate many running deployments from feature
+// branches that are no longer actively used, so this workflow paginates through
+// all preview environments and transitions idle deployments to standby by
+// checking request counts in ClickHouse.
+func (w *Workflow) ScaleDownIdlePreviewDeployments(ctx restate.WorkflowSharedContext, req *hydrav1.ScaleDownIdlePreviewDeploymentsRequest) (*hydrav1.ScaleDownIdlePreviewDeploymentsResponse, error) { cutoff := time.Now().Add(-idleTime).UnixMilli() @@ -65,13 +71,11 @@ func (w *Workflow) ScaleDownIdleDeployments(ctx restate.WorkflowSharedContext, r } if requests == 0 { - err = restate.RunVoid(ctx, func(runCtx restate.RunContext) error { - return db.Query.UpdateDeploymentDesiredState(runCtx, w.db.RW(), db.UpdateDeploymentDesiredStateParams{ - ID: deployment.ID, - UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, - DesiredState: db.DeploymentsDesiredStateStandby, - }) - }, restate.WithName(fmt.Sprintf("set standby state for %s", deployment.ID))) + _, err = hydrav1.NewDeploymentServiceClient(ctx, deployment.ID).ScheduleDesiredStateChange().Request(&hydrav1.ScheduleDesiredStateChangeRequest{ + DelayMillis: 0, + State: hydrav1.DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_STANDBY, + }) + if err != nil { return nil, err } @@ -82,5 +86,5 @@ func (w *Workflow) ScaleDownIdleDeployments(ctx restate.WorkflowSharedContext, r } - return &hydrav1.ScaleDownIdleDeploymentsResponse{}, nil + return &hydrav1.ScaleDownIdlePreviewDeploymentsResponse{}, nil } diff --git a/svc/ctrl/worker/deploy/scale_down_idle_deployments_test.go b/svc/ctrl/worker/deploy/scale_down_idle_preview_deployments_test.go similarity index 92% rename from svc/ctrl/worker/deploy/scale_down_idle_deployments_test.go rename to svc/ctrl/worker/deploy/scale_down_idle_preview_deployments_test.go index 6b2c03a4a7..55b3a1225d 100644 --- a/svc/ctrl/worker/deploy/scale_down_idle_deployments_test.go +++ b/svc/ctrl/worker/deploy/scale_down_idle_preview_deployments_test.go @@ -13,7 +13,7 @@ import ( "github.com/unkeyed/unkey/svc/ctrl/integration/seed" ) -func TestScaleDownIdleDeployments_ScalesDownIdleDeploymentWithZeroRequests(t *testing.T) { +func 
TestScaleDownIdlePreviewDeployments_ScalesDownIdleDeploymentWithZeroRequests(t *testing.T) { h := harness.New(t) oldTime := time.Now().Add(-8 * time.Hour).UnixMilli() oldUpdatedAt := sql.NullInt64{Valid: true, Int64: oldTime} @@ -54,7 +54,7 @@ func TestScaleDownIdleDeployments_ScalesDownIdleDeploymentWithZeroRequests(t *te require.Equal(t, db.DeploymentsDesiredStateStandby, updated.DesiredState) } -func TestScaleDownIdleDeployments_DoesNotScaleDownDeploymentWithRecentRequests(t *testing.T) { +func TestScaleDownIdlePreviewDeployments_DoesNotScaleDownDeploymentWithRecentRequests(t *testing.T) { h := harness.New(t) oldTime := time.Now().Add(-8 * time.Hour).UnixMilli() oldUpdatedAt := sql.NullInt64{Valid: true, Int64: oldTime} @@ -97,7 +97,7 @@ func TestScaleDownIdleDeployments_DoesNotScaleDownDeploymentWithRecentRequests(t require.Equal(t, db.DeploymentsDesiredStateRunning, updated.DesiredState) } -func TestScaleDownIdleDeployments_IgnoresNonPreviewEnvironments(t *testing.T) { +func TestScaleDownIdlePreviewDeployments_IgnoresNonPreviewEnvironments(t *testing.T) { h := harness.New(t) oldTime := time.Now().Add(-8 * time.Hour).UnixMilli() oldUpdatedAt := sql.NullInt64{Valid: true, Int64: oldTime} @@ -138,7 +138,7 @@ func TestScaleDownIdleDeployments_IgnoresNonPreviewEnvironments(t *testing.T) { require.Equal(t, db.DeploymentsDesiredStateRunning, updated.DesiredState) } -func TestScaleDownIdleDeployments_IgnoresDeploymentsNotInReadyStatus(t *testing.T) { +func TestScaleDownIdlePreviewDeployments_IgnoresDeploymentsNotInReadyStatus(t *testing.T) { h := harness.New(t) oldTime := time.Now().Add(-8 * time.Hour).UnixMilli() oldUpdatedAt := sql.NullInt64{Valid: true, Int64: oldTime} @@ -179,7 +179,7 @@ func TestScaleDownIdleDeployments_IgnoresDeploymentsNotInReadyStatus(t *testing. 
require.Equal(t, db.DeploymentsDesiredStateRunning, updated.DesiredState) } -func TestScaleDownIdleDeployments_IgnoresRecentlyCreatedDeployments(t *testing.T) { +func TestScaleDownIdlePreviewDeployments_IgnoresRecentlyCreatedDeployments(t *testing.T) { h := harness.New(t) ws := h.Seed.CreateWorkspace(h.Ctx) @@ -219,7 +219,7 @@ func TestScaleDownIdleDeployments_IgnoresRecentlyCreatedDeployments(t *testing.T require.Equal(t, db.DeploymentsDesiredStateRunning, updated.DesiredState) } -func TestScaleDownIdleDeployments_IgnoresRecentlyUpdatedDeployments(t *testing.T) { +func TestScaleDownIdlePreviewDeployments_IgnoresRecentlyUpdatedDeployments(t *testing.T) { h := harness.New(t) oldTime := time.Now().Add(-8 * time.Hour).UnixMilli() @@ -260,7 +260,7 @@ func TestScaleDownIdleDeployments_IgnoresRecentlyUpdatedDeployments(t *testing.T require.Equal(t, db.DeploymentsDesiredStateRunning, updated.DesiredState) } -func TestScaleDownIdleDeployments_HandlesMultipleDeploymentsAcrossMultipleEnvironments(t *testing.T) { +func TestScaleDownIdlePreviewDeployments_HandlesMultipleDeploymentsAcrossMultipleEnvironments(t *testing.T) { h := harness.New(t) oldTime := time.Now().Add(-8 * time.Hour).UnixMilli() oldUpdatedAt := sql.NullInt64{Valid: true, Int64: oldTime} @@ -325,7 +325,7 @@ func TestScaleDownIdleDeployments_HandlesMultipleDeploymentsAcrossMultipleEnviro require.Equal(t, db.DeploymentsDesiredStateRunning, updated.DesiredState, "active deployment %s should be running", dep.ID) } } -func TestScaleDownIdleDeployments_PaginatesAcrossManyPreviewEnvironmentsAtScale(t *testing.T) { +func TestScaleDownIdlePreviewDeployments_PaginatesAcrossManyPreviewEnvironmentsAtScale(t *testing.T) { h := harness.New(t) oldTime := time.Now().Add(-8 * time.Hour).UnixMilli() oldUpdatedAt := sql.NullInt64{Valid: true, Int64: oldTime} @@ -374,7 +374,7 @@ func TestScaleDownIdleDeployments_PaginatesAcrossManyPreviewEnvironmentsAtScale( } func triggerScaleDown(t *testing.T, h *harness.Harness) { - client := 
hydrav1.NewDeploymentServiceIngressClient(h.Restate, uid.New("test")) - _, err := client.ScaleDownIdleDeployments().Request(h.Ctx, &hydrav1.ScaleDownIdleDeploymentsRequest{}) + client := hydrav1.NewDeployServiceIngressClient(h.Restate, uid.New("test")) + _, err := client.ScaleDownIdlePreviewDeployments().Request(h.Ctx, &hydrav1.ScaleDownIdlePreviewDeploymentsRequest{}) require.NoError(t, err) } diff --git a/svc/ctrl/worker/deploy/service.go b/svc/ctrl/worker/deploy/service.go index 2d4dfce87b..f8c31174fc 100644 --- a/svc/ctrl/worker/deploy/service.go +++ b/svc/ctrl/worker/deploy/service.go @@ -38,7 +38,7 @@ type RegistryConfig struct { // deployment operation runs per project at any time, preventing race conditions during // concurrent deploy/rollback/promote operations. type Workflow struct { - hydrav1.UnimplementedDeploymentServiceServer + hydrav1.UnimplementedDeployServiceServer db db.Database defaultDomain string @@ -55,7 +55,7 @@ type Workflow struct { allowUnauthenticatedDeployments bool } -var _ hydrav1.DeploymentServiceServer = (*Workflow)(nil) +var _ hydrav1.DeployServiceServer = (*Workflow)(nil) // Config holds the configuration for creating a deployment workflow. type Config struct { @@ -97,17 +97,17 @@ type Config struct { // New creates a new deployment workflow instance. 
func New(cfg Config) *Workflow { return &Workflow{ - UnimplementedDeploymentServiceServer: hydrav1.UnimplementedDeploymentServiceServer{}, - db: cfg.DB, - defaultDomain: cfg.DefaultDomain, - vault: cfg.Vault, - sentinelImage: cfg.SentinelImage, - availableRegions: cfg.AvailableRegions, - github: cfg.GitHub, - depotConfig: cfg.DepotConfig, - registryConfig: cfg.RegistryConfig, - buildPlatform: cfg.BuildPlatform, - clickhouse: cfg.Clickhouse, - allowUnauthenticatedDeployments: cfg.AllowUnauthenticatedDeployments, + UnimplementedDeployServiceServer: hydrav1.UnimplementedDeployServiceServer{}, + db: cfg.DB, + defaultDomain: cfg.DefaultDomain, + vault: cfg.Vault, + sentinelImage: cfg.SentinelImage, + availableRegions: cfg.AvailableRegions, + github: cfg.GitHub, + depotConfig: cfg.DepotConfig, + registryConfig: cfg.RegistryConfig, + buildPlatform: cfg.BuildPlatform, + clickhouse: cfg.Clickhouse, + allowUnauthenticatedDeployments: cfg.AllowUnauthenticatedDeployments, } } diff --git a/svc/ctrl/worker/deployment/BUILD.bazel b/svc/ctrl/worker/deployment/BUILD.bazel new file mode 100644 index 0000000000..af3653b8f6 --- /dev/null +++ b/svc/ctrl/worker/deployment/BUILD.bazel @@ -0,0 +1,18 @@ +load("@rules_go//go:def.bzl", "go_library") + +go_library( + name = "deployment", + srcs = [ + "clear_scheduled.go", + "deployment_state.go", + "doc.go", + "service.go", + ], + importpath = "github.com/unkeyed/unkey/svc/ctrl/worker/deployment", + visibility = ["//visibility:public"], + deps = [ + "//gen/proto/hydra/v1:hydra", + "//pkg/db", + "@com_github_restatedev_sdk_go//:sdk-go", + ], +) diff --git a/svc/ctrl/worker/deployment/clear_scheduled.go b/svc/ctrl/worker/deployment/clear_scheduled.go new file mode 100644 index 0000000000..c1b93782ed --- /dev/null +++ b/svc/ctrl/worker/deployment/clear_scheduled.go @@ -0,0 +1,18 @@ +package deployment + +import ( + restate "github.com/restatedev/sdk-go" + hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" +) + +// ClearScheduledStateChanges 
removes the pending transition record, +// cancelling any scheduled desired state change. A previously enqueued +// ChangeDesiredState call may still fire after the delay, but it will +// encounter a nil transition and silently no-op rather than applying +// the stale state change. +func (v *VirtualObject) ClearScheduledStateChanges(ctx restate.ObjectContext, req *hydrav1.ClearScheduledStateChangesRequest) (*hydrav1.ClearScheduledStateChangesResponse, error) { + + restate.Clear(ctx, transitionKey) + return &hydrav1.ClearScheduledStateChangesResponse{}, nil + +} diff --git a/svc/ctrl/worker/deployment/deployment_state.go b/svc/ctrl/worker/deployment/deployment_state.go new file mode 100644 index 0000000000..0f67dfd2dd --- /dev/null +++ b/svc/ctrl/worker/deployment/deployment_state.go @@ -0,0 +1,163 @@ +package deployment + +import ( + "context" + "database/sql" + "fmt" + "time" + + restate "github.com/restatedev/sdk-go" + hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/pkg/db" +) + +const transitionKey = "transition" + +// transition is the Restate-persisted state for a pending desired state change. +// Only the most recently written transition is considered active; older ones are +// identified and discarded by nonce mismatch in ChangeDesiredState. +type transition struct { + Nonce string + To hydrav1.DeploymentDesiredState +} + +// ScheduleDesiredStateChange records a future desired state transition for this +// deployment. It generates a unique nonce, persists the transition in Restate +// state, and sends a delayed ChangeDesiredState call to itself. If called again +// before the delay elapses, the new nonce overwrites the old one, causing the +// previous delayed call to no-op on nonce mismatch. 
+func (v *VirtualObject) ScheduleDesiredStateChange(ctx restate.ObjectContext, req *hydrav1.ScheduleDesiredStateChangeRequest) (*hydrav1.ScheduleDesiredStateChangeResponse, error) { + + nonce := restate.UUID(ctx).String() + + t := transition{ + Nonce: nonce, + To: req.GetState(), + } + + restate.Set(ctx, transitionKey, &t) + + delay := time.Duration(req.GetDelayMillis()) * time.Millisecond + + options := []restate.SendOption{} + if delay > 0 { + options = append(options, restate.WithDelay(delay)) + } + + hydrav1.NewDeploymentServiceClient(ctx, restate.Key(ctx)).ChangeDesiredState().Send(&hydrav1.ChangeDesiredStateRequest{ + Nonce: nonce, + State: req.GetState(), + }, options...) + + return &hydrav1.ScheduleDesiredStateChangeResponse{}, nil +} + +// ChangeDesiredState applies a previously scheduled desired state transition to +// the database. It validates the request nonce against the stored transition: +// if no transition exists (already applied and cleared) or the nonce mismatches +// (a newer schedule has superseded this one), the call returns successfully +// without making any changes. On match, it maps the protobuf state enum to the +// database representation, updates the deployment's desired state and all +// topology entries, and clears the stored transition. 
+func (v *VirtualObject) ChangeDesiredState(ctx restate.ObjectContext, req *hydrav1.ChangeDesiredStateRequest) (*hydrav1.ChangeDesiredStateResponse, error) { + + deploymentID := restate.Key(ctx) + + t, err := restate.Get[*transition](ctx, transitionKey) + if err != nil { + return nil, err + } + if t == nil { + // This is a noop, since the request was removed + return &hydrav1.ChangeDesiredStateResponse{}, nil + } + if t.Nonce != req.GetNonce() { + // This is a noop, since the request is outdated + return &hydrav1.ChangeDesiredStateResponse{}, nil + } + + var desiredState db.DeploymentsDesiredState + var topologyDesiredStatus db.DeploymentTopologyDesiredStatus + switch req.GetState() { + case hydrav1.DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_RUNNING: + desiredState = db.DeploymentsDesiredStateRunning + topologyDesiredStatus = db.DeploymentTopologyDesiredStatusStarted + case hydrav1.DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_STANDBY: + desiredState = db.DeploymentsDesiredStateStandby + topologyDesiredStatus = db.DeploymentTopologyDesiredStatusStopped + case hydrav1.DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_ARCHIVED: + desiredState = db.DeploymentsDesiredStateArchived + topologyDesiredStatus = db.DeploymentTopologyDesiredStatusStopped + case hydrav1.DeploymentDesiredState_DEPLOYMENT_DESIRED_STATE_UNSPECIFIED: + return nil, restate.TerminalErrorf("invalid state: %s", req.GetState()) + default: + return nil, restate.TerminalErrorf("unhandled state: %s", req.GetState()) + } + + err = restate.RunVoid(ctx, func(runCtx restate.RunContext) error { + + return db.Tx(runCtx, v.db.RW(), func(txCtx context.Context, tx db.DBTX) error { + deployment, err := db.Query.FindDeploymentById(txCtx, tx, deploymentID) + if err != nil { + return err + } + project, err := db.Query.FindProjectById(txCtx, tx, deployment.ProjectID) + if err != nil { + return err + } + + if project.LiveDeploymentID.Valid && project.LiveDeploymentID.String == deploymentID { + return 
restate.TerminalErrorf("not allowed to modify the current live deployment") + } + + err = db.Query.UpdateDeploymentDesiredState(txCtx, tx, db.UpdateDeploymentDesiredStateParams{ + ID: deploymentID, + DesiredState: desiredState, + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + }) + if err != nil { + return err + } + return nil + + }) + + }, restate.WithName("updating desired state")) + + if err != nil { + return nil, err + } + + // Update all topology entries so WatchDeployments picks up the change. + // Each region needs a new version from VersioningService. + regions, err := restate.Run(ctx, func(runCtx restate.RunContext) ([]string, error) { + return db.Query.FindDeploymentRegions(runCtx, v.db.RO(), deploymentID) + }, restate.WithName("find deployment regions")) + if err != nil { + return nil, fmt.Errorf("failed to find deployment regions: %w", err) + } + + for _, region := range regions { + versionResp, err := hydrav1.NewVersioningServiceClient(ctx, region).NextVersion().Request(&hydrav1.NextVersionRequest{}) + if err != nil { + return nil, fmt.Errorf("failed to get next version for region %s: %w", region, err) + } + + err = restate.RunVoid(ctx, func(runCtx restate.RunContext) error { + return db.Query.UpdateDeploymentTopologyDesiredStatus(runCtx, v.db.RW(), db.UpdateDeploymentTopologyDesiredStatusParams{ + DesiredStatus: topologyDesiredStatus, + Version: versionResp.GetVersion(), + UpdatedAt: sql.NullInt64{Valid: true, Int64: time.Now().UnixMilli()}, + DeploymentID: deploymentID, + Region: region, + }) + }, restate.WithName(fmt.Sprintf("updating topology desired status in %s", region))) + if err != nil { + return nil, fmt.Errorf("failed to update topology desired status in %s: %w", region, err) + } + } + + restate.Clear(ctx, transitionKey) + + return &hydrav1.ChangeDesiredStateResponse{}, nil +} diff --git a/svc/ctrl/worker/deployment/doc.go b/svc/ctrl/worker/deployment/doc.go new file mode 100644 index 0000000000..d89ed5bb60 --- 
/dev/null +++ b/svc/ctrl/worker/deployment/doc.go @@ -0,0 +1,42 @@ +// Package deployment provides a Restate virtual object that serialises all +// mutations targeting a single deployment. +// +// Multiple actors may need to mutate a deployment concurrently — the deploy +// workflow scheduling a standby transition, a cron job scaling down idle +// previews, an operator intervening manually, or future operations yet to be +// added. Without coordination these requests would race, potentially +// overwriting each other or applying changes in the wrong order. +// +// # Serialisation via Restate Virtual Objects +// +// The package solves this with a Restate virtual object keyed by deployment ID. +// Restate guarantees that all calls to the same virtual object key are +// processed sequentially: if two requests arrive for the same deployment at +// the same time, Restate queues one until the other completes. This eliminates +// the need for external locks or optimistic-concurrency checks in the +// database and makes it safe to add new operations to this object without +// worrying about cross-operation races. +// +// # Last-Writer-Wins for Scheduled State Changes +// +// Sequential execution alone is not enough for delayed operations. Consider a +// deployment that is scheduled for standby in 30 minutes, but five minutes +// later someone requests it to stay running. The delayed call is already +// enqueued in Restate and will fire regardless. Cancelling Restate timers is +// not possible, so the package uses a nonce-based last-writer-wins mechanism +// instead. +// +// When [VirtualObject.ScheduleDesiredStateChange] is called, it generates a +// unique nonce, writes a transition record (nonce + target state) into Restate +// state, and sends a delayed [VirtualObject.ChangeDesiredState] call carrying +// that nonce. If ScheduleDesiredStateChange is called again before the delay +// elapses, it overwrites the stored transition with a new nonce. 
When the +// first delayed ChangeDesiredState finally executes, it compares its nonce to +// the stored one, sees a mismatch, and returns without making any changes. +// Only the most recently scheduled transition's nonce will match, so it is +// the only one that takes effect. +// +// [VirtualObject.ClearScheduledStateChanges] removes the stored transition +// entirely, which causes any in-flight delayed call to no-op because there is +// no transition record left to match against. +package deployment diff --git a/svc/ctrl/worker/deployment/service.go b/svc/ctrl/worker/deployment/service.go new file mode 100644 index 0000000000..fcb9ba0c9a --- /dev/null +++ b/svc/ctrl/worker/deployment/service.go @@ -0,0 +1,31 @@ +package deployment + +import ( + hydrav1 "github.com/unkeyed/unkey/gen/proto/hydra/v1" + "github.com/unkeyed/unkey/pkg/db" +) + +// VirtualObject serialises all mutations targeting a single deployment. See +// the package documentation for an explanation of the virtual object keying +// and the nonce-based last-writer-wins mechanism used for scheduled state +// changes. +type VirtualObject struct { + hydrav1.UnimplementedDeploymentServiceServer + db db.Database +} + +var _ hydrav1.DeploymentServiceServer = (*VirtualObject)(nil) + +// Config holds the dependencies required to create a VirtualObject. +type Config struct { + // DB is the main database connection for workspace, project, and deployment data. + DB db.Database +} + +// New creates a new VirtualObject from the given configuration. 
+func New(cfg Config) *VirtualObject { + return &VirtualObject{ + UnimplementedDeploymentServiceServer: hydrav1.UnimplementedDeploymentServiceServer{}, + db: cfg.DB, + } +} diff --git a/svc/ctrl/worker/run.go b/svc/ctrl/worker/run.go index a578d769b7..d5d4c9be6e 100644 --- a/svc/ctrl/worker/run.go +++ b/svc/ctrl/worker/run.go @@ -33,6 +33,7 @@ import ( "github.com/unkeyed/unkey/svc/ctrl/worker/clickhouseuser" workercustomdomain "github.com/unkeyed/unkey/svc/ctrl/worker/customdomain" "github.com/unkeyed/unkey/svc/ctrl/worker/deploy" + "github.com/unkeyed/unkey/svc/ctrl/worker/deployment" githubclient "github.com/unkeyed/unkey/svc/ctrl/worker/github" "github.com/unkeyed/unkey/svc/ctrl/worker/quotacheck" "github.com/unkeyed/unkey/svc/ctrl/worker/routing" @@ -155,7 +156,7 @@ func Run(ctx context.Context, cfg Config) error { // Restate Server - uses logging.GetHandler() for slog integration restateSrv := restateServer.NewRestate().WithLogger(logger.GetHandler(), false) - restateSrv.Bind(hydrav1.NewDeploymentServiceServer(deploy.New(deploy.Config{ + restateSrv.Bind(hydrav1.NewDeployServiceServer(deploy.New(deploy.Config{ DB: database, DefaultDomain: cfg.DefaultDomain, Vault: vaultClient, @@ -177,6 +178,9 @@ func Run(ctx context.Context, cfg Config) error { restate.KillOnMaxAttempts(), ), )) + restateSrv.Bind(hydrav1.NewDeploymentServiceServer(deployment.New(deployment.Config{ + DB: database, + }), restate.WithIngressPrivate(true))) restateSrv.Bind(hydrav1.NewRoutingServiceServer(routing.New(routing.Config{ DB: database, diff --git a/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts b/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts index 0bcf822d5c..1504768806 100644 --- a/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts +++ b/web/apps/dashboard/gen/proto/ctrl/v1/deployment_pb.ts @@ -10,7 +10,7 @@ import type { Message } from "@bufbuild/protobuf"; * Describes the file ctrl/v1/deployment.proto. 
*/ export const file_ctrl_v1_deployment: GenFile = /*@__PURE__*/ - fileDesc("ChhjdHJsL3YxL2RlcGxveW1lbnQucHJvdG8SB2N0cmwudjEi7gEKF0NyZWF0ZURlcGxveW1lbnRSZXF1ZXN0EhIKCnByb2plY3RfaWQYAiABKAkSDgoGYnJhbmNoGAMgASgJEhgKEGVudmlyb25tZW50X3NsdWcYBCABKAkSFAoMZG9ja2VyX2ltYWdlGAYgASgJEi8KCmdpdF9jb21taXQYByABKAsyFi5jdHJsLnYxLkdpdENvbW1pdEluZm9IAIgBARIYCgtrZXlzcGFjZV9pZBgIIAEoCUgBiAEBEg8KB2NvbW1hbmQYCSADKAlCDQoLX2dpdF9jb21taXRCDgoMX2tleXNwYWNlX2lkSgQIARACIoABCg1HaXRDb21taXRJbmZvEhIKCmNvbW1pdF9zaGEYASABKAkSFgoOY29tbWl0X21lc3NhZ2UYAiABKAkSFQoNYXV0aG9yX2hhbmRsZRgDIAEoCRIZChFhdXRob3JfYXZhdGFyX3VybBgEIAEoCRIRCgl0aW1lc3RhbXAYBSABKAMiXAoYQ3JlYXRlRGVwbG95bWVudFJlc3BvbnNlEhUKDWRlcGxveW1lbnRfaWQYASABKAkSKQoGc3RhdHVzGAIgASgOMhkuY3RybC52MS5EZXBsb3ltZW50U3RhdHVzIi0KFEdldERlcGxveW1lbnRSZXF1ZXN0EhUKDWRlcGxveW1lbnRfaWQYASABKAkiQAoVR2V0RGVwbG95bWVudFJlc3BvbnNlEicKCmRlcGxveW1lbnQYASABKAsyEy5jdHJsLnYxLkRlcGxveW1lbnQiiAUKCkRlcGxveW1lbnQSCgoCaWQYASABKAkSFAoMd29ya3NwYWNlX2lkGAIgASgJEhIKCnByb2plY3RfaWQYAyABKAkSFgoOZW52aXJvbm1lbnRfaWQYBCABKAkSFgoOZ2l0X2NvbW1pdF9zaGEYBSABKAkSEgoKZ2l0X2JyYW5jaBgGIAEoCRIpCgZzdGF0dXMYByABKA4yGS5jdHJsLnYxLkRlcGxveW1lbnRTdGF0dXMSFQoNZXJyb3JfbWVzc2FnZRgIIAEoCRJMChVlbnZpcm9ubWVudF92YXJpYWJsZXMYCSADKAsyLS5jdHJsLnYxLkRlcGxveW1lbnQuRW52aXJvbm1lbnRWYXJpYWJsZXNFbnRyeRIjCgh0b3BvbG9neRgKIAEoCzIRLmN0cmwudjEuVG9wb2xvZ3kSEgoKY3JlYXRlZF9hdBgLIAEoAxISCgp1cGRhdGVkX2F0GAwgASgDEhEKCWhvc3RuYW1lcxgNIAMoCRIXCg9yb290ZnNfaW1hZ2VfaWQYDiABKAkSEAoIYnVpbGRfaWQYDyABKAkSJgoFc3RlcHMYECADKAsyFy5jdHJsLnYxLkRlcGxveW1lbnRTdGVwEhoKEmdpdF9jb21taXRfbWVzc2FnZRgRIAEoCRIgChhnaXRfY29tbWl0X2F1dGhvcl9oYW5kbGUYEiABKAkSJAocZ2l0X2NvbW1pdF9hdXRob3JfYXZhdGFyX3VybBgTIAEoCRIcChRnaXRfY29tbWl0X3RpbWVzdGFtcBgUIAEoAxo7ChlFbnZpcm9ubWVudFZhcmlhYmxlc0VudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEiXAoORGVwbG95bWVudFN0ZXASDgoGc3RhdHVzGAEgASgJEg8KB21lc3NhZ2UYAiABKAkSFQoNZXJyb3JfbWVzc2FnZRgDIAEoCRISCgpjcmVhdGVkX2F0GAQgASgDIqcBCghUb3BvbG9neRIWCg5jcHVfbWlsbGljb3JlcxgBIAEoBRISCgptZW1vcnlfbWliGAIgASgFEigKB3JlZ2lvbnMYAyADKAsyFy5jdHJsLnY
xLlJlZ2lvbmFsQ29uZmlnEhwKFGlkbGVfdGltZW91dF9zZWNvbmRzGAQgASgFEhkKEWhlYWx0aF9jaGVja19wYXRoGAUgASgJEgwKBHBvcnQYBiABKAUiTgoOUmVnaW9uYWxDb25maWcSDgoGcmVnaW9uGAEgASgJEhUKDW1pbl9pbnN0YW5jZXMYAiABKAUSFQoNbWF4X2luc3RhbmNlcxgDIAEoBSJNCg9Sb2xsYmFja1JlcXVlc3QSHAoUc291cmNlX2RlcGxveW1lbnRfaWQYASABKAkSHAoUdGFyZ2V0X2RlcGxveW1lbnRfaWQYAiABKAkiEgoQUm9sbGJhY2tSZXNwb25zZSIuCg5Qcm9tb3RlUmVxdWVzdBIcChR0YXJnZXRfZGVwbG95bWVudF9pZBgBIAEoCSIRCg9Qcm9tb3RlUmVzcG9uc2Uq7wEKEERlcGxveW1lbnRTdGF0dXMSIQodREVQTE9ZTUVOVF9TVEFUVVNfVU5TUEVDSUZJRUQQABIdChlERVBMT1lNRU5UX1NUQVRVU19QRU5ESU5HEAESHgoaREVQTE9ZTUVOVF9TVEFUVVNfQlVJTERJTkcQAhIfChtERVBMT1lNRU5UX1NUQVRVU19ERVBMT1lJTkcQAxIdChlERVBMT1lNRU5UX1NUQVRVU19ORVRXT1JLEAQSGwoXREVQTE9ZTUVOVF9TVEFUVVNfUkVBRFkQBRIcChhERVBMT1lNRU5UX1NUQVRVU19GQUlMRUQQBipaCgpTb3VyY2VUeXBlEhsKF1NPVVJDRV9UWVBFX1VOU1BFQ0lGSUVEEAASEwoPU09VUkNFX1RZUEVfR0lUEAESGgoWU09VUkNFX1RZUEVfQ0xJX1VQTE9BRBACMsMCChFEZXBsb3ltZW50U2VydmljZRJZChBDcmVhdGVEZXBsb3ltZW50EiAuY3RybC52MS5DcmVhdGVEZXBsb3ltZW50UmVxdWVzdBohLmN0cmwudjEuQ3JlYXRlRGVwbG95bWVudFJlc3BvbnNlIgASUAoNR2V0RGVwbG95bWVudBIdLmN0cmwudjEuR2V0RGVwbG95bWVudFJlcXVlc3QaHi5jdHJsLnYxLkdldERlcGxveW1lbnRSZXNwb25zZSIAEkEKCFJvbGxiYWNrEhguY3RybC52MS5Sb2xsYmFja1JlcXVlc3QaGS5jdHJsLnYxLlJvbGxiYWNrUmVzcG9uc2UiABI+CgdQcm9tb3RlEhcuY3RybC52MS5Qcm9tb3RlUmVxdWVzdBoYLmN0cmwudjEuUHJvbW90ZVJlc3BvbnNlIgBCjgEKC2NvbS5jdHJsLnYxQg9EZXBsb3ltZW50UHJvdG9QAVoxZ2l0aHViLmNvbS91bmtleWVkL3Vua2V5L2dlbi9wcm90by9jdHJsL3YxO2N0cmx2MaICA0NYWKoCB0N0cmwuVjHKAgdDdHJsXFYx4gITQ3RybFxWMVxHUEJNZXRhZGF0YeoCCEN0cmw6OlYxYgZwcm90bzM"); + 
fileDesc("ChhjdHJsL3YxL2RlcGxveW1lbnQucHJvdG8SB2N0cmwudjEi7gEKF0NyZWF0ZURlcGxveW1lbnRSZXF1ZXN0EhIKCnByb2plY3RfaWQYAiABKAkSDgoGYnJhbmNoGAMgASgJEhgKEGVudmlyb25tZW50X3NsdWcYBCABKAkSFAoMZG9ja2VyX2ltYWdlGAYgASgJEi8KCmdpdF9jb21taXQYByABKAsyFi5jdHJsLnYxLkdpdENvbW1pdEluZm9IAIgBARIYCgtrZXlzcGFjZV9pZBgIIAEoCUgBiAEBEg8KB2NvbW1hbmQYCSADKAlCDQoLX2dpdF9jb21taXRCDgoMX2tleXNwYWNlX2lkSgQIARACIoABCg1HaXRDb21taXRJbmZvEhIKCmNvbW1pdF9zaGEYASABKAkSFgoOY29tbWl0X21lc3NhZ2UYAiABKAkSFQoNYXV0aG9yX2hhbmRsZRgDIAEoCRIZChFhdXRob3JfYXZhdGFyX3VybBgEIAEoCRIRCgl0aW1lc3RhbXAYBSABKAMiXAoYQ3JlYXRlRGVwbG95bWVudFJlc3BvbnNlEhUKDWRlcGxveW1lbnRfaWQYASABKAkSKQoGc3RhdHVzGAIgASgOMhkuY3RybC52MS5EZXBsb3ltZW50U3RhdHVzIi0KFEdldERlcGxveW1lbnRSZXF1ZXN0EhUKDWRlcGxveW1lbnRfaWQYASABKAkiQAoVR2V0RGVwbG95bWVudFJlc3BvbnNlEicKCmRlcGxveW1lbnQYASABKAsyEy5jdHJsLnYxLkRlcGxveW1lbnQiiAUKCkRlcGxveW1lbnQSCgoCaWQYASABKAkSFAoMd29ya3NwYWNlX2lkGAIgASgJEhIKCnByb2plY3RfaWQYAyABKAkSFgoOZW52aXJvbm1lbnRfaWQYBCABKAkSFgoOZ2l0X2NvbW1pdF9zaGEYBSABKAkSEgoKZ2l0X2JyYW5jaBgGIAEoCRIpCgZzdGF0dXMYByABKA4yGS5jdHJsLnYxLkRlcGxveW1lbnRTdGF0dXMSFQoNZXJyb3JfbWVzc2FnZRgIIAEoCRJMChVlbnZpcm9ubWVudF92YXJpYWJsZXMYCSADKAsyLS5jdHJsLnYxLkRlcGxveW1lbnQuRW52aXJvbm1lbnRWYXJpYWJsZXNFbnRyeRIjCgh0b3BvbG9neRgKIAEoCzIRLmN0cmwudjEuVG9wb2xvZ3kSEgoKY3JlYXRlZF9hdBgLIAEoAxISCgp1cGRhdGVkX2F0GAwgASgDEhEKCWhvc3RuYW1lcxgNIAMoCRIXCg9yb290ZnNfaW1hZ2VfaWQYDiABKAkSEAoIYnVpbGRfaWQYDyABKAkSJgoFc3RlcHMYECADKAsyFy5jdHJsLnYxLkRlcGxveW1lbnRTdGVwEhoKEmdpdF9jb21taXRfbWVzc2FnZRgRIAEoCRIgChhnaXRfY29tbWl0X2F1dGhvcl9oYW5kbGUYEiABKAkSJAocZ2l0X2NvbW1pdF9hdXRob3JfYXZhdGFyX3VybBgTIAEoCRIcChRnaXRfY29tbWl0X3RpbWVzdGFtcBgUIAEoAxo7ChlFbnZpcm9ubWVudFZhcmlhYmxlc0VudHJ5EgsKA2tleRgBIAEoCRINCgV2YWx1ZRgCIAEoCToCOAEiXAoORGVwbG95bWVudFN0ZXASDgoGc3RhdHVzGAEgASgJEg8KB21lc3NhZ2UYAiABKAkSFQoNZXJyb3JfbWVzc2FnZRgDIAEoCRISCgpjcmVhdGVkX2F0GAQgASgDIqcBCghUb3BvbG9neRIWCg5jcHVfbWlsbGljb3JlcxgBIAEoBRISCgptZW1vcnlfbWliGAIgASgFEigKB3JlZ2lvbnMYAyADKAsyFy5jdHJsLnYxLlJlZ2lvbmFsQ29uZmlnEhwKFGlkbGVfdGltZW91dF9zZWNvbmRzGAQgASgFEhkKEW
hlYWx0aF9jaGVja19wYXRoGAUgASgJEgwKBHBvcnQYBiABKAUiTgoOUmVnaW9uYWxDb25maWcSDgoGcmVnaW9uGAEgASgJEhUKDW1pbl9pbnN0YW5jZXMYAiABKAUSFQoNbWF4X2luc3RhbmNlcxgDIAEoBSJNCg9Sb2xsYmFja1JlcXVlc3QSHAoUc291cmNlX2RlcGxveW1lbnRfaWQYASABKAkSHAoUdGFyZ2V0X2RlcGxveW1lbnRfaWQYAiABKAkiEgoQUm9sbGJhY2tSZXNwb25zZSIuCg5Qcm9tb3RlUmVxdWVzdBIcChR0YXJnZXRfZGVwbG95bWVudF9pZBgBIAEoCSIRCg9Qcm9tb3RlUmVzcG9uc2Uq7wEKEERlcGxveW1lbnRTdGF0dXMSIQodREVQTE9ZTUVOVF9TVEFUVVNfVU5TUEVDSUZJRUQQABIdChlERVBMT1lNRU5UX1NUQVRVU19QRU5ESU5HEAESHgoaREVQTE9ZTUVOVF9TVEFUVVNfQlVJTERJTkcQAhIfChtERVBMT1lNRU5UX1NUQVRVU19ERVBMT1lJTkcQAxIdChlERVBMT1lNRU5UX1NUQVRVU19ORVRXT1JLEAQSGwoXREVQTE9ZTUVOVF9TVEFUVVNfUkVBRFkQBRIcChhERVBMT1lNRU5UX1NUQVRVU19GQUlMRUQQBipaCgpTb3VyY2VUeXBlEhsKF1NPVVJDRV9UWVBFX1VOU1BFQ0lGSUVEEAASEwoPU09VUkNFX1RZUEVfR0lUEAESGgoWU09VUkNFX1RZUEVfQ0xJX1VQTE9BRBACMr8CCg1EZXBsb3lTZXJ2aWNlElkKEENyZWF0ZURlcGxveW1lbnQSIC5jdHJsLnYxLkNyZWF0ZURlcGxveW1lbnRSZXF1ZXN0GiEuY3RybC52MS5DcmVhdGVEZXBsb3ltZW50UmVzcG9uc2UiABJQCg1HZXREZXBsb3ltZW50Eh0uY3RybC52MS5HZXREZXBsb3ltZW50UmVxdWVzdBoeLmN0cmwudjEuR2V0RGVwbG95bWVudFJlc3BvbnNlIgASQQoIUm9sbGJhY2sSGC5jdHJsLnYxLlJvbGxiYWNrUmVxdWVzdBoZLmN0cmwudjEuUm9sbGJhY2tSZXNwb25zZSIAEj4KB1Byb21vdGUSFy5jdHJsLnYxLlByb21vdGVSZXF1ZXN0GhguY3RybC52MS5Qcm9tb3RlUmVzcG9uc2UiAEKOAQoLY29tLmN0cmwudjFCD0RlcGxveW1lbnRQcm90b1ABWjFnaXRodWIuY29tL3Vua2V5ZWQvdW5rZXkvZ2VuL3Byb3RvL2N0cmwvdjE7Y3RybHYxogIDQ1hYqgIHQ3RybC5WMcoCB0N0cmxcVjHiAhNDdHJsXFYxXEdQQk1ldGFkYXRh6gIIQ3RybDo6VjFiBnByb3RvMw"); /** * @generated from message ctrl.v1.CreateDeploymentRequest @@ -553,13 +553,13 @@ export const SourceTypeSchema: GenEnum = /*@__PURE__*/ enumDesc(file_ctrl_v1_deployment, 1); /** - * @generated from service ctrl.v1.DeploymentService + * @generated from service ctrl.v1.DeployService */ -export const DeploymentService: GenService<{ +export const DeployService: GenService<{ /** * Create a new deployment with a prebuilt docker image * - * @generated from rpc ctrl.v1.DeploymentService.CreateDeployment + * @generated from rpc 
ctrl.v1.DeployService.CreateDeployment */ createDeployment: { methodKind: "unary"; @@ -569,7 +569,7 @@ export const DeploymentService: GenService<{ /** * Get deployment details * - * @generated from rpc ctrl.v1.DeploymentService.GetDeployment + * @generated from rpc ctrl.v1.DeployService.GetDeployment */ getDeployment: { methodKind: "unary"; @@ -579,7 +579,7 @@ export const DeploymentService: GenService<{ /** * Reassign the sticky domains of the projects live deployment to the target deployment * - * @generated from rpc ctrl.v1.DeploymentService.Rollback + * @generated from rpc ctrl.v1.DeployService.Rollback */ rollback: { methodKind: "unary"; @@ -589,7 +589,7 @@ export const DeploymentService: GenService<{ /** * Promote the deployment to the live environment * - * @generated from rpc ctrl.v1.DeploymentService.Promote + * @generated from rpc ctrl.v1.DeployService.Promote */ promote: { methodKind: "unary"; diff --git a/web/apps/dashboard/lib/trpc/routers/deploy/deployment/promote.ts b/web/apps/dashboard/lib/trpc/routers/deploy/deployment/promote.ts index 13687adcd0..c3a9edff5f 100644 --- a/web/apps/dashboard/lib/trpc/routers/deploy/deployment/promote.ts +++ b/web/apps/dashboard/lib/trpc/routers/deploy/deployment/promote.ts @@ -3,7 +3,7 @@ import { createClient } from "@connectrpc/connect"; import { createConnectTransport } from "@connectrpc/connect-web"; // Import service definition that you want to connect to. -import { DeploymentService } from "@/gen/proto/ctrl/v1/deployment_pb"; +import { DeployService } from "@/gen/proto/ctrl/v1/deployment_pb"; import { db } from "@/lib/db"; import { env } from "@/lib/env"; @@ -31,7 +31,7 @@ export const promote = workspaceProcedure // Here we make the client itself, combining the service // definition with the transport. 
const ctrl = createClient( - DeploymentService, + DeployService, createConnectTransport({ baseUrl: CTRL_URL, interceptors: [ diff --git a/web/apps/dashboard/lib/trpc/routers/deploy/deployment/rollback.ts b/web/apps/dashboard/lib/trpc/routers/deploy/deployment/rollback.ts index 2289f3d209..6ead6c76d5 100644 --- a/web/apps/dashboard/lib/trpc/routers/deploy/deployment/rollback.ts +++ b/web/apps/dashboard/lib/trpc/routers/deploy/deployment/rollback.ts @@ -3,7 +3,7 @@ import { createClient } from "@connectrpc/connect"; import { createConnectTransport } from "@connectrpc/connect-web"; // Import service definition that you want to connect to. -import { DeploymentService } from "@/gen/proto/ctrl/v1/deployment_pb"; +import { DeployService } from "@/gen/proto/ctrl/v1/deployment_pb"; import { db } from "@/lib/db"; import { env } from "@/lib/env"; @@ -30,7 +30,7 @@ export const rollback = workspaceProcedure // Here we make the client itself, combining the service // definition with the transport. const ctrl = createClient( - DeploymentService, + DeployService, createConnectTransport({ baseUrl: CTRL_URL, interceptors: [ diff --git a/web/apps/engineering/content/docs/architecture/services/ctrl/index.mdx b/web/apps/engineering/content/docs/architecture/services/ctrl/index.mdx index 7056515cc8..482d178876 100644 --- a/web/apps/engineering/content/docs/architecture/services/ctrl/index.mdx +++ b/web/apps/engineering/content/docs/architecture/services/ctrl/index.mdx @@ -5,8 +5,6 @@ description: The control plane service for managing deployments and infrastructu import { Mermaid } from "@/app/components/mermaid"; - - The ctrl service provides a deployment platform similar to Vercel, Railway, or Fly.io. When a customer deploys their application, ctrl: 1. 
**Builds** container images from source code using Depot.dev @@ -21,9 +19,9 @@ All multi-step operations are durable, using Restate workflows to ensure consist ### Service Composition -The ctrl service is composed of several specialized services and workflows. The RPC services handle synchronous operations like pull-based infrastructure coordination through `ClusterService`, container image building through `BuildService`, deployment creation and management through `DeploymentService`, ACME challenge coordination through `AcmeService`, OpenAPI spec management through `OpenApiService`, and health checks through `CtrlService`. +The ctrl service is composed of several specialized services and workflows. The RPC services handle synchronous operations like pull-based infrastructure coordination through `ClusterService`, container image building through `BuildService`, deployment creation and management through `DeployService`, ACME challenge coordination through `AcmeService`, OpenAPI spec management through `OpenApiService`, and health checks through `CtrlService`. -Running alongside these are the Restate workflows that provide durable orchestration. The `DeploymentService` workflow orchestrates the full deployment lifecycle, the `RoutingService` workflow manages domain and sentinel configuration, and the `CertificateService` workflow handles TLS certificate provisioning through the ACME protocol. +Running alongside these are the Restate workflows that provide durable orchestration. The `DeployService` workflow orchestrates the full deployment lifecycle, the `RoutingService` workflow manages domain and sentinel configuration, and the `CertificateService` workflow handles TLS certificate provisioning through the ACME protocol. ### Technology Stack @@ -55,7 +53,7 @@ For CLI deployments, users provide pre-built Docker images directly, bypassing t The deployment service orchestrates the complete deployment lifecycle through durable workflows. 
It provides four key operations: `CreateDeployment` initiates a new deployment, `GetDeployment` queries the current status, `Promote` promotes a deployment to live, and `Rollback` rolls back to a previous deployment. -The deployment workflow progresses through several phases. It first builds the container image if building from source, then creates the deployment in Krane, our Kubernetes abstraction layer. Next it polls for instance readiness for up to 5 minutes, checking every second whether all pods are running. Once instances are ready, it registers them in the database so sentinels can route traffic to them. It attempts to scrape an OpenAPI spec from the running service, though this is optional. Finally, it assigns domains and creates sentinel configurations via the routing service, then marks the deployment as ready. +The deployment workflow progresses through several phases. It first builds a container image from Git via Depot (or accepts a pre-built image), then creates deployment topologies for all configured regions, ensuring sentinels and Cilium network policies exist per region. It polls in parallel until all instances are running. Once healthy, it generates frontline routes for per-commit, per-branch, and per-environment domains, assigns them atomically through the routing service, marks the deployment as ready, and — for non-rolled-back production environments — updates the project's live deployment pointer. The previous live deployment is then scheduled for standby via DeploymentService. Restate implements [durable executions](https://www.restate.dev/what-is-durable-execution) by recording progress in a distributed persistent log. The log is managed by the Restate server. If ctrl crashes during deployment, Restate resumes from the last completed phase rather than restarting from the beginning. This ensures deployments complete reliably even during system failures. 
diff --git a/web/apps/engineering/content/docs/architecture/workflows/creating-services.mdx b/web/apps/engineering/content/docs/architecture/workflows/creating-services.mdx index 179940666a..b570f58f42 100644 --- a/web/apps/engineering/content/docs/architecture/workflows/creating-services.mdx +++ b/web/apps/engineering/content/docs/architecture/workflows/creating-services.mdx @@ -24,7 +24,7 @@ Don't use workflows for: ### 1. Define the Proto -Create `go/proto/hydra/v1/yourservice.proto`: +Create `svc/ctrl/proto/hydra/v1/yourservice.proto`: ```protobuf syntax = "proto3"; @@ -60,7 +60,7 @@ make generate ### 3. Implement the Service -Create `go/apps/ctrl/workflows/yourservice/`: +Create `svc/ctrl/worker/yourservice/`: **service.go:** @@ -111,7 +111,7 @@ func (s *Service) YourOperation( ### 4. Register the Service -Update `go/apps/ctrl/run.go`: +Update `svc/ctrl/worker/run.go`: ```go import ( @@ -192,9 +192,9 @@ if err != nil { See existing implementations: -- **DeploymentService**: `go/apps/ctrl/workflows/deploy/` -- **RoutingService**: `go/apps/ctrl/workflows/routing/` -- **CertificateService**: `go/apps/ctrl/workflows/certificate/` +- **DeployService**: `svc/ctrl/worker/deploy/` +- **RoutingService**: `svc/ctrl/worker/routing/` +- **CertificateService**: `svc/ctrl/worker/certificate/` ## References diff --git a/web/apps/engineering/content/docs/architecture/workflows/deployment-service.mdx b/web/apps/engineering/content/docs/architecture/workflows/deployment-service.mdx index b1ef458548..c0d519fb28 100644 --- a/web/apps/engineering/content/docs/architecture/workflows/deployment-service.mdx +++ b/web/apps/engineering/content/docs/architecture/workflows/deployment-service.mdx @@ -5,10 +5,10 @@ description: Durable deployment workflow orchestration # Deployment Service -The `DeploymentService` orchestrates the complete deployment lifecycle, from building containers to assigning domains. 
+The `DeployService` orchestrates the complete deployment lifecycle, from building containers to assigning domains. -**Location:** `go/apps/ctrl/workflows/deploy/` -**Proto:** `go/proto/hydra/v1/deployment.proto` +**Location:** `svc/ctrl/worker/deploy/` +**Proto:** `svc/ctrl/proto/hydra/v1/deployment.proto` **Key:** `project_id` ## Operations @@ -17,31 +17,39 @@ The `DeploymentService` orchestrates the complete deployment lifecycle, from bui FetchMeta[Fetch Metadata] - FetchMeta --> StatusBuilding[Status: Building] - StatusBuilding --> CreateKrane[Create in Krane] - CreateKrane --> PollStatus{Poll Until Ready} - PollStatus --> UpsertVMs[Upsert VM Records] - UpsertVMs --> PollStatus - PollStatus --> ScrapeAPI[Scrape OpenAPI Spec] - ScrapeAPI --> BuildRoutes[Build Frontline Routes] + FetchMeta --> CheckSource{Source Type?} + CheckSource -->|GitSource| BuildGit[Build from Git via Depot] + CheckSource -->|DockerImage| UseImage[Use Pre-built Image] + BuildGit --> StatusDeploying[Status: Deploying] + UseImage --> StatusDeploying + StatusDeploying --> CreateTopology[Create Regional Topologies] + CreateTopology --> EnsureSentinels[Ensure Sentinels Exist] + EnsureSentinels --> EnsureNetPol[Ensure Cilium Network Policy] + EnsureNetPol --> WaitReady[Wait for Instances Ready] + WaitReady --> BuildRoutes[Build Frontline Routes] BuildRoutes --> AssignRoutes[Call RoutingService] AssignRoutes --> StatusReady[Status: Ready] StatusReady --> UpdateLive[Update Live Deployment] - UpdateLive --> End([Complete]) + UpdateLive --> ScheduleStandby[Schedule Previous Standby] + ScheduleStandby --> End([Complete]) + style BuildGit fill:#e1f5fe style AssignRoutes fill:#e1f5fe style StatusReady fill:#c8e6c9 + `} /> Creates a new deployment: + 1. Fetch deployment, workspace, project, environment metadata -2. Create deployment in Krane -3. Poll until instances are running -4. Scrape OpenAPI spec -5. Call RoutingService to assign frontline routes atomically -6. 
Update project's live deployment ID +2. Build Docker image from Git via Depot (or use pre-built image) +3. Create regional deployment topologies and ensure sentinels + Cilium network policies exist +4. Poll in parallel until all instances are running +5. Build frontline routes and call RoutingService to assign them atomically +6. Update project's live deployment pointer (production, non-rolled-back only) +7. Schedule previous live deployment for standby via DeploymentService -Implementation: `go/apps/ctrl/workflows/deploy/deploy_handler.go` +Implementation: `svc/ctrl/worker/deploy/deploy_handler.go` ### Rollback @@ -55,15 +63,17 @@ Implementation: `go/apps/ctrl/workflows/deploy/deploy_handler.go` style UpdateRoutes fill:#e1f5fe style UpdateProject fill:#c8e6c9 + `} /> Rolls back to a previous deployment: + 1. Validate source/target deployments 2. Find sticky frontline routes (live + environment level) 3. Update frontline routes to point to target deployment 4. Update project metadata -Implementation: `go/apps/ctrl/workflows/deploy/rollback_handler.go` +Implementation: `svc/ctrl/worker/deploy/rollback_handler.go` ### Promote @@ -75,19 +85,22 @@ Implementation: `go/apps/ctrl/workflows/deploy/rollback_handler.go` UpdateProject --> End([Success]) style UpdateRoutes fill:#e1f5fe + `} /> Promotes a deployment to live, removing rolled-back state: + 1. Validate deployment is ready 2. Find all project frontline routes 3. Update frontline routes to point to new deployment 4. Clear rolled_back flag -Implementation: `go/apps/ctrl/workflows/deploy/promote_handler.go` +Implementation: `svc/ctrl/worker/deploy/promote_handler.go` ## Why RoutingService? 
All frontline route operations are delegated to `RoutingService` to: + - Ensure atomic updates to routing configuration - Serialize frontline route operations per project - Provide rollback capabilities for failed routing changes diff --git a/web/apps/engineering/content/docs/architecture/workflows/github-deployments.mdx b/web/apps/engineering/content/docs/architecture/workflows/github-deployments.mdx index 6a32fb9aeb..7ef3ce8f15 100644 --- a/web/apps/engineering/content/docs/architecture/workflows/github-deployments.mdx +++ b/web/apps/engineering/content/docs/architecture/workflows/github-deployments.mdx @@ -74,7 +74,7 @@ db.Query.InsertDeployment(ctx, s.db.RW(), db.InsertDeploymentParams{ 7. **Trigger workflow** ```go -deployClient := hydrav1.NewDeploymentServiceIngressClient(s.restate, deploymentID) +deployClient := hydrav1.NewDeployServiceIngressClient(s.restate, deploymentID) invocation, err := deployClient.Deploy().Send(ctx, &hydrav1.DeployRequest{ DeploymentId: deploymentID, Source: &hydrav1.DeployRequest_Git{ @@ -100,7 +100,8 @@ The Restate workflow orchestrates the complete deployment lifecycle. It handles ### Workflow Steps - LoadMeta[Load Deployment/Project/Environment] LoadMeta --> CheckSource{Source Type?} @@ -122,7 +123,8 @@ The Restate workflow orchestrates the complete deployment lifecycle. 
It handles style BuildGit fill:#e1f5fe style StatusReady fill:#c8e6c9 -`} /> +`} +/> ### Failure Handling @@ -178,7 +180,7 @@ solverOptions := client.SolveOpt{ Session: []session.Attachable{ // Registry auth for pushing images authprovider.NewDockerAuthProvider(...), - + // GitHub token for fetching private repos secretsprovider.FromMap(map[string][]byte{ "GIT_AUTH_TOKEN.github.com": []byte(githubToken), @@ -248,11 +250,11 @@ Each Unkey project gets a corresponding Depot project for caching: ```go func (w *Workflow) getOrCreateDepotProject(ctx context.Context, unkeyProjectID string) (string, error) { project, _ := db.Query.FindProjectById(ctx, w.db.RO(), unkeyProjectID) - + if project.DepotProjectID.Valid { return project.DepotProjectID.String, nil } - + // Create new Depot project createResp, _ := projectClient.CreateProject(ctx, connect.NewRequest(&corev1.CreateProjectRequest{ Name: fmt.Sprintf("unkey-%s", unkeyProjectID), @@ -262,13 +264,13 @@ func (w *Workflow) getOrCreateDepotProject(ctx context.Context, unkeyProjectID s KeepDays: 14, }, })) - + // Store Depot project ID in database db.Query.UpdateProjectDepotID(ctx, w.db.RW(), db.UpdateProjectDepotIDParams{ DepotProjectID: sql.NullString{String: createResp.Msg.GetProject().GetProjectId(), Valid: true}, ID: unkeyProjectID, }) - + return createResp.Msg.GetProject().GetProjectId(), nil } ``` @@ -316,7 +318,7 @@ w.clickhouse.BufferBuildStep(schema.BuildStepV1{ ## Proto Definitions -**Location:** `proto/hydra/v1/deployment.proto` +**Location:** `svc/ctrl/proto/hydra/v1/deployment.proto` ```protobuf message GitSource { @@ -341,18 +343,23 @@ message DeployRequest { ## Important Constraints ### Commit SHA + BuildKit requires the **full 40-character commit SHA** for reliable builds. Short SHAs may fail or fetch unexpected objects. ### Private Submodules + Private submodules using SSH URLs won't work with this approach. The `GIT_AUTH_TOKEN` only provides HTTPS authentication. 
For SSH submodules, you'd need SSH key forwarding through BuildKit. ### Context Path + The context path is normalized before use: + - Whitespace trimmed - Leading `/` stripped - `.` treated as repository root ### External API + The external API (`ctrlv1.CreateDeploymentRequest`) only supports pre-built Docker images. Git-based builds are only triggered via the GitHub webhook integration. ## Workflow Orchestration Patterns diff --git a/web/apps/engineering/content/docs/architecture/workflows/index.mdx b/web/apps/engineering/content/docs/architecture/workflows/index.mdx index f3d7d08cfe..f2e21128ce 100644 --- a/web/apps/engineering/content/docs/architecture/workflows/index.mdx +++ b/web/apps/engineering/content/docs/architecture/workflows/index.mdx @@ -1,78 +1,158 @@ --- title: Durable Workflows with Restate -description: How we use Restate for durable execution of deployment operations +description: How we use Restate for durable execution in the control plane --- # Durable Workflows with Restate -Unkey uses [Restate](https://restate.dev) for durable workflow execution in critical deployment operations. Restate provides: +Unkey uses [Restate](https://restate.dev) for durable workflow execution in the control plane. All workflow services live in `svc/ctrl/worker/` with protobuf definitions in `svc/ctrl/proto/hydra/v1/`. 
-- **Durable Execution**: Operations resume from the last successful step after failures -- **Automatic Retries**: Transient failures are retried automatically -- **State Management**: Workflow state is managed by Restate, not in our database -- **Observability**: Built-in UI to inspect running workflows +Restate gives us: + +- **Durable Execution**: Operations resume from the last successful step after crashes +- **Automatic Retries**: Transient failures are retried without manual intervention +- **Concurrency Control**: Virtual objects serialize access per key, eliminating distributed locking +- **Observability**: Built-in UI to inspect running workflows, step history, and failures ## Core Concepts -### Virtual Objects +### Workflows vs Virtual Objects + +Restate offers two service types, both used in the worker: -Virtual Objects provide key-based concurrency control - only one handler can execute at a time per object key. Example: `DeploymentService` is keyed by `project_id`, ensuring only one deployment per project runs at a time. +- **Workflow** (`WORKFLOW`): Runs once per workflow ID with exactly-once semantics. Used for multi-step pipelines like deployments where each step is durable and the entire operation should not re-execute on retry. +- **Virtual Object** (`VIRTUAL_OBJECT`): Keyed by an arbitrary string. All calls to the same key are serialized, preventing concurrent mutations. Used for services like routing, certificates, and deployment state management. ### Durable Steps -Each `restate.Run()` step executes once and stores its result. After failures, workflows resume from stored results without re-executing completed steps. +Each `restate.Run()` call executes once and stores its result. After failures, workflows resume from stored results without re-executing completed steps. Use `restate.WithName("step name")` on every step for observability in the Restate UI. 
### Service Communication -Workflows call each other using blocking (`Object.Request`) or fire-and-forget (`WorkflowSend.Send`) patterns. See the Go implementation files for examples. +Services call each other using: + +- **Blocking**: `Object.Request()` — waits for the result +- **Fire-and-forget**: `Object.Send()` — enqueues the call and returns immediately +- **Delayed**: `Object.Send()` with a delay — enqueues a call to fire after a duration ## Workflow Services -### DeploymentService +### DeployService -**Location:** `go/apps/ctrl/workflows/deploy/` -**Proto:** `go/proto/hydra/v1/deployment.proto` -**Key:** `project_id` -**Operations:** Deploy, Rollback, Promote +**Location**: `svc/ctrl/worker/deploy/` +**Proto**: `svc/ctrl/proto/hydra/v1/deploy.proto` +**Type**: Workflow +**Key**: caller-supplied workflow ID +**Operations**: `Deploy`, `Rollback`, `Promote`, `ScaleDownIdlePreviewDeployments` -Handles deployment lifecycle: building containers via Krane, polling status, scraping OpenAPI specs, and assigning domains. +Orchestrates the full deployment lifecycle. `Deploy` validates the deployment record, builds a container image (from Git via Depot or a pre-built Docker image), provisions containers across regions with per-region versioning, polls for instance health in parallel, generates frontline routes (per-commit, per-branch, per-environment), reassigns sticky routes through RoutingService, and updates the project's live deployment pointer. The previous live deployment is scheduled for standby after 30 minutes via DeploymentService. + +`Rollback` switches sticky frontline routes from the current live deployment to a previous one and sets the project's `isRolledBack` flag to prevent future deploys from automatically claiming live routes. `Promote` reverses a rollback by reassigning routes and clearing the flag. + +`ScaleDownIdlePreviewDeployments` is called by a cron to archive preview deployments with zero traffic in the last 6 hours. 
See: [Deployment Service](./deployment-service) +### DeploymentService + +**Location**: `svc/ctrl/worker/deployment/` +**Proto**: `svc/ctrl/proto/hydra/v1/deployment.proto` +**Type**: Virtual Object +**Key**: `deployment_id` +**Operations**: `ScheduleDesiredStateChange`, `ChangeDesiredState`, `ClearScheduledStateChanges` + +Serializes all desired-state mutations for a single deployment. Multiple actors (deploy workflow, idle scaler, operators) may need to change a deployment's state concurrently — the virtual object key guarantees sequential processing per deployment. + +Uses a nonce-based last-writer-wins mechanism for scheduled transitions: `ScheduleDesiredStateChange` generates a unique nonce, stores it, and sends a delayed `ChangeDesiredState` call. If a newer schedule arrives before the delay elapses, it overwrites the nonce, causing the stale delayed call to no-op. Target states are `RUNNING`, `STANDBY`, and `ARCHIVED`. + ### RoutingService -**Location:** `go/apps/ctrl/workflows/routing/` -**Proto:** `go/proto/hydra/v1/routing.proto` -**Key:** `project_id` -**Operations:** AssignFrontlineRoutes +**Location**: `svc/ctrl/worker/routing/` +**Proto**: `svc/ctrl/proto/hydra/v1/routing.proto` +**Type**: Virtual Object (ingress private) +**Key**: `project_id` +**Operations**: `AssignFrontlineRoutes` -Manages atomic frontline route assignments and updates for deployments with per-tenant sentinel isolation. +Reassigns frontline routes to point at a target deployment by updating the `deployment_id` column in the `frontline_routes` table. Called by DeployService during deploy, rollback, and promote operations. Marked as ingress-private so it cannot be invoked directly from outside Restate. 
See: [Routing Service](./routing-service) +### VersioningService + +**Location**: `svc/ctrl/worker/versioning/` +**Proto**: `svc/ctrl/proto/hydra/v1/versioning.proto` +**Type**: Virtual Object (ingress private) +**Key**: region name +**Operations**: `NextVersion`, `GetVersion` + +Generates monotonically increasing version numbers per region for state synchronization between the control plane and edge agents (krane). The per-region key design allows parallel version generation across regions while maintaining strict ordering within each. + +Before mutating a deployment or sentinel, callers request a new version and stamp it on the resource row. Edge agents track their last-seen version and query for changes: `WHERE region = ? AND version > ?`. + ### CertificateService -**Location:** `go/apps/ctrl/workflows/certificate/` -**Proto:** `go/proto/hydra/v1/certificate.proto` -**Key:** `domain` -**Operations:** ProcessChallenge +**Location**: `svc/ctrl/worker/certificate/` +**Proto**: `svc/ctrl/proto/hydra/v1/certificate.proto` +**Type**: Virtual Object +**Key**: domain name +**Operations**: `ProcessChallenge`, `RenewExpiringCertificates` + +Handles ACME certificate issuance and renewal. `ProcessChallenge` runs the full ACME flow — automatically selecting HTTP-01 for regular domains or DNS-01 (via Route53) for wildcards. Rate limit responses from Let's Encrypt trigger durable sleeps rather than consuming retry budget. Private keys are encrypted via Vault before database storage. -Handles ACME certificate challenges and issuance for custom domains. +`RenewExpiringCertificates` is called periodically to find and renew certificates approaching expiry. Configured with a 15-minute inactivity timeout to accommodate DNS propagation delays. 
+
+### CustomDomainService
+
+**Location**: `svc/ctrl/worker/customdomain/`
+**Proto**: `svc/ctrl/proto/hydra/v1/custom_domain.proto`
+**Type**: Virtual Object
+**Key**: domain name
+**Operations**: `VerifyDomain`, `RetryVerification`
+
+Verifies custom domain ownership through a two-step DNS validation: first a TXT record at `_unkey.<domain>` to prove ownership, then a CNAME pointing to a unique subdomain under the platform's DNS apex (e.g., `<subdomain>.unkey-dns.com`). Both checks must pass before the domain is marked verified.
+
+Configured with a fixed 1-minute retry interval for up to 24 hours (1440 attempts) to accommodate DNS propagation. After verification, triggers certificate issuance via CertificateService and creates frontline routes for traffic routing.
+
+### ClickhouseUserService
+
+**Location**: `svc/ctrl/worker/clickhouseuser/`
+**Proto**: `svc/ctrl/proto/hydra/v1/clickhouse_user.proto`
+**Type**: Virtual Object
+**Key**: `workspace_id`
+**Operations**: `ConfigureUser`
+
+Provisions ClickHouse users for workspace analytics access. Creates users with SHA256 authentication, SELECT permissions on analytics tables, row-level security policies restricting data to the owning workspace, time-based retention filters, and per-query quotas (execution time, memory, result rows).
+
+Passwords are generated with `crypto/rand`, encrypted via Vault, and stored in MySQL. The handler is idempotent — repeated calls preserve existing passwords while updating quotas and reapplying permissions.
+
+Optional — only enabled when `CLICKHOUSE_ADMIN_URL` and Vault are both configured.
+
+### QuotaCheckService
+
+**Location**: `svc/ctrl/worker/quotacheck/`
+**Proto**: `svc/ctrl/proto/hydra/v1/quota_check.proto`
+**Type**: Virtual Object
+**Key**: billing period (`YYYY-MM`)
+**Operations**: `RunCheck`
+
+Monitors workspace quota usage and sends Slack notifications for newly exceeded quotas. Uses Restate state to deduplicate — each workspace is notified at most once per billing period.
Self-schedules the next run 24 hours later with idempotency keys to prevent duplicate runs. When the month rolls over, a new virtual object with fresh state is used automatically. ## Configuration -Services auto-register with Restate on startup via `go/apps/ctrl/run.go`. Config fields (see `go/apps/ctrl/config.go`): +Services are bound and registered in `svc/ctrl/worker/run.go`. When `Restate.RegisterAs` is configured, the worker self-registers with the Restate admin API on startup. In Kubernetes environments, registration is handled externally. + +Config fields (see `svc/ctrl/worker/config.go`): -- `Restate.URL`: Restate ingress endpoint for invoking workflows - `Restate.AdminURL`: Restate admin endpoint for service registration -- `Restate.HttpPort`: Port where ctrl listens for Restate HTTP requests -- `Restate.RegisterAs`: Public URL of this service for self-registration -- `Restate.APIKey`: API key for authenticating with Restate ingress +- `Restate.APIKey`: API key for authenticating with Restate admin API +- `Restate.HttpPort`: Port where the worker listens for Restate HTTP requests +- `Restate.RegisterAs`: Public URL of this service for self-registration (optional in k8s) ## Error Handling -- **Terminal Errors**: Use `restate.TerminalError(err, statusCode)` for business logic failures that shouldn't retry -- **Transient Errors**: Return regular errors for automatic retry +- **Terminal Errors**: Use `restate.TerminalError(err, statusCode)` for business logic failures that should not retry (invalid input, not found, unauthorized) +- **Transient Errors**: Return regular errors for automatic retry (network timeouts, temporary failures) +- **Rate Limits**: Use `restate.Sleep()` for durable waits when rate-limited (e.g., ACME) ## Best Practices @@ -80,10 +160,7 @@ Services auto-register with Restate on startup via `go/apps/ctrl/run.go`. Config 2. **Named Steps**: Always use `restate.WithName("step name")` for observability 3. 
**Small Steps**: Break operations into focused, single-purpose steps 4. **Virtual Objects**: Use for automatic serialization instead of manual locking - -## Observability - -Restate UI (port 9070) shows running/completed invocations, step execution history, failures, and retries. +5. **Ingress Privacy**: Mark internal-only services with `restate.WithIngressPrivate(true)` ## References