diff --git a/cd/manager/common/aws/ddb/dynamoDb.go b/cd/manager/common/aws/ddb/dynamoDb.go
index 46bcc92..67435e3 100644
--- a/cd/manager/common/aws/ddb/dynamoDb.go
+++ b/cd/manager/common/aws/ddb/dynamoDb.go
@@ -32,8 +32,19 @@ type DynamoDb struct {
 	cursor time.Time
 }
 
+const defaultJobStateTtl = 2 * 7 * 24 * time.Hour // Two weeks
+const buildHashTag = "sha_tag"
+
+// buildState represents build/deploy commit hash information. This information is maintained in a legacy DynamoDB table
+// used by our utility AWS Lambdas. The fields must stay exported so that attributevalue (un)marshaling can see them.
+type buildState struct {
+	Key       manager.DeployComponent `dynamodbav:"key"`
+	DeployTag string                  `dynamodbav:"deployTag"`
+	BuildInfo map[string]interface{}  `dynamodbav:"buildInfo"`
+}
+
 func NewDynamoDb(cfg aws.Config, cache manager.Cache) manager.Database {
-	env := os.Getenv("ENV")
+	env := os.Getenv(manager.EnvVar_Env)
 	// Use override endpoint, if specified, so that we can store jobs locally, while hitting regular AWS endpoints for
 	// other operations. This allows local testing without affecting CD manager instances running in AWS.
 	customEndpoint := os.Getenv("DB_AWS_ENDPOINT")
@@ -291,7 +302,7 @@ func (db DynamoDb) WriteJob(jobState job.JobState) error {
 	// Generate a new UUID for every job update
 	jobState.Id = uuid.New().String()
 	// Set entry expiration
-	jobState.Ttl = time.Now().Add(manager.DefaultJobStateTtl)
+	jobState.Ttl = time.Now().Add(defaultJobStateTtl)
 	if attributeValues, err := attributevalue.MarshalMapWithOptions(jobState, func(options *attributevalue.EncoderOptions) {
 		options.EncodeTime = func(time time.Time) (types.AttributeValue, error) {
 			return &types.AttributeValueMemberN{Value: strconv.FormatInt(time.UnixNano(), 10)}, nil
@@ -357,7 +368,7 @@ func (db DynamoDb) GetBuildHashes() (map[manager.DeployComponent]string, error)
 	} else {
 		commitHashes := make(map[manager.DeployComponent]string, len(buildStates))
 		for _, state := range buildStates {
-			commitHashes[state.Key] = state.BuildInfo[manager.BuildHashTag].(string)
+			commitHashes[state.Key] = state.BuildInfo[buildHashTag].(string)
 		}
 		return commitHashes, nil
 	}
@@ -369,13 +380,13 @@ func (db DynamoDb) GetDeployHashes() (map[manager.DeployComponent]string, error)
 	} else {
 		commitHashes := make(map[manager.DeployComponent]string, len(buildStates))
 		for _, state := range buildStates {
 			commitHashes[state.Key] = state.DeployTag
 		}
 		return commitHashes, nil
 	}
 }
 
-func (db DynamoDb) getBuildStates() ([]manager.BuildState, error) {
+func (db DynamoDb) getBuildStates() ([]buildState, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), manager.DefaultHttpWaitTime)
 	defer cancel()
 
@@ -385,7 +396,7 @@ func (db DynamoDb) getBuildStates() ([]manager.BuildState, error) {
 	}); err != nil {
 		return nil, err
 	} else {
-		var buildStates []manager.BuildState
+		var buildStates []buildState
 		if err = attributevalue.UnmarshalListOfMapsWithOptions(scanOutput.Items, &buildStates); err != nil {
 			return nil, err
 		}
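The attributevalue codec used by getBuildStates relies on reflection, which can only read exported struct fields, so buildState keeps exported field names even though the type itself is now private to the ddb package. A minimal standalone round-trip sketch of that constraint (the values are hypothetical, and Key is simplified to a plain string here):

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go-v2/feature/dynamodb/attributevalue"
)

type buildState struct {
	Key       string                 `dynamodbav:"key"`
	DeployTag string                 `dynamodbav:"deployTag"`
	BuildInfo map[string]interface{} `dynamodbav:"buildInfo"`
}

func main() {
	in := buildState{
		Key:       "ceramic",
		DeployTag: "1b9c3e02",
		BuildInfo: map[string]interface{}{"sha_tag": "1b9c3e02"},
	}
	// Marshal to a DynamoDB item and back; unexported fields would be silently dropped here.
	item, err := attributevalue.MarshalMap(in)
	if err != nil {
		panic(err)
	}
	var out buildState
	if err = attributevalue.UnmarshalMap(item, &out); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", out)
}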
+ os.Getenv("AWS_REGION") + ".amazonaws.com/" - return &Ecs{ecs.NewFromConfig(cfg), ssm.NewFromConfig(cfg), manager.EnvType(os.Getenv("ENV")), ecrUri} + return &Ecs{ecs.NewFromConfig(cfg), ssm.NewFromConfig(cfg), manager.EnvType(os.Getenv(manager.EnvVar_Env)), ecrUri} } func (e Ecs) LaunchServiceTask(cluster, service, family, container string, overrides map[string]string) (string, error) { @@ -106,124 +113,29 @@ func (e Ecs) CheckTask(cluster, taskDefId string, running, stable bool, taskIds return tasksFound && tasksInState, nil } -func (e Ecs) GenerateEnvLayout(component manager.DeployComponent) (*manager.Layout, error) { - privateCluster := manager.CeramicEnvPfx() - publicCluster := manager.CeramicEnvPfx() + "-ex" - casCluster := manager.CeramicEnvPfx() + "-cas" - casV5Cluster := "app-cas-" + string(e.env) - ecrRepo, err := e.componentEcrRepo(component) - if err != nil { - log.Printf("generateEnvLayout: ecr repo error: %s, %v", component, err) - return nil, err - } - // Populate the service layout by retrieving the clusters/services from ECS - layout := &manager.Layout{Clusters: map[string]*manager.Cluster{}, Repo: ecrRepo} - casSchedulerFound := false - for _, cluster := range []string{privateCluster, publicCluster, casCluster, casV5Cluster} { +func (e Ecs) GetLayout(clusters []string) (*manager.Layout, error) { + layout := &manager.Layout{Clusters: map[string]*manager.Cluster{}} + for _, cluster := range clusters { if clusterServices, err := e.listEcsServices(cluster); err != nil { - log.Printf("generateEnvLayout: list services error: %s, %v", cluster, err) + log.Printf("getLayout: list services error: %s, %v", cluster, err) return nil, err - } else { + } else if len(clusterServices.ServiceArns) > 0 { + layout.Clusters[cluster] = &manager.Cluster{ServiceTasks: &manager.TaskSet{Tasks: map[string]*manager.Task{}}} for _, serviceArn := range clusterServices.ServiceArns { service := e.serviceNameFromArn(serviceArn) - if task := e.componentTask(component, cluster, service); task != nil { - if _, found := layout.Clusters[cluster]; !found { - // We found at least one matching task, so we can start populating the cluster layout. - layout.Clusters[cluster] = &manager.Cluster{ServiceTasks: &manager.TaskSet{Tasks: map[string]*manager.Task{}}} - } - descSvcOutput, err := e.describeEcsService(cluster, service) - if err != nil { - log.Printf("generateEnvLayout: describe service error: %s, %s, %v", cluster, service, err) - return nil, err - } - // Set the task definition to the one currently running. For most cases, this will be overwritten by - // a new definition, but for some cases, we might want to use a layout with currently running - // definitions and not updated ones, e.g. to check if an existing deployment is stable. - task.Id = *descSvcOutput.Services[0].TaskDefinition - layout.Clusters[cluster].ServiceTasks.Tasks[service] = task - casSchedulerFound = casSchedulerFound || - ((component == manager.DeployComponent_Cas) && strings.Contains(service, manager.ServiceSuffix_CasScheduler)) + ecsService, err := e.describeEcsService(cluster, service) + if err != nil { + log.Printf("getLayout: describe service error: %s, %s, %v", cluster, service, err) + return nil, err } + layout.Clusters[cluster].ServiceTasks.Tasks[service] = &manager.Task{Id: *ecsService.Services[0].TaskDefinition} } } } - // If the CAS Scheduler service was present, add the CASv2 worker to the layout since it doesn't get updated through - // an ECS Service. 
-	if casSchedulerFound {
-		layout.Clusters[casCluster].Tasks = &manager.TaskSet{Tasks: map[string]*manager.Task{
-			casCluster + "-" + manager.ServiceSuffix_CasWorker: {
-				Repo: "ceramic-prod-cas-runner",
-				Temp: true, // Anchor workers do not stay up permanently
-				Name: manager.ContainerName_CasWorker,
-			},
-		}}
-	}
 	return layout, nil
 }
 
-func (e Ecs) componentTask(component manager.DeployComponent, cluster, service string) *manager.Task {
-	// Skip any ELP services (e.g. "ceramic-elp-1-1-node")
-	serviceNameParts := strings.Split(service, "-")
-	if (len(serviceNameParts) >= 2) && (serviceNameParts[1] == manager.ServiceSuffix_Elp) {
-		return nil
-	}
-	switch component {
-	case manager.DeployComponent_Ceramic:
-		if strings.Contains(service, manager.ServiceSuffix_CeramicNode) {
-			return &manager.Task{Name: manager.ContainerName_CeramicNode}
-		}
-	case manager.DeployComponent_Ipfs:
-		if strings.Contains(service, manager.ServiceSuffix_IpfsNode) {
-			return &manager.Task{Name: manager.ContainerName_IpfsNode}
-		}
-	case manager.DeployComponent_Cas:
-		// All pre-CASv5 services are only present in the CAS cluster
-		if cluster == manager.CeramicEnvPfx()+"-cas" {
-			// Until all environments are moved to CASv2, the CAS Scheduler (CASv2) and CAS Worker (CASv1) ECS Services will
-			// exist in some environments and not others. This is ok because only if a service exists in an environment will
-			// we attempt to update it during a deployment.
-			if strings.Contains(service, manager.ServiceSuffix_CasApi) {
-				return &manager.Task{Name: manager.ContainerName_CasApi}
-			} else if strings.Contains(service, manager.ServiceSuffix_CasScheduler) {
-				// CASv2
-				return &manager.Task{Name: manager.ContainerName_CasScheduler}
-			} else if strings.Contains(service, manager.ServiceSuffix_CasWorker) { // CASv1
-				return &manager.Task{
-					Repo: "ceramic-prod-cas-runner",
-					Temp: true, // Anchor workers do not stay up permanently
-					Name: manager.ContainerName_CasWorker,
-				}
-			}
-		}
-	case manager.DeployComponent_CasV5:
-		// All CASv5 services will exist in a separate "app-cas" cluster
-		if cluster == "app-cas-"+string(e.env) {
-			if strings.Contains(service, manager.ServiceSuffix_CasScheduler) {
-				return &manager.Task{Name: manager.ContainerName_CasV5Scheduler}
-			}
-		}
-	default:
-		log.Printf("componentTask: unknown component: %s", component)
-	}
-	return nil
-}
-
-func (e Ecs) componentEcrRepo(component manager.DeployComponent) (string, error) {
-	switch component {
-	case manager.DeployComponent_Ceramic:
-		return "ceramic-prod", nil
-	case manager.DeployComponent_Ipfs:
-		return "go-ipfs-prod", nil
-	case manager.DeployComponent_Cas:
-		return "ceramic-prod-cas", nil
-	case manager.DeployComponent_CasV5:
-		return "app-cas-scheduler", nil
-	default:
-		return "", fmt.Errorf("componentTask: unknown component: %s", component)
-	}
-}
-
-func (e Ecs) UpdateEnv(layout *manager.Layout, commitHash string) error {
+func (e Ecs) UpdateLayout(layout *manager.Layout, commitHash string) error {
 	for clusterName, cluster := range layout.Clusters {
 		clusterRepo := layout.Repo
 		if len(cluster.Repo) > 0 {
@@ -236,7 +148,7 @@ func (e Ecs) UpdateEnv(layout *manager.Layout, commitHash string) error {
 	return nil
 }
 
-func (e Ecs) CheckEnv(layout *manager.Layout) (bool, error) {
+func (e Ecs) CheckLayout(layout *manager.Layout) (bool, error) {
 	for clusterName, cluster := range layout.Clusters {
 		if deployed, err := e.checkEnvCluster(cluster, clusterName); err != nil {
 			return false, err
@@ -294,7 +206,7 @@ func (e Ecs) runEcsTask(cluster, family, container string, networkConfig *types.
 		LaunchType:           "FARGATE",
 		NetworkConfiguration: networkConfig,
 		StartedBy:            aws.String(manager.ServiceName),
-		Tags:                 []types.Tag{{Key: aws.String(manager.ResourceTag), Value: aws.String(string(e.env))}},
+		Tags:                 []types.Tag{{Key: aws.String(resourceTag), Value: aws.String(string(e.env))}},
 	}
 	if (overrides != nil) && (len(overrides) > 0) {
 		overrideEnv := make([]types.KeyValuePair, 0, len(overrides))
@@ -348,7 +260,7 @@ func (e Ecs) updateEcsTaskDefinition(taskDefArn, image, containerName string) (s
 		RuntimePlatform:         taskDef.RuntimePlatform,
 		TaskRoleArn:             taskDef.TaskRoleArn,
 		Volumes:                 taskDef.Volumes,
-		Tags:                    []types.Tag{{Key: aws.String(manager.ResourceTag), Value: aws.String(string(e.env))}},
+		Tags:                    []types.Tag{{Key: aws.String(resourceTag), Value: aws.String(string(e.env))}},
 	}
 	if regTaskDefOutput, err := e.ecsClient.RegisterTaskDefinition(ctx, regTaskDefInput); err != nil {
 		log.Printf("updateEcsTaskDefinition: register task def error: %s, %s, %s, %v", taskDefArn, image, containerName, err)
@@ -508,15 +420,15 @@ func (e Ecs) listEcsTasks(cluster, family string) ([]string, error) {
 }
 
 func (e Ecs) updateEnvCluster(cluster *manager.Cluster, clusterName, clusterRepo, commitHash string) error {
-	if err := e.updateEnvTaskSet(cluster.ServiceTasks, manager.DeployType_Service, clusterName, clusterRepo, commitHash); err != nil {
+	if err := e.updateEnvTaskSet(cluster.ServiceTasks, deployType_Service, clusterName, clusterRepo, commitHash); err != nil {
 		return err
-	} else if err = e.updateEnvTaskSet(cluster.Tasks, manager.DeployType_Task, clusterName, clusterRepo, commitHash); err != nil {
+	} else if err = e.updateEnvTaskSet(cluster.Tasks, deployType_Task, clusterName, clusterRepo, commitHash); err != nil {
 		return err
 	}
 	return nil
 }
 
-func (e Ecs) updateEnvTaskSet(taskSet *manager.TaskSet, deployType manager.DeployType, cluster, clusterRepo, commitHash string) error {
+func (e Ecs) updateEnvTaskSet(taskSet *manager.TaskSet, deployType string, cluster, clusterRepo, commitHash string) error {
 	if taskSet != nil {
 		for taskSetName, task := range taskSet.Tasks {
 			taskSetRepo := clusterRepo
@@ -524,11 +436,11 @@ func (e Ecs) updateEnvTaskSet(taskSet *manager.TaskSet, deployType manager.Deplo
 				taskSetRepo = taskSet.Repo
 			}
 			switch deployType {
-			case manager.DeployType_Service:
+			case deployType_Service:
 				if err := e.updateEnvServiceTask(task, cluster, taskSetName, taskSetRepo, commitHash); err != nil {
 					return err
 				}
-			case manager.DeployType_Task:
+			case deployType_Task:
 				if err := e.updateEnvTask(task, cluster, taskSetName, taskSetRepo, commitHash); err != nil {
 					return err
 				}
@@ -567,29 +479,29 @@ func (e Ecs) updateEnvTask(task *manager.Task, cluster, taskName, taskSetRepo, c
 }
 
 func (e Ecs) checkEnvCluster(cluster *manager.Cluster, clusterName string) (bool, error) {
-	if deployed, err := e.checkEnvTaskSet(cluster.ServiceTasks, manager.DeployType_Service, clusterName); err != nil {
+	if deployed, err := e.checkEnvTaskSet(cluster.ServiceTasks, deployType_Service, clusterName); err != nil {
 		return false, err
 	} else if !deployed {
 		return false, nil
-	} else if deployed, err = e.checkEnvTaskSet(cluster.Tasks, manager.DeployType_Task, clusterName); err != nil {
+	} else if deployed, err = e.checkEnvTaskSet(cluster.Tasks, deployType_Task, clusterName); err != nil {
 		return false, err
 	} else {
 		return deployed, nil
 	}
 }
 
-func (e Ecs) checkEnvTaskSet(taskSet *manager.TaskSet, deployType manager.DeployType, cluster string) (bool, error) {
+func (e Ecs) checkEnvTaskSet(taskSet *manager.TaskSet, deployType string, cluster string) (bool, error) {
 	if taskSet != nil {
 		for _, task := range taskSet.Tasks {
 			switch deployType {
-			case manager.DeployType_Service:
+			case deployType_Service:
 				if deployed, err := e.checkEcsService(cluster, task.Id); err != nil {
 					return false, err
 				} else if !deployed {
 					return false, nil
 				}
 				return true, nil
-			case manager.DeployType_Task:
+			case deployType_Task:
 				// Only check tasks that are meant to stay up permanently
 				if !task.Temp {
 					if deployed, err := e.CheckTask(cluster, "", true, true, task.Id); err != nil {
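With the component-specific filtering gone, GetLayout is now a plain snapshot of what is running, and callers can walk it directly. A short sketch under the assumption of an already-constructed manager.Deployment value; the cluster names and the dumpLayout helper are illustrative only:

package sketch

import (
	"fmt"
	"log"

	"github.com/3box/pipeline-tools/cd/manager"
)

// dumpLayout prints every running service and its task definition. GetLayout
// only includes clusters that have at least one service, and it always
// populates ServiceTasks for the clusters it does include.
func dumpLayout(d manager.Deployment) {
	layout, err := d.GetLayout([]string{"ceramic-dev", "ceramic-dev-ex", "ceramic-dev-cas"})
	if err != nil {
		log.Fatal(err)
	}
	for clusterName, cluster := range layout.Clusters {
		for serviceName, task := range cluster.ServiceTasks.Tasks {
			// Task.Id holds the currently running task definition
			fmt.Printf("%s/%s -> %s\n", clusterName, serviceName, task.Id)
		}
	}
}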
diff --git a/cd/manager/jobmanager/jobManager.go b/cd/manager/jobmanager/jobManager.go
index 1014328..7cbc0cd 100644
--- a/cd/manager/jobmanager/jobManager.go
+++ b/cd/manager/jobmanager/jobManager.go
@@ -34,14 +34,26 @@ type JobManager struct {
 	waitGroup *sync.WaitGroup
 }
 
+const (
+	tests_Name     = "Post-Deployment Tests"
+	tests_Org      = "3box"
+	tests_Repo     = "ceramic-tests"
+	tests_Ref      = "main"
+	tests_Workflow = "run-durable.yml"
+	tests_Selector = "fast"
+)
+
+const defaultCasMaxAnchorWorkers = 1
+const defaultCasMinAnchorWorkers = 0
+
 func NewJobManager(cache manager.Cache, db manager.Database, d manager.Deployment, apiGw manager.ApiGw, repo manager.Repository, notifs manager.Notifs) (manager.Manager, error) {
-	maxAnchorJobs := manager.DefaultCasMaxAnchorWorkers
+	maxAnchorJobs := defaultCasMaxAnchorWorkers
 	if configMaxAnchorWorkers, found := os.LookupEnv("CAS_MAX_ANCHOR_WORKERS"); found {
 		if parsedMaxAnchorWorkers, err := strconv.Atoi(configMaxAnchorWorkers); err == nil {
 			maxAnchorJobs = parsedMaxAnchorWorkers
 		}
 	}
-	minAnchorJobs := manager.DefaultCasMinAnchorWorkers
+	minAnchorJobs := defaultCasMinAnchorWorkers
 	if configMinAnchorWorkers, found := os.LookupEnv("CAS_MIN_ANCHOR_WORKERS"); found {
 		if parsedMinAnchorWorkers, err := strconv.Atoi(configMinAnchorWorkers); err == nil {
 			minAnchorJobs = parsedMinAnchorWorkers
@@ -51,7 +63,7 @@ func NewJobManager(cache manager.Cache, db manager.Database, d manager.Deploymen
 		return nil, fmt.Errorf("newJobManager: invalid anchor worker config: %d, %d", minAnchorJobs, maxAnchorJobs)
 	}
 	paused, _ := strconv.ParseBool(os.Getenv("PAUSED"))
-	return &JobManager{cache, db, d, apiGw, repo, notifs, maxAnchorJobs, minAnchorJobs, paused, manager.EnvType(os.Getenv("ENV")), new(sync.WaitGroup)}, nil
+	return &JobManager{cache, db, d, apiGw, repo, notifs, maxAnchorJobs, minAnchorJobs, paused, manager.EnvType(os.Getenv(manager.EnvVar_Env)), new(sync.WaitGroup)}, nil
 }
 
 func (m *JobManager) NewJob(jobState job.JobState) (job.JobState, error) {
@@ -474,14 +486,14 @@ func (m *JobManager) postProcessJob(jobState job.JobState) {
 			Type: job.JobType_Workflow,
 			Params: map[string]interface{}{
 				job.JobParam_Source:           manager.ServiceName,
-				job.WorkflowJobParam_Name:     manager.Tests_Name,
-				job.WorkflowJobParam_Org:      manager.Tests_Org,
-				job.WorkflowJobParam_Repo:     manager.Tests_Repo,
-				job.WorkflowJobParam_Ref:      manager.Tests_Ref,
-				job.WorkflowJobParam_Workflow: manager.Tests_Workflow,
+				job.WorkflowJobParam_Name:     tests_Name,
+				job.WorkflowJobParam_Org:      tests_Org,
+				job.WorkflowJobParam_Repo:     tests_Repo,
+				job.WorkflowJobParam_Ref:      tests_Ref,
+				job.WorkflowJobParam_Workflow: tests_Workflow,
 				job.WorkflowJobParam_Inputs: map[string]interface{}{
 					job.WorkflowJobParam_Environment:  m.env,
-					job.WorkflowJobParam_TestSelector: manager.Tests_Selector,
+					job.WorkflowJobParam_TestSelector: tests_Selector,
 				},
 			},
 		}); err != nil {
diff --git a/cd/manager/jobs/anchor.go b/cd/manager/jobs/anchor.go
index af8574e..d5d6ffb 100644
--- a/cd/manager/jobs/anchor.go
+++ b/cd/manager/jobs/anchor.go
@@ -21,7 +21,7 @@ type anchorJob struct {
 }
 
 func AnchorJob(jobState job.JobState, db manager.Database, notifs manager.Notifs, d manager.Deployment) manager.JobSm {
-	return &anchorJob{baseJob{jobState, db, notifs}, os.Getenv("ENV"), d}
+	return &anchorJob{baseJob{jobState, db, notifs}, os.Getenv(manager.EnvVar_Env), d}
 }
 
 func (a anchorJob) Advance() (job.JobState, error) {
diff --git a/cd/manager/jobs/deploy.go b/cd/manager/jobs/deploy.go
index f5f5d7a..6dfce3d 100644
--- a/cd/manager/jobs/deploy.go
+++ b/cd/manager/jobs/deploy.go
@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"log"
 	"os"
+	"strings"
 	"time"
 
 	"github.com/3box/pipeline-tools/cd/manager"
@@ -19,10 +20,39 @@ type deployJob struct {
 	manual   bool
 	rollback bool
 	force    bool
+	env      string
 	d        manager.Deployment
 	repo     manager.Repository
 }
 
+const (
+	envBranch_Dev  string = "develop"
+	envBranch_Qa   string = "qa"
+	envBranch_Tnet string = "release-candidate"
+	envBranch_Prod string = "main"
+)
+
+const (
+	serviceSuffix_CeramicNode  string = "node"
+	serviceSuffix_IpfsNode     string = "ipfs-nd"
+	serviceSuffix_CasApi       string = "api"
+	serviceSuffix_CasWorker    string = "anchor"
+	serviceSuffix_CasScheduler string = "scheduler"
+	serviceSuffix_Elp          string = "elp"
+)
+
+const (
+	containerName_CeramicNode    string = "ceramic_node"
+	containerName_IpfsNode       string = "go-ipfs"
+	containerName_CasApi         string = "cas_api"
+	containerName_CasWorker      string = "cas_anchor"
+	containerName_CasV5Scheduler string = "scheduler"
+)
+
+const defaultFailureTime = 30 * time.Minute
+const buildHashLatest = "latest"
+const anchorWorkerRepo = "ceramic-prod-cas-runner"
+
 func DeployJob(jobState job.JobState, db manager.Database, notifs manager.Notifs, d manager.Deployment, repo manager.Repository) (manager.JobSm, error) {
 	if component, found := jobState.Params[job.DeployJobParam_Component].(string); !found {
 		return nil, fmt.Errorf("deployJob: missing component (ceramic, ipfs, cas)")
@@ -32,7 +62,7 @@ func DeployJob(jobState job.JobState, db manager.Database, notifs manager.Notifs
 		manual, _ := jobState.Params[job.DeployJobParam_Manual].(bool)
 		rollback, _ := jobState.Params[job.DeployJobParam_Rollback].(bool)
 		force, _ := jobState.Params[job.DeployJobParam_Force].(bool)
-		return &deployJob{baseJob{jobState, db, notifs}, manager.DeployComponent(component), sha, manual, rollback, force, d, repo}, nil
+		return &deployJob{baseJob{jobState, db, notifs}, manager.DeployComponent(component), sha, manual, rollback, force, os.Getenv(manager.EnvVar_Env), d, repo}, nil
 	}
 }
@@ -52,7 +82,7 @@ func (d deployJob) Advance() (job.JobState, error) {
 			// Rollbacks are also force deploys, so we don't need to check for the former explicitly since we're
 			// already checking for force deploys.
 			return d.advance(job.JobStage_Skipped, now, nil)
-		} else if envLayout, err := d.d.GenerateEnvLayout(d.component); err != nil {
+		} else if envLayout, err := d.generateEnvLayout(d.component); err != nil {
 			return d.advance(job.JobStage_Failed, now, err)
 		} else {
 			d.state.Params[job.DeployJobParam_Layout] = *envLayout
@@ -86,7 +116,7 @@ func (d deployJob) Advance() (job.JobState, error) {
 			log.Printf("deployJob: failed to update deploy hash: %v, %s", err, manager.PrintJob(d.state))
 		}
 		return d.advance(job.JobStage_Completed, now, nil)
-	} else if job.IsTimedOut(d.state, manager.DefaultFailureTime) {
+	} else if job.IsTimedOut(d.state, defaultFailureTime) {
 		return d.advance(job.JobStage_Failed, now, manager.Error_CompletionTimeout)
 	} else {
 		// Return so we come back again to check
@@ -114,11 +144,11 @@ func (d deployJob) prepareJob(deployHashes map[manager.DeployComponent]string) e
 	//  - Else use the latest build hash from the database.
 	//
 	// The last two cases will only happen when redeploying manually, so we can note that in the notification.
-	if d.sha == manager.BuildHashLatest {
+	if d.sha == buildHashLatest {
 		shaTag, _ := d.state.Params[job.DeployJobParam_ShaTag].(string)
 		if latestSha, err := d.repo.GetLatestCommitHash(
 			manager.ComponentRepo(d.component),
-			manager.EnvBranch(d.component, manager.EnvType(os.Getenv("ENV"))),
+			d.envBranch(d.component, manager.EnvType(os.Getenv(manager.EnvVar_Env))),
 			shaTag,
 		); err != nil {
 			return err
@@ -144,16 +174,15 @@ func (d deployJob) prepareJob(deployHashes map[manager.DeployComponent]string) e
 }
 
 func (d deployJob) updateEnv(commitHash string) error {
-	if layout, found := d.state.Params[job.DeployJobParam_Layout].(manager.Layout); found {
-		return d.d.UpdateEnv(&layout, commitHash)
-	}
-	return fmt.Errorf("updateEnv: missing env layout")
+	// Layout should already be present
+	layout, _ := d.state.Params[job.DeployJobParam_Layout].(manager.Layout)
+	return d.d.UpdateLayout(&layout, commitHash)
 }
 
 func (d deployJob) checkEnv() (bool, error) {
-	if layout, found := d.state.Params[job.DeployJobParam_Layout].(manager.Layout); !found {
-		return false, fmt.Errorf("checkEnv: missing env layout")
-	} else if deployed, err := d.d.CheckEnv(&layout); err != nil {
+	// Layout should already be present
+	layout, _ := d.state.Params[job.DeployJobParam_Layout].(manager.Layout)
+	if deployed, err := d.d.CheckLayout(&layout); err != nil {
 		return false, err
 	} else if !deployed || (d.component != manager.DeployComponent_Ipfs) {
 		return deployed, nil
@@ -164,9 +193,127 @@ func (d deployJob) checkEnv() (bool, error) {
 	// In this case, we want to check whether *some* version of Ceramic is stable and not any specific version, like we
 	// normally do when checking for successful deployments, so it's OK to rebuild the Ceramic layout on-the-fly each
 	// time instead of storing it in the database.
-	if ceramicLayout, err := d.d.GenerateEnvLayout(manager.DeployComponent_Ceramic); err != nil {
+	if ceramicLayout, err := d.generateEnvLayout(manager.DeployComponent_Ceramic); err != nil {
 		return false, err
 	} else {
-		return d.d.CheckEnv(ceramicLayout)
+		return d.d.CheckLayout(ceramicLayout)
 	}
 }
+
+func (d deployJob) generateEnvLayout(component manager.DeployComponent) (*manager.Layout, error) {
+	privateCluster := "ceramic-" + d.env
+	publicCluster := "ceramic-" + d.env + "-ex"
+	casCluster := "ceramic-" + d.env + "-cas"
+	casV5Cluster := "app-cas-" + d.env
+	ecrRepo, err := d.componentEcrRepo(component)
+	if err != nil {
+		log.Printf("generateEnvLayout: ecr repo error: %s, %v", component, err)
+		return nil, err
+	}
+	clusters := []string{privateCluster, publicCluster, casCluster, casV5Cluster}
+	// Populate the service layout by retrieving the clusters/services from ECS
+	if currentLayout, err := d.d.GetLayout(clusters); err != nil {
+		log.Printf("generateEnvLayout: get layout error: %s, %v", component, err)
+		return nil, err
+	} else {
+		newLayout := &manager.Layout{Clusters: map[string]*manager.Cluster{}, Repo: ecrRepo}
+		for cluster, clusterLayout := range currentLayout.Clusters {
+			for service, task := range clusterLayout.ServiceTasks.Tasks {
+				if newTask := d.componentTask(component, cluster, service); newTask != nil {
+					if newLayout.Clusters[cluster] == nil {
+						// We found at least one matching task, so we can start populating the cluster layout.
+						newLayout.Clusters[cluster] = &manager.Cluster{ServiceTasks: &manager.TaskSet{Tasks: map[string]*manager.Task{}}}
+					}
+					// Set the task definition to the one currently running. For most cases, this will be overwritten by
+					// a new definition, but for some cases, we might want to use a layout with currently running
+					// definitions and not updated ones, e.g. to check if an existing deployment is stable.
+					newTask.Id = task.Id
+					newLayout.Clusters[cluster].ServiceTasks.Tasks[service] = newTask
+				}
+			}
+		}
+		// If CAS is being deployed, add the Anchor Worker to the layout since it doesn't get updated through an ECS
+		// service.
+		if component == manager.DeployComponent_Cas {
+			newLayout.Clusters[casCluster].Tasks = &manager.TaskSet{Tasks: map[string]*manager.Task{
+				casCluster + "-" + serviceSuffix_CasWorker: {
+					Repo: anchorWorkerRepo,
+					Temp: true, // Anchor workers do not stay up permanently
+					Name: containerName_CasWorker,
+				},
+			}}
+		}
+		return newLayout, nil
+	}
+}
+
+func (d deployJob) componentTask(component manager.DeployComponent, cluster, service string) *manager.Task {
+	// Skip any ELP services (e.g. "ceramic-elp-1-1-node")
+	serviceNameParts := strings.Split(service, "-")
+	if (len(serviceNameParts) >= 2) && (serviceNameParts[1] == serviceSuffix_Elp) {
+		return nil
+	}
+	switch component {
+	case manager.DeployComponent_Ceramic:
+		// All clusters have Ceramic nodes
+		if strings.Contains(service, serviceSuffix_CeramicNode) {
+			return &manager.Task{Name: containerName_CeramicNode}
+		}
+	case manager.DeployComponent_Ipfs:
+		// All clusters have IPFS nodes
+		if strings.Contains(service, serviceSuffix_IpfsNode) {
+			return &manager.Task{Name: containerName_IpfsNode}
+		}
+	case manager.DeployComponent_Cas:
+		if (cluster == "ceramic-"+d.env+"-cas") && strings.Contains(service, serviceSuffix_CasApi) {
+			return &manager.Task{Name: containerName_CasApi}
+		}
+	case manager.DeployComponent_CasV5:
+		if (cluster == "app-cas-"+d.env) && strings.Contains(service, serviceSuffix_CasScheduler) {
+			return &manager.Task{Name: containerName_CasV5Scheduler}
+		}
+	default:
+		log.Printf("componentTask: unknown component: %s", component)
+	}
+	return nil
+}
+
"ceramic-elp-1-1-node") + serviceNameParts := strings.Split(service, "-") + if (len(serviceNameParts) >= 2) && (serviceNameParts[1] == serviceSuffix_Elp) { + return nil + } + switch component { + case manager.DeployComponent_Ceramic: + // All clusters have Ceramic nodes + if strings.Contains(service, serviceSuffix_CeramicNode) { + return &manager.Task{Name: containerName_CeramicNode} + } + case manager.DeployComponent_Ipfs: + // All clusters have IPFS nodes + if strings.Contains(service, serviceSuffix_IpfsNode) { + return &manager.Task{Name: containerName_IpfsNode} + } + case manager.DeployComponent_Cas: + if (cluster == "ceramic-"+d.env+"-cas") && strings.Contains(service, serviceSuffix_CasApi) { + return &manager.Task{Name: containerName_CasApi} + } + case manager.DeployComponent_CasV5: + if (cluster == "app-cas-"+d.env) && strings.Contains(service, serviceSuffix_CasScheduler) { + return &manager.Task{Name: containerName_CasV5Scheduler} + } + default: + log.Printf("componentTask: unknown component: %s", component) + } + return nil +} + +func (d deployJob) componentEcrRepo(component manager.DeployComponent) (string, error) { + switch component { + case manager.DeployComponent_Ceramic: + return "ceramic-prod", nil + case manager.DeployComponent_Ipfs: + return "go-ipfs-prod", nil + case manager.DeployComponent_Cas: + return "ceramic-prod-cas", nil + case manager.DeployComponent_CasV5: + return "app-cas-scheduler", nil + default: + return "", fmt.Errorf("componentTask: unknown component: %s", component) + } +} + +func (d deployJob) envBranch(component manager.DeployComponent, env manager.EnvType) string { + switch env { + case manager.EnvType_Dev: + return envBranch_Dev + case manager.EnvType_Qa: + // Ceramic and CAS "qa" deploys correspond to the "develop" branch + switch component { + case manager.DeployComponent_Ceramic: + return envBranch_Dev + case manager.DeployComponent_Cas: + return envBranch_Dev + case manager.DeployComponent_CasV5: + return envBranch_Dev + default: + return envBranch_Qa + } + case manager.EnvType_Tnet: + return envBranch_Tnet + case manager.EnvType_Prod: + return envBranch_Prod + default: + return "" } } diff --git a/cd/manager/jobs/e2e.go b/cd/manager/jobs/e2e.go index 894fbb5..8679340 100644 --- a/cd/manager/jobs/e2e.go +++ b/cd/manager/jobs/e2e.go @@ -9,9 +9,6 @@ import ( "github.com/3box/pipeline-tools/cd/manager/common/job" ) -// Allow up to 4 hours for E2E tests to run -const e2eFailureTime = 4 * time.Hour - var _ manager.JobSm = &e2eTestJob{} type e2eTestJob struct { @@ -19,6 +16,15 @@ type e2eTestJob struct { d manager.Deployment } +const ( + e2eTest_PrivatePublic string = "private-public" + e2eTest_LocalClientPublic string = "local_client-public" + e2eTest_LocalNodePrivate string = "local_node-private" +) + +// Allow up to 4 hours for E2E tests to run +const e2eFailureTime = 4 * time.Hour + func E2eTestJob(jobState job.JobState, db manager.Database, notifs manager.Notifs, d manager.Deployment) manager.JobSm { return &e2eTestJob{baseJob{jobState, db, notifs}, d} } @@ -77,12 +83,12 @@ func (e e2eTestJob) Advance() (job.JobState, error) { } func (e e2eTestJob) startAllTests() error { - if err := e.startTests(manager.E2eTest_PrivatePublic); err != nil { + if err := e.startTests(e2eTest_PrivatePublic); err != nil { return err - } else if err = e.startTests(manager.E2eTest_LocalClientPublic); err != nil { + } else if err = e.startTests(e2eTest_LocalClientPublic); err != nil { return err } else { - return e.startTests(manager.E2eTest_LocalNodePrivate) + return 
diff --git a/cd/manager/jobs/e2e.go b/cd/manager/jobs/e2e.go
index 894fbb5..8679340 100644
--- a/cd/manager/jobs/e2e.go
+++ b/cd/manager/jobs/e2e.go
@@ -9,9 +9,6 @@ import (
 	"github.com/3box/pipeline-tools/cd/manager/common/job"
 )
 
-// Allow up to 4 hours for E2E tests to run
-const e2eFailureTime = 4 * time.Hour
-
 var _ manager.JobSm = &e2eTestJob{}
 
 type e2eTestJob struct {
@@ -19,6 +16,15 @@ type e2eTestJob struct {
 	d manager.Deployment
 }
 
+const (
+	e2eTest_PrivatePublic     string = "private-public"
+	e2eTest_LocalClientPublic string = "local_client-public"
+	e2eTest_LocalNodePrivate  string = "local_node-private"
+)
+
+// Allow up to 4 hours for E2E tests to run
+const e2eFailureTime = 4 * time.Hour
+
 func E2eTestJob(jobState job.JobState, db manager.Database, notifs manager.Notifs, d manager.Deployment) manager.JobSm {
 	return &e2eTestJob{baseJob{jobState, db, notifs}, d}
 }
@@ -77,12 +83,12 @@ func (e e2eTestJob) Advance() (job.JobState, error) {
 }
 
 func (e e2eTestJob) startAllTests() error {
-	if err := e.startTests(manager.E2eTest_PrivatePublic); err != nil {
+	if err := e.startTests(e2eTest_PrivatePublic); err != nil {
 		return err
-	} else if err = e.startTests(manager.E2eTest_LocalClientPublic); err != nil {
+	} else if err = e.startTests(e2eTest_LocalClientPublic); err != nil {
 		return err
 	} else {
-		return e.startTests(manager.E2eTest_LocalNodePrivate)
+		return e.startTests(e2eTest_LocalNodePrivate)
 	}
 }
 
@@ -108,11 +114,11 @@ func (e e2eTestJob) startTests(config string) error {
 }
 
 func (e e2eTestJob) checkAllTests(expectedToBeRunning bool) (bool, error) {
-	if privatePublicStatus, err := e.checkTests(e.state.Params[manager.E2eTest_PrivatePublic].(string), expectedToBeRunning); err != nil {
+	if privatePublicStatus, err := e.checkTests(e.state.Params[e2eTest_PrivatePublic].(string), expectedToBeRunning); err != nil {
 		return false, err
-	} else if localClientPublicStatus, err := e.checkTests(e.state.Params[manager.E2eTest_LocalClientPublic].(string), expectedToBeRunning); err != nil {
+	} else if localClientPublicStatus, err := e.checkTests(e.state.Params[e2eTest_LocalClientPublic].(string), expectedToBeRunning); err != nil {
 		return false, err
-	} else if localNodePrivateStatus, err := e.checkTests(e.state.Params[manager.E2eTest_LocalNodePrivate].(string), expectedToBeRunning); err != nil {
+	} else if localNodePrivateStatus, err := e.checkTests(e.state.Params[e2eTest_LocalNodePrivate].(string), expectedToBeRunning); err != nil {
 		return false, err
 	} else if privatePublicStatus && localClientPublicStatus && localNodePrivateStatus {
 		return true, nil
diff --git a/cd/manager/jobs/smoke.go b/cd/manager/jobs/smoke.go
index aff8c5d..3f91161 100644
--- a/cd/manager/jobs/smoke.go
+++ b/cd/manager/jobs/smoke.go
@@ -26,7 +26,7 @@ type smokeTestJob struct {
 }
 
 func SmokeTestJob(jobState job.JobState, db manager.Database, notifs manager.Notifs, d manager.Deployment) manager.JobSm {
-	return &smokeTestJob{baseJob{jobState, db, notifs}, os.Getenv("ENV"), d}
+	return &smokeTestJob{baseJob{jobState, db, notifs}, os.Getenv(manager.EnvVar_Env), d}
 }
 
 func (s smokeTestJob) Advance() (job.JobState, error) {
diff --git a/cd/manager/jobs/workflow.go b/cd/manager/jobs/workflow.go
index 0835d9b..12639c2 100644
--- a/cd/manager/jobs/workflow.go
+++ b/cd/manager/jobs/workflow.go
@@ -46,7 +46,7 @@ func GitHubWorkflowJob(jobState job.JobState, db manager.Database, notifs manage
 	// Add the job ID to the inputs, so we can track the right workflow corresponding to this job.
 	inputs[job.WorkflowJobParam_JobId] = jobState.JobId
 	// Set the environment so that the workflow knows which environment to target
-	env := os.Getenv("ENV")
+	env := os.Getenv(manager.EnvVar_Env)
 	inputs[job.WorkflowJobParam_Environment] = env
 
 	var httpClient *http.Client = nil
diff --git a/cd/manager/models.go b/cd/manager/models.go
index d29452b..2d878bf 100644
--- a/cd/manager/models.go
+++ b/cd/manager/models.go
@@ -9,7 +9,6 @@ import (
 
 const DefaultTick = 10 * time.Second
 const DefaultTtlDays = 1
-const DefaultFailureTime = 30 * time.Minute
 const DefaultHttpWaitTime = 30 * time.Second
 const DefaultWaitTime = 5 * time.Minute
 
@@ -22,20 +21,6 @@ const (
 	EnvType_Prod EnvType = "prod"
 )
 
-const (
-	EnvName_Dev  string = "dev"
-	EnvName_Qa   string = "dev-qa"
-	EnvName_Tnet string = "testnet-clay"
-	EnvName_Prod string = "mainnet"
-)
-
-const (
-	EnvBranch_Dev  string = "develop"
-	EnvBranch_Qa   string = "qa"
-	EnvBranch_Tnet string = "release-candidate"
-	EnvBranch_Prod string = "main"
-)
-
 type DeployComponent string
 
 const (
@@ -54,60 +39,17 @@ const (
 	DeployRepo_Ipfs    DeployRepo = "go-ipfs-daemon"
 )
 
-type DeployType string
-
-const (
-	DeployType_Service DeployType = "service"
-	DeployType_Task    DeployType = "task"
-)
-
-const (
-	ServiceSuffix_CeramicNode  string = "node"
-	ServiceSuffix_IpfsNode     string = "ipfs-nd"
-	ServiceSuffix_CasApi       string = "api"
-	ServiceSuffix_CasWorker    string = "anchor"
-	ServiceSuffix_CasScheduler string = "scheduler"
-	ServiceSuffix_Elp          string = "elp"
-)
-
-const (
-	E2eTest_PrivatePublic     string = "private-public"
-	E2eTest_LocalClientPublic string = "local_client-public"
-	E2eTest_LocalNodePrivate  string = "local_node-private"
-)
-
-const (
-	ContainerName_CeramicNode    string = "ceramic_node"
-	ContainerName_IpfsNode       string = "go-ipfs"
-	ContainerName_CasApi         string = "cas_api"
-	ContainerName_CasWorker      string = "cas_anchor"
-	ContainerName_CasScheduler   string = "cas_scheduler"
-	ContainerName_CasV5Scheduler string = "scheduler"
-)
-
 var (
 	Error_StartupTimeout    = fmt.Errorf("startup timeout")
 	Error_CompletionTimeout = fmt.Errorf("completion timeout")
 )
 
+const GitHubOrg = "ceramicnetwork"
+
 const (
-	NotifField_CommitHashes string = "Commit Hashes"
-	NotifField_JobId        string = "Job ID"
-	NotifField_Time         string = "Time"
-	NotifField_Deploy       string = "Deployment(s)"
-	NotifField_Anchor       string = "Anchor Worker(s)"
-	NotifField_TestE2E      string = "E2E Tests"
-	NotifField_TestSmoke    string = "Smoke Tests"
-	NotifField_Workflow     string = "Workflow(s)"
+	EnvVar_Env = "ENV"
 )
 
-// Repository
-const CommitHashRegex = "[0-9a-f]{40}"
-const BuildHashTag = "sha_tag"
-const BuildHashLatest = "latest"
-const GitHubOrg = "ceramicnetwork"
-const ImageVerificationStatusCheck = "ci/image: verify"
-
 type WorkflowStatus uint8
 
 const (
@@ -117,33 +59,7 @@ const (
 	WorkflowStatus_Success
 )
 
-// Miscellaneous
-const ResourceTag = "Ceramic"
 const ServiceName = "cd-manager"
-const DefaultCasMaxAnchorWorkers = 1
-const DefaultCasMinAnchorWorkers = 0
-const DefaultJobStateTtl = 2 * 7 * 24 * time.Hour // Two weeks
-
-// Tests
-const (
-	Tests_Name     = "Post-Deployment Tests"
-	Tests_Org      = "3box"
-	Tests_Repo     = "ceramic-tests"
-	Tests_Ref      = "main"
-	Tests_Workflow = "run-durable.yml"
-	Tests_Selector = "fast"
-)
-
-// For CASv5 workers
-const CasV5Version = "5"
-
-// BuildState represents build/deploy commit hash information. This information is maintained in a legacy DynamoDB table
-// used by our utility AWS Lambdas.
-type BuildState struct {
-	Key       DeployComponent        `dynamodbav:"key"`
-	DeployTag string                 `dynamodbav:"deployTag"`
-	BuildInfo map[string]interface{} `dynamodbav:"buildInfo"`
-}
 
 // Layout (as well as Cluster, TaskSet, and Task) are a generic representation of our service structure within an
 // orchestration service (e.g. AWS ECS).
@@ -218,9 +134,9 @@ type Deployment interface {
 	LaunchServiceTask(cluster, service, family, container string, overrides map[string]string) (string, error)
 	LaunchTask(cluster, family, container, vpcConfigParam string, overrides map[string]string) (string, error)
 	CheckTask(cluster, taskDefId string, running, stable bool, taskIds ...string) (bool, error)
-	GenerateEnvLayout(DeployComponent) (*Layout, error)
-	UpdateEnv(*Layout, string) error
-	CheckEnv(*Layout) (bool, error)
+	GetLayout(clusters []string) (*Layout, error)
+	UpdateLayout(*Layout, string) error
+	CheckLayout(*Layout) (bool, error)
 }
 
 // Notifs represents a notification service (e.g. Discord)
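One payoff of the narrower interface is that test doubles stay small. A compile-checked stub, under the assumption that the six methods shown in this hunk make up the complete interface; fakeDeployment is hypothetical, for tests only:

package fakes

import "github.com/3box/pipeline-tools/cd/manager"

// fakeDeployment is a no-op stand-in for the ECS-backed implementation.
type fakeDeployment struct{}

func (fakeDeployment) LaunchServiceTask(cluster, service, family, container string, overrides map[string]string) (string, error) {
	return "task-id", nil
}

func (fakeDeployment) LaunchTask(cluster, family, container, vpcConfigParam string, overrides map[string]string) (string, error) {
	return "task-id", nil
}

func (fakeDeployment) CheckTask(cluster, taskDefId string, running, stable bool, taskIds ...string) (bool, error) {
	return true, nil
}

func (fakeDeployment) GetLayout(clusters []string) (*manager.Layout, error) {
	return &manager.Layout{Clusters: map[string]*manager.Cluster{}}, nil
}

func (fakeDeployment) UpdateLayout(layout *manager.Layout, commitHash string) error {
	return nil
}

func (fakeDeployment) CheckLayout(layout *manager.Layout) (bool, error) {
	return true, nil
}

// The compiler enforces that the stub satisfies manager.Deployment.
var _ manager.Deployment = fakeDeployment{}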
diff --git a/cd/manager/notifs/anchor.go b/cd/manager/notifs/anchor.go
index 853e689..423bb91 100644
--- a/cd/manager/notifs/anchor.go
+++ b/cd/manager/notifs/anchor.go
@@ -65,9 +65,7 @@ func (a anchorNotif) getColor() discordColor {
 			return discordColor_Alert
 		} else if delayed, _ := a.state.Params[job.AnchorJobParam_Delayed].(bool); delayed {
 			return discordColor_Warning
-		} else {
-			return discordColor_Info
 		}
 	}
-	return getColorForStage(a.state.Stage)
+	return colorForStage(a.state.Stage)
 }
diff --git a/cd/manager/notifs/deploy.go b/cd/manager/notifs/deploy.go
index a00f6d9..10186d8 100644
--- a/cd/manager/notifs/deploy.go
+++ b/cd/manager/notifs/deploy.go
@@ -24,13 +24,20 @@ type deployNotif struct {
 	env manager.EnvType
 }
 
+const (
+	envName_Dev  string = "dev"
+	envName_Qa   string = "dev-qa"
+	envName_Tnet string = "testnet-clay"
+	envName_Prod string = "mainnet"
+)
+
 func newDeployNotif(jobState job.JobState) (jobNotif, error) {
 	if d, err := parseDiscordWebhookUrl("DISCORD_DEPLOYMENTS_WEBHOOK"); err != nil {
 		return nil, err
 	} else if c, err := parseDiscordWebhookUrl("DISCORD_COMMUNITY_NODES_WEBHOOK"); err != nil {
 		return nil, err
 	} else {
-		return &deployNotif{jobState, d, c, manager.EnvType(os.Getenv("ENV"))}, nil
+		return &deployNotif{jobState, d, c, manager.EnvType(os.Getenv(manager.EnvVar_Env))}, nil
 	}
 }
 
@@ -56,7 +63,7 @@ func (d deployNotif) getTitle() string {
 	}
 	return fmt.Sprintf(
 		"3Box Labs `%s` %s %s %s %s",
-		manager.EnvName(d.env),
+		envName(d.env),
 		strings.ToUpper(component),
 		cases.Title(language.English).String(qualifier),
 		"Deployment",
@@ -69,5 +76,20 @@ func (d deployNotif) getFields() []discord.EmbedField {
 }
 
 func (d deployNotif) getColor() discordColor {
-	return getColorForStage(d.state.Stage)
+	return colorForStage(d.state.Stage)
+}
+
+func envName(env manager.EnvType) string {
+	switch env {
+	case manager.EnvType_Dev:
+		return envName_Dev
+	case manager.EnvType_Qa:
+		return envName_Qa
+	case manager.EnvType_Tnet:
+		return envName_Tnet
+	case manager.EnvType_Prod:
+		return envName_Prod
+	default:
+		return ""
+	}
 }
diff --git a/cd/manager/notifs/discord.go b/cd/manager/notifs/discord.go
index 3df1cef..e9cbf02 100644
--- a/cd/manager/notifs/discord.go
+++ b/cd/manager/notifs/discord.go
@@ -5,7 +5,6 @@ import (
 	"log"
 	"net/url"
 	"os"
-	"regexp"
 	"strings"
 	"time"
 
@@ -28,9 +27,20 @@ const (
 	discordColor_Alert = 16711712
 )
 
-const DiscordPacing = 2 * time.Second
+const (
+	notifField_CommitHashes string = "Commit Hashes"
+	notifField_JobId        string = "Job ID"
+	notifField_Time         string = "Time"
+	notifField_Deploy       string = "Deployment(s)"
+	notifField_Anchor       string = "Anchor Worker(s)"
+	notifField_TestE2E      string = "E2E Tests"
+	notifField_TestSmoke    string = "Smoke Tests"
+	notifField_Workflow     string = "Workflow(s)"
+)
 
-const ShaTagLength = 12
+const discordPacing = 2 * time.Second
+
+const shaTagLength = 12
 
 var _ manager.Notifs = &JobNotifs{}
 
@@ -121,7 +131,7 @@ func (n JobNotifs) sendNotif(title string, fields []discord.EmbedField, color di
 			SetEmbeds(messageEmbed).
 			SetUsername(manager.ServiceName).
 			Build(),
-		rest.WithDelay(DiscordPacing),
+		rest.WithDelay(discordPacing),
 	); err != nil {
 		log.Printf("notifyJob: error sending discord notification: %v, %s, %v, %d", err, title, fields, color)
 	}
@@ -130,19 +140,19 @@ func (n JobNotifs) sendNotif(title string, fields []discord.EmbedField, color di
 func (n JobNotifs) getNotifFields(jobState job.JobState) []discord.EmbedField {
 	fields := []discord.EmbedField{
 		{
-			Name:  manager.NotifField_JobId,
+			Name:  notifField_JobId,
 			Value: jobState.JobId,
 		},
 	}
 	// Return deploy hashes for all jobs, if we were able to retrieve them successfully.
 	if commitHashes := n.getDeployHashes(jobState); len(commitHashes) > 0 {
 		fields = append(fields, discord.EmbedField{
-			Name:  manager.NotifField_CommitHashes,
+			Name:  notifField_CommitHashes,
 			Value: commitHashes,
 		})
 	}
 	fields = append(fields, discord.EmbedField{
-		Name:  manager.NotifField_Time,
+		Name:  notifField_Time,
 		Value: time.Now().Format(time.RFC1123), // "Mon, 02 Jan 2006 15:04:05 MST"
 	})
 	// Add the list of jobs in progress
@@ -159,7 +169,7 @@ func (n JobNotifs) getDeployHashes(jobState job.JobState) string {
 	if jobState.Type == job.JobType_Deploy {
 		sha := jobState.Params[job.DeployJobParam_Sha].(string)
 		// If the specified hash is valid, overwrite the previous hash from the database.
-		if isValidSha, _ := regexp.MatchString(manager.CommitHashRegex, sha); isValidSha {
+		if manager.IsValidSha(sha) {
 			commitHashes[manager.DeployComponent(jobState.Params[job.DeployJobParam_Component].(string))] = sha
 		}
 	}
@@ -173,9 +183,9 @@ func (n JobNotifs) getDeployHashes(jobState job.JobState) string {
 }
 
 func (n JobNotifs) getComponentMsg(component manager.DeployComponent, commitHashes map[manager.DeployComponent]string) string {
-	if commitHash, found := commitHashes[component]; found && (len(commitHash) >= ShaTagLength) {
+	if commitHash, found := commitHashes[component]; found && (len(commitHash) >= shaTagLength) {
 		repo := manager.ComponentRepo(component)
-		return fmt.Sprintf("[%s (%s)](https://github.com/%s/%s/commit/%s)", repo, commitHash[:ShaTagLength], manager.GitHubOrg, repo, commitHash)
+		return fmt.Sprintf("[%s (%s)](https://github.com/%s/%s/commit/%s)", repo, commitHash[:shaTagLength], manager.GitHubOrg, repo, commitHash)
 	}
 	return ""
 }
@@ -225,12 +235,29 @@ func (n JobNotifs) getActiveJobsByType(jobState job.JobState, jobType job.JobTyp
 		}
 	}
 	return discord.EmbedField{
-		Name:  manager.NotifField(jobType) + " In Progress:",
+		Name:  notifField(jobType) + " In Progress:",
 		Value: message,
 	}, len(message) > 0
 }
 
-func getColorForStage(jobStage job.JobStage) discordColor {
+func notifField(jt job.JobType) string {
+	switch jt {
+	case job.JobType_Deploy:
+		return notifField_Deploy
+	case job.JobType_Anchor:
+		return notifField_Anchor
+	case job.JobType_TestE2E:
+		return notifField_TestE2E
+	case job.JobType_TestSmoke:
+		return notifField_TestSmoke
+	case job.JobType_Workflow:
+		return notifField_Workflow
+	default:
+		return ""
+	}
+}
+
+func colorForStage(jobStage job.JobStage) discordColor {
 	switch jobStage {
 	case job.JobStage_Dequeued:
 		return discordColor_Info
@@ -247,7 +274,7 @@ func getColorForStage(jobStage job.JobStage) discordColor {
 	case job.JobStage_Completed:
 		return discordColor_Ok
 	default:
-		log.Printf("getColorForStage: unknown job stage: %s", jobStage)
+		log.Printf("colorForStage: unknown job stage: %s", jobStage)
 		return discordColor_Alert
 	}
 }
diff --git a/cd/manager/notifs/e2e.go b/cd/manager/notifs/e2e.go
index 6b8ad51..4e8d5ba 100644
--- a/cd/manager/notifs/e2e.go
+++ b/cd/manager/notifs/e2e.go
@@ -33,5 +33,5 @@ func (e e2eTestNotif) getFields() []discord.EmbedField {
 }
 
 func (e e2eTestNotif) getColor() discordColor {
-	return getColorForStage(e.state.Stage)
+	return colorForStage(e.state.Stage)
 }
diff --git a/cd/manager/notifs/smoke.go b/cd/manager/notifs/smoke.go
index 49ccbdf..4992234 100644
--- a/cd/manager/notifs/smoke.go
+++ b/cd/manager/notifs/smoke.go
@@ -33,5 +33,5 @@ func (s smokeTestNotif) getFields() []discord.EmbedField {
 }
 
 func (s smokeTestNotif) getColor() discordColor {
-	return getColorForStage(s.state.Stage)
+	return colorForStage(s.state.Stage)
 }
diff --git a/cd/manager/notifs/workflow.go b/cd/manager/notifs/workflow.go
index 63acf1c..e3cf5b9 100644
--- a/cd/manager/notifs/workflow.go
+++ b/cd/manager/notifs/workflow.go
@@ -61,5 +61,5 @@ func (w workflowNotif) getFields() []discord.EmbedField {
 }
 
 func (w workflowNotif) getColor() discordColor {
-	return getColorForStage(w.state.Stage)
+	return colorForStage(w.state.Stage)
 }
diff --git a/cd/manager/repository/github.go b/cd/manager/repository/github.go
index b51e48e..2c4c10c 100644
--- a/cd/manager/repository/github.go
+++ b/cd/manager/repository/github.go
@@ -17,6 +17,10 @@ import (
 
 var _ manager.Repository = &Github{}
 
+type Github struct {
+	client *github.Client
+}
+
 const (
 	github_CommitStatus_Failure string = "failure"
 	github_CommitStatus_Success string = "success"
@@ -31,9 +35,7 @@ const (
 	gitHub_WorkflowStatus_Canceled = "cancelled"
 )
 
-type Github struct {
-	client *github.Client
-}
+const imageVerificationStatusCheck = "ci/image: verify"
 
 func NewRepository() manager.Repository {
 	var httpClient *http.Client = nil
@@ -98,7 +100,7 @@ func (g Github) checkRefStatus(repo manager.DeployRepo, ref string) (bool, error
 			// Make sure that image verification has run. We could reach here after CircleCI tests have passed but
 			// image verification has not started yet, and so the combined status would appear to be successful.
 			for _, statusCheck := range status.Statuses {
-				if *statusCheck.Context == manager.ImageVerificationStatusCheck {
+				if *statusCheck.Context == imageVerificationStatusCheck {
 					return true, nil
 				}
 			}
diff --git a/cd/manager/utils.go b/cd/manager/utils.go
index 217b7ab..3de1123 100644
--- a/cd/manager/utils.go
+++ b/cd/manager/utils.go
@@ -3,13 +3,15 @@ package manager
 import (
 	"encoding/json"
 	"fmt"
-	"os"
 	"regexp"
 	"time"
 
 	"github.com/3box/pipeline-tools/cd/manager/common/job"
 )
 
+const commitHashRegex = "^[0-9a-f]{40}$"
+const casV5Version = "5"
+
 func PrintJob(jobStates ...job.JobState) string {
 	prettyString := ""
 	for _, jobState := range jobStates {
@@ -22,46 +24,6 @@ func PrintJob(jobStates ...job.JobState) string {
 	return prettyString
 }
 
-func EnvName(env EnvType) string {
-	switch env {
-	case EnvType_Dev:
-		return EnvName_Dev
-	case EnvType_Qa:
-		return EnvName_Qa
-	case EnvType_Tnet:
-		return EnvName_Tnet
-	case EnvType_Prod:
-		return EnvName_Prod
-	default:
-		return ""
-	}
-}
-
-func EnvBranch(component DeployComponent, env EnvType) string {
-	switch env {
-	case EnvType_Dev:
-		return EnvBranch_Dev
-	case EnvType_Qa:
-		// Ceramic and CAS "qa" deploys correspond to the "develop" branch
-		switch component {
-		case DeployComponent_Ceramic:
-			return EnvBranch_Dev
-		case DeployComponent_Cas:
-			return EnvBranch_Dev
-		case DeployComponent_CasV5:
-			return EnvBranch_Dev
-		default:
-			return EnvBranch_Qa
-		}
-	case EnvType_Tnet:
-		return EnvBranch_Tnet
-	case EnvType_Prod:
-		return EnvBranch_Prod
-	default:
-		return ""
-	}
-}
-
 func ComponentRepo(component DeployComponent) DeployRepo {
 	switch component {
 	case DeployComponent_Ceramic:
@@ -77,35 +39,14 @@ func ComponentRepo(component DeployComponent) DeployRepo {
 	}
 }
 
-func NotifField(jt job.JobType) string {
-	switch jt {
-	case job.JobType_Deploy:
-		return NotifField_Deploy
-	case job.JobType_Anchor:
-		return NotifField_Anchor
-	case job.JobType_TestE2E:
-		return NotifField_TestE2E
-	case job.JobType_TestSmoke:
-		return NotifField_TestSmoke
-	case job.JobType_Workflow:
-		return NotifField_Workflow
-	default:
-		return ""
-	}
-}
-
-func CeramicEnvPfx() string {
-	return "ceramic-" + os.Getenv("ENV")
-}
-
 func IsValidSha(sha string) bool {
-	isValidSha, err := regexp.MatchString(CommitHashRegex, sha)
+	isValidSha, err := regexp.MatchString(commitHashRegex, sha)
 	return err == nil && isValidSha
 }
 
 func IsV5WorkerJob(jobState job.JobState) bool {
 	if jobState.Type == job.JobType_Anchor {
-		if version, found := jobState.Params[job.AnchorJobParam_Version].(string); found && (version == CasV5Version) {
+		if version, found := jobState.Params[job.AnchorJobParam_Version].(string); found && (version == casV5Version) {
 			return true
 		}
 	}
 	return false
 }
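On commitHashRegex: Go's regexp.MatchString does substring matching, so without the ^...$ anchors IsValidSha would accept any string that merely contains a 40-hex-digit run. A quick standalone demonstration (the values are hypothetical):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	sha := "4a7bf8e6c1d2a9b0e3f45261708192a3b4c5d6e7" // 40 hex chars
	junk := "not-a-sha-" + sha

	// The unanchored pattern matches anywhere in the string; the anchored one must match the whole string.
	unanchored, _ := regexp.MatchString("[0-9a-f]{40}", junk)
	anchored, _ := regexp.MatchString("^[0-9a-f]{40}$", junk)
	fmt.Println(unanchored, anchored) // true false
}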