Support recreate for ECS tasks #4608
Changes from 1 commit
@@ -159,6 +159,8 @@ func applyServiceDefinition(ctx context.Context, cli provider.Client, serviceDef
 		if err := cli.TagResource(ctx, *service.ServiceArn, serviceDefinition.Tags); err != nil {
 			return nil, fmt.Errorf("failed to update tags of ECS service %s: %w", *serviceDefinition.ServiceName, err)
 		}
+		// Re-assign tags to service object because UpdateService API doesn't return tags.
+		service.Tags = serviceDefinition.Tags
 	} else {
 		service, err = cli.CreateService(ctx, serviceDefinition)

@@ -242,7 +244,7 @@ func createPrimaryTaskSet(ctx context.Context, client provider.Client, service t
 	return nil
 }

-func sync(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderECSConfig, taskDefinition types.TaskDefinition, serviceDefinition types.Service, targetGroup *types.LoadBalancer) bool {
+func sync(ctx context.Context, in *executor.Input, platformProviderName string, platformProviderCfg *config.PlatformProviderECSConfig, singleton bool, taskDefinition types.TaskDefinition, serviceDefinition types.Service, targetGroup *types.LoadBalancer) bool {
 	client, err := provider.DefaultRegistry().Client(platformProviderName, platformProviderCfg, in.Logger)
 	if err != nil {
 		in.LogPersister.Errorf("Unable to create ECS client for the provider %s: %v", platformProviderName, err)

@@ -263,10 +265,35 @@ func sync(ctx context.Context, in *executor.Input, platformProviderName string,
 		return false
 	}

-	in.LogPersister.Infof("Start rolling out ECS task set")
-	if err := createPrimaryTaskSet(ctx, client, *service, *td, targetGroup); err != nil {
-		in.LogPersister.Errorf("Failed to rolling out ECS task set for service %s: %v", *serviceDefinition.ServiceName, err)
-		return false
+	if singleton {
+		cnt := service.DesiredCount
+		// Scale down the service tasks by set it to 0
+		in.LogPersister.Infof("Scale down ECS desired tasks count to 0")
+		service.DesiredCount = 0
+		if _, err = client.UpdateService(ctx, *service); err != nil {
Member:
Does this function wait for the completion of all tasks before proceeding?

Author (Member):
I think it's not necessary 🤔 AFAIK, when the service desired count is set to 0, the termination process begins: ECS sends a stop signal to any running tasks of the service and stops routing traffic to it (since it is marked as inactive). The tasks can keep working on their in-flight processes/requests, but no new incoming traffic is accepted. So, rather than implementing a wait mechanism here, I guess it's better to implement proper handling on the application code side, wdyt?

Member:
Okay, after reviewing the old code, I think this approach will work well.
pipecd/pkg/app/piped/executor/ecs/ecs.go, lines 237 to 243 in 20cfb77
+			in.LogPersister.Errorf("Failed to stop service tasks: %v", err)
+			return false
+		}
+
+		in.LogPersister.Infof("Start rolling out ECS task set")
+		if err := createPrimaryTaskSet(ctx, client, *service, *td, targetGroup); err != nil {
+			in.LogPersister.Errorf("Failed to rolling out ECS task set for service %s: %v", *serviceDefinition.ServiceName, err)
+			return false
+		}
+
+		// Scale up the service tasks count back to its desired.
+		in.LogPersister.Infof("Scale up ECS desired tasks count back to %d", cnt)
+		service.DesiredCount = cnt
+		if _, err = client.UpdateService(ctx, *service); err != nil {
+			in.LogPersister.Errorf("Failed to turning back service tasks: %v", err)
+			return false
+		}
+	} else {
+		in.LogPersister.Infof("Start rolling out ECS task set")
+		if err := createPrimaryTaskSet(ctx, client, *service, *td, targetGroup); err != nil {
+			in.LogPersister.Errorf("Failed to rolling out ECS task set for service %s: %v", *serviceDefinition.ServiceName, err)
+			return false
+		}
+	}
 	}

 	in.LogPersister.Infof("Wait service to reach stable state")
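The review thread above debates whether the executor should explicitly wait for the scaled-down tasks to actually stop before rolling out the new task set, or rely on ECS's own termination behavior plus the existing "Wait service to reach stable state" step. For reference only, here is a minimal sketch of what such a drain wait could look like. It assumes direct access to an aws-sdk-go-v2 ECS client rather than PipeCD's provider.Client wrapper, and the helper name waitForZeroRunningTasks and its parameters are hypothetical, not part of this PR:

```go
package example

import (
	"context"
	"fmt"
	"time"

	"github.com/aws/aws-sdk-go-v2/aws"
	"github.com/aws/aws-sdk-go-v2/service/ecs"
)

// waitForZeroRunningTasks polls DescribeServices until the service reports no
// running tasks, the timeout elapses, or the context is cancelled.
func waitForZeroRunningTasks(ctx context.Context, cli *ecs.Client, cluster, service string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for {
		out, err := cli.DescribeServices(ctx, &ecs.DescribeServicesInput{
			Cluster:  aws.String(cluster),
			Services: []string{service},
		})
		if err != nil {
			return fmt.Errorf("describe service %s: %w", service, err)
		}
		if len(out.Services) > 0 && out.Services[0].RunningCount == 0 {
			// All tasks have stopped; safe to roll out the new task set.
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("timed out waiting for service %s to scale down to 0 tasks", service)
		}
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-ticker.C:
		}
	}
}
```

As the thread notes, graceful shutdown handling on the application side, together with the wait-for-stable step that already follows the rollout, may make such an explicit drain wait unnecessary.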
Member:
Do we need to update for rollback too?

Author:
I think this feature is only for the sync stage, not progressive, since we only have one task available at a time, isn't it?

Member:
I've forgotten most of the details regarding the code. At this point, my main concern is to ensure that the rollback process also keeps the singleton property.

Author:
I got your point 👍 I plan to update the rollback process logic in another PR, so we can set that aside for now.

Member:
Ok!