stop dependent containers before recreating diverged service #12122

Merged 1 commit on Sep 16, 2024
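Previously, when a diverged service was recreated, Compose tagged each dependent service with an `x-lifecycle: force_recreate` extension (via `setDependentLifecycle`, guarded by a package-level mutex) so that `mustRecreate` would later force those dependents to be recreated as well. This change removes that mechanism: before recreating a diverged container, `ensureService` now calls the new `stopDependentContainers`, which stops the containers of every dependent service returned by `project.GetDependentsForService` and marks them `ContainerExited` in the observed state, so they are restarted once the service has been re-created.

The sketch below is a simplified illustration of that ordering, not the real convergence code; `Engine`, `Container`, and `recreateDiverged` are hypothetical stand-ins for the machinery in `pkg/compose/convergence.go`.

```go
// Minimal sketch of the new ordering, not the real Compose implementation.
// Engine, Container, and recreateDiverged are hypothetical stand-ins; the
// point is only the order of operations: stop dependents, recreate the
// diverged service, then restart the dependents.
package main

import "fmt"

// Container is a hypothetical stand-in for a single service container.
type Container struct {
	Service string
	Running bool
}

// Engine is a hypothetical stand-in for the convergence state: one container
// per service plus a reverse dependency map (service -> services depending on it).
type Engine struct {
	containers map[string]*Container
	dependents map[string][]string
}

// recreateDiverged mirrors the flow introduced by this change: dependents are
// stopped first, the diverged service is recreated, and the dependents are
// then restarted against the new container.
func (e *Engine) recreateDiverged(service string) {
	for _, dep := range e.dependents[service] {
		fmt.Println("stopping dependent:", dep)
		e.containers[dep].Running = false
	}
	fmt.Println("recreating:", service)
	e.containers[service] = &Container{Service: service, Running: true}
	for _, dep := range e.dependents[service] {
		fmt.Println("restarting dependent:", dep)
		e.containers[dep].Running = true
	}
}

func main() {
	e := &Engine{
		containers: map[string]*Container{
			"fluentbit": {Service: "fluentbit", Running: true},
			"app":       {Service: "app", Running: true},
		},
		// app depends on fluentbit, matching the e2e fixture in this PR.
		dependents: map[string][]string{"fluentbit": {"app"}},
	}
	e.recreateDiverged("fluentbit")
}
```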
51 changes: 27 additions & 24 deletions pkg/compose/convergence.go
@@ -45,9 +45,6 @@ import (
)

const (
extLifecycle = "x-lifecycle"
forceRecreate = "force_recreate"

doubledContainerNameWarning = "WARNING: The %q service is using the custom container name %q. " +
"Docker requires each container to have a unique name. " +
"Remove the custom name to scale the service.\n"
@@ -108,9 +105,7 @@ func (c *convergence) apply(ctx context.Context, project *types.Project, options
})
}

var mu sync.Mutex

func (c *convergence) ensureService(ctx context.Context, project *types.Project, service types.ServiceConfig, recreate string, inherit bool, timeout *time.Duration) error {
func (c *convergence) ensureService(ctx context.Context, project *types.Project, service types.ServiceConfig, recreate string, inherit bool, timeout *time.Duration) error { //nolint:gocyclo
expected, err := getScale(service)
if err != nil {
return err
@@ -147,6 +142,7 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
// If we don't get a container number (?) just sort by creation date
return containers[i].Created < containers[j].Created
})

for i, container := range containers {
if i >= expected {
// Scale Down
@@ -163,6 +159,11 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
return err
}
if mustRecreate {
err := c.stopDependentContainers(ctx, project, service)
if err != nil {
return err
}

i, container := i, container
eg.Go(tracing.SpanWrapFuncForErrGroup(ctx, "container/recreate", tracing.ContainerOptions(container), func(ctx context.Context) error {
recreated, err := c.service.recreateContainer(ctx, project, service, container, inherit, timeout)
@@ -217,6 +218,25 @@ func (c *convergence) ensureService(ctx context.Context, project *types.Project,
return err
}

func (c *convergence) stopDependentContainers(ctx context.Context, project *types.Project, service types.ServiceConfig) error {
w := progress.ContextWriter(ctx)
// Stop dependent containers, so they will be restarted after the service is re-created
dependents := project.GetDependentsForService(service)
for _, name := range dependents {
dependents := c.observedState[name]
err := c.service.stopContainers(ctx, w, dependents, nil)
if err != nil {
return err
}
for i, dependent := range dependents {
dependent.State = ContainerExited
dependents[i] = dependent
}
c.observedState[name] = dependents
}
return nil
}

func getScale(config types.ServiceConfig) (int, error) {
scale := config.GetScale()
if scale > 1 && config.ContainerName != "" {
@@ -296,7 +316,7 @@ func mustRecreate(expected types.ServiceConfig, actual moby.Container, policy st
if policy == api.RecreateNever {
return false, nil
}
if policy == api.RecreateForce || expected.Extensions[extLifecycle] == forceRecreate {
if policy == api.RecreateForce {
return true, nil
}
configHash, err := ServiceHash(expected)
@@ -535,26 +555,9 @@ func (s *composeService) recreateContainer(ctx context.Context, project *types.P
}

w.Event(progress.NewEvent(getContainerProgressName(replaced), progress.Done, "Recreated"))
setDependentLifecycle(project, service.Name, forceRecreate)
return created, err
}

// setDependentLifecycle define the Lifecycle strategy for all services to depend on specified service
func setDependentLifecycle(project *types.Project, service string, strategy string) {
mu.Lock()
defer mu.Unlock()

for i, s := range project.Services {
if utils.StringContains(s.GetDependencies(), service) {
if s.Extensions == nil {
s.Extensions = map[string]interface{}{}
}
s.Extensions[extLifecycle] = strategy
project.Services[i] = s
}
}
}

func (s *composeService) startContainer(ctx context.Context, container moby.Container) error {
w := progress.ContextWriter(ctx)
w.Event(progress.NewEvent(getContainerProgressName(container), progress.Working, "Restart"))
20 changes: 20 additions & 0 deletions pkg/e2e/compose_up_test.go
@@ -90,3 +90,23 @@ func TestStdoutStderr(t *testing.T) {

c.RunDockerComposeCmd(t, "--project-name", projectName, "down", "--remove-orphans")
}

func TestLoggingDriver(t *testing.T) {
c := NewCLI(t)
const projectName = "e2e-logging-driver"

host := "HOST=127.0.0.1"
res := c.RunDockerCmd(t, "info", "-f", "{{.OperatingSystem}}")
os := res.Stdout()
if strings.TrimSpace(os) == "Docker Desktop" {
host = "HOST=host.docker.internal"
}

cmd := c.NewDockerComposeCmd(t, "-f", "fixtures/logging-driver/compose.yaml", "--project-name", projectName, "up", "-d")
cmd.Env = append(cmd.Env, host, "BAR=foo")
icmd.RunCmd(cmd).Assert(t, icmd.Success)

cmd = c.NewDockerComposeCmd(t, "-f", "fixtures/logging-driver/compose.yaml", "--project-name", projectName, "up", "-d")
cmd.Env = append(cmd.Env, host, "BAR=zot")
icmd.RunCmd(cmd).Assert(t, icmd.Success)
}
19 changes: 19 additions & 0 deletions pkg/e2e/fixtures/logging-driver/compose.yaml
@@ -0,0 +1,19 @@
services:
fluentbit:
image: fluent/fluent-bit:3.1.7-debug
ports:
- "24224:24224"
- "24224:24224/udp"
environment:
FOO: ${BAR}

app:
image: nginx
depends_on:
fluentbit:
condition: service_started
restart: true
logging:
driver: fluentd
options:
fluentd-address: ${HOST:-127.0.0.1}:24224
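The new fixture and TestLoggingDriver exercise this end to end: `app` ships its logs to `fluentbit` through the fluentd logging driver and declares `depends_on` on `fluentbit` with `restart: true`, so changing `BAR` between the two `up` runs diverges `fluentbit` and requires the dependent `app` container to be stopped before `fluentbit` is recreated, then restarted against the new container.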