diff --git a/agent/api/container/containertype.go b/agent/api/container/containertype.go index aa5f9e60498..740f504229e 100644 --- a/agent/api/container/containertype.go +++ b/agent/api/container/containertype.go @@ -39,6 +39,10 @@ const ( // ContainerServiceConnectRelay represents the internal container type // for the relay to share connections to management infrastructure. ContainerServiceConnectRelay + + // ContainerManagedDaemon represents the internal container type + // for Managed Daemons + ContainerManagedDaemon ) // ContainerType represents the type of the internal container created diff --git a/agent/engine/daemonmanager/daemon_manager.go b/agent/engine/daemonmanager/daemon_manager.go new file mode 100644 index 00000000000..34e2157e62e --- /dev/null +++ b/agent/engine/daemonmanager/daemon_manager.go @@ -0,0 +1,31 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package daemonmanager + +import ( + "context" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/utils/loader" + "github.com/docker/docker/api/types" +) + +type DaemonManager interface { + loader.Loader + CreateDaemonTask() (*apitask.Task, error) + LoadImage(ctx context.Context, _ *config.Config, dockerClient dockerapi.DockerClient) (*types.ImageInspect, error) + IsLoaded(dockerClient dockerapi.DockerClient) (bool, error) +} diff --git a/agent/engine/daemonmanager/daemon_manager_linux.go b/agent/engine/daemonmanager/daemon_manager_linux.go new file mode 100644 index 00000000000..ee3538be35a --- /dev/null +++ b/agent/engine/daemonmanager/daemon_manager_linux.go @@ -0,0 +1,166 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package daemonmanager + +import ( + "context" + "encoding/json" + "fmt" + "io/fs" + "os" + + "github.com/pborman/uuid" + + apicontainer "github.com/aws/amazon-ecs-agent/agent/api/container" + apicontainerstatus "github.com/aws/amazon-ecs-agent/agent/api/container/status" + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + "github.com/aws/amazon-ecs-agent/agent/config" + "github.com/aws/amazon-ecs-agent/agent/dockerclient/dockerapi" + "github.com/aws/amazon-ecs-agent/agent/taskresource" + "github.com/aws/amazon-ecs-agent/agent/utils/loader" + "github.com/aws/amazon-ecs-agent/ecs-agent/logger" + "github.com/aws/amazon-ecs-agent/ecs-agent/logger/field" + md "github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon" + "github.com/aws/aws-sdk-go/aws" + "github.com/docker/docker/api/types" + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + // all daemons will share the same user id + // note: AppNetUID is 20000 + daemonUID = 20001 + ecsAgentLogFileENV = "ECS_LOGFILE" + defaultECSAgentLogPathContainer = "/log" +) + +// each daemon manager manages one single daemon container +type daemonManager struct { + managedDaemon *md.ManagedDaemon +} + +func NewDaemonManager(manageddaemon *md.ManagedDaemon) DaemonManager { + return &daemonManager{managedDaemon: manageddaemon} +} + +func (dm *daemonManager) CreateDaemonTask() (*apitask.Task, error) { + imageName := dm.managedDaemon.GetLoadedDaemonImageRef() + containerRunning := apicontainerstatus.ContainerRunning + dockerHostConfig := dockercontainer.HostConfig{ + NetworkMode: apitask.HostNetworkMode, + // the default value of 0 for MaximumRetryCount means retry indefinitely + RestartPolicy: dockercontainer.RestartPolicy{ + Name: "on-failure", + MaximumRetryCount: 0, + }, + } + if !dm.managedDaemon.IsValidManagedDaemon() { + return nil, fmt.Errorf("%s is an invalid managed daemon", dm.managedDaemon.GetImageName()) + } + 
for _, mount := range dm.managedDaemon.GetMountPoints() { + err := mkdirAllAndChown(mount.SourceVolumeHostPath, 0700, daemonUID, os.Getegid()) + if err != nil { + return nil, err + } + dockerHostConfig.Binds = append(dockerHostConfig.Binds, + fmt.Sprintf("%s:%s", mount.SourceVolumeHostPath, mount.ContainerPath)) + } + rawHostConfig, err := json.Marshal(&dockerHostConfig) + if err != nil { + return nil, err + } + healthConfig := dm.managedDaemon.GetDockerHealthConfig() + rawHealthConfig, err := json.Marshal(&healthConfig) + if err != nil { + return nil, err + } + // The raw host config needs to be created this way - if we marshal the entire config object + // directly, and the object only contains healthcheck, all other fields will be written as empty/nil + // in the result string. This will override the configurations that comes with the container image + // (CMD for example) + rawConfig := fmt.Sprintf("{\"Healthcheck\":%s}", string(rawHealthConfig)) + + daemonTask := &apitask.Task{ + Arn: fmt.Sprintf("arn:::::/%s-%s", dm.managedDaemon.GetImageName(), uuid.NewUUID()), + DesiredStatusUnsafe: apitaskstatus.TaskRunning, + Containers: []*apicontainer.Container{{ + Name: dm.managedDaemon.GetImageName(), + Image: imageName, + ContainerArn: fmt.Sprintf("arn:::::/instance-%s", dm.managedDaemon.GetImageName()), + Type: apicontainer.ContainerManagedDaemon, + TransitionDependenciesMap: make(map[apicontainerstatus.ContainerStatus]apicontainer.TransitionDependencySet), + Essential: true, + SteadyStateStatusUnsafe: &containerRunning, + DockerConfig: apicontainer.DockerConfig{ + Config: aws.String(rawConfig), + HostConfig: aws.String(string(rawHostConfig)), + }, + HealthCheckType: "DOCKER", + }}, + LaunchType: "EC2", + NetworkMode: apitask.HostNetworkMode, + ResourcesMapUnsafe: make(map[string][]taskresource.TaskResource), + IsInternal: true, + } + // add managed daemon environment to daemon task container + 
daemonTask.Containers[0].MergeEnvironmentVariables(dm.managedDaemon.GetEnvironment())
+	return daemonTask, nil
+}
+
+// LoadImage loads the daemon's latest image
+func (dm *daemonManager) LoadImage(ctx context.Context, _ *config.Config, dockerClient dockerapi.DockerClient) (*types.ImageInspect, error) {
+	var loadErr error
+	daemonImageToLoad := dm.managedDaemon.GetImageName()
+	daemonImageTarPath := dm.managedDaemon.GetImageTarPath()
+	if _, err := os.Stat(daemonImageTarPath); err != nil {
+		logger.Warn(fmt.Sprintf("%s container tarball unavailable at path: %s", daemonImageToLoad, daemonImageTarPath), logger.Fields{
+			field.Error: err,
+		})
+	}
+	logger.Debug(fmt.Sprintf("Loading %s container image from tarball: %s", daemonImageToLoad, daemonImageTarPath))
+	if loadErr = loader.LoadFromFile(ctx, daemonImageTarPath, dockerClient); loadErr != nil {
+		logger.Warn(fmt.Sprintf("Unable to load %s container image from tarball: %s", daemonImageToLoad, daemonImageTarPath), logger.Fields{
+			field.Error: loadErr,
+		})
+	}
+	dm.managedDaemon.SetLoadedDaemonImageRef(dm.managedDaemon.GetImageCanonicalRef())
+	loadedImageRef := dm.managedDaemon.GetLoadedDaemonImageRef()
+	logger.Info(fmt.Sprintf("Successfully loaded %s container image from tarball: %s", daemonImageToLoad, daemonImageTarPath),
+		logger.Fields{
+			field.Image: loadedImageRef,
+		})
+	return loader.GetContainerImage(loadedImageRef, dockerClient)
+}
+
+// IsLoaded uses the image ref with its tag
+func (dm *daemonManager) IsLoaded(dockerClient dockerapi.DockerClient) (bool, error) {
+	return loader.IsImageLoaded(dm.managedDaemon.GetImageCanonicalRef(), dockerClient)
+}
+
+var mkdirAllAndChown = defaultMkdirAllAndChown
+
+func defaultMkdirAllAndChown(path string, perm fs.FileMode, uid, gid int) error {
+	_, err := os.Stat(path)
+	if os.IsNotExist(err) {
+		err = os.MkdirAll(path, perm)
+	}
+	if err != nil {
+		return fmt.Errorf("failed to mkdir %s: %+v", path, err)
+	}
+	if err = os.Chown(path, uid, gid); err != nil
{ + return fmt.Errorf("failed to chown %s: %+v", path, err) + } + return nil +} diff --git a/agent/engine/daemonmanager/daemon_manager_linux_test.go b/agent/engine/daemonmanager/daemon_manager_linux_test.go new file mode 100644 index 00000000000..b8fdbfe3259 --- /dev/null +++ b/agent/engine/daemonmanager/daemon_manager_linux_test.go @@ -0,0 +1,204 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package daemonmanager + +import ( + "encoding/json" + "fmt" + "io/fs" + "testing" + "time" + + apitask "github.com/aws/amazon-ecs-agent/agent/api/task" + apitaskstatus "github.com/aws/amazon-ecs-agent/agent/api/task/status" + md "github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon" + "github.com/aws/aws-sdk-go/aws" + "github.com/stretchr/testify/assert" +) + +const ( + TestDaemonName = "TestDaemon" + TestImageRef = "TestImage" + TestImageTag = "testTag" + TestOtherVolumeID = "otherTestMount" + TestHealthString = "testHealth" + TestImagePath = "/test/image/path" + TestAgentPath = "/test/agent/path" + TestMountPointVolume = "testVolume" +) + +func TestCreateDaemonTask(t *testing.T) { + + cases := []struct { + testName string + testDaemonName string + testImageRef string + testOtherMount *md.MountPoint + testHealthCheck []string + }{ + { + testName: "Basic Daemon", + testDaemonName: TestDaemonName, + testImageRef: TestImageRef, + testOtherMount: &md.MountPoint{SourceVolumeID: TestOtherVolumeID, ContainerPath: "/container/other/", 
SourceVolumeHostPath: "/var/ecs/other/"}, + testHealthCheck: []string{TestHealthString}, + }, + { + testName: "Daemon Updated Daemon Name", + testDaemonName: "TestDeemen", + testImageRef: TestImageRef, + testOtherMount: &md.MountPoint{SourceVolumeID: TestOtherVolumeID, ContainerPath: "/container/other/", SourceVolumeHostPath: "/var/ecs/other/"}, + testHealthCheck: []string{TestHealthString}, + }, + { + testName: "Daemon Updated Image ref", + testDaemonName: TestDaemonName, + testImageRef: "TestOtherImage", + testOtherMount: &md.MountPoint{SourceVolumeID: TestOtherVolumeID, ContainerPath: "/container/other/", SourceVolumeHostPath: "/var/ecs/other/"}, + testHealthCheck: []string{TestHealthString}, + }, + { + testName: "Daemon With Updated Mounts", + testDaemonName: TestDaemonName, + testImageRef: TestImageRef, + testOtherMount: &md.MountPoint{SourceVolumeID: "testUpdatedMountVolume", ContainerPath: "/container/other/", SourceVolumeHostPath: "/var/ecs/other/"}, + testHealthCheck: []string{TestHealthString}, + }, + { + testName: "Daemon With Updated HealthCheck", + testDaemonName: TestDaemonName, + testImageRef: TestImageRef, + testOtherMount: &md.MountPoint{SourceVolumeID: TestOtherVolumeID, ContainerPath: "/container/other/", SourceVolumeHostPath: "/var/ecs/other/"}, + testHealthCheck: []string{"testOtherHealth"}, + }, + } + + for _, c := range cases { + t.Run(c.testName, func(t *testing.T) { + // mock mkdirAllAndChown + origMkdir := mkdirAllAndChown + mkdirAllAndChown = func(path string, perm fs.FileMode, uid, gid int) error { + return nil + } + // set up test managed daemon + tmd := md.NewManagedDaemon(c.testDaemonName, c.testImageRef, TestImageTag) + tmd.SetLoadedDaemonImageRef(c.testImageRef) + // create required mounts and add all + testAgentCommunicationMount := &md.MountPoint{SourceVolumeID: "agentCommunicationMount", ContainerPath: "/container/run/"} + testApplicationLogMount := &md.MountPoint{SourceVolumeID: "applicationLogMount", ContainerPath: 
"/container/log/"} + testMountPoints := []*md.MountPoint{} + testMountPoints = append(testMountPoints, testAgentCommunicationMount, testApplicationLogMount, c.testOtherMount) + tmd.SetMountPoints(testMountPoints) + // add required healthcheck + tmd.SetHealthCheck(c.testHealthCheck, 2*time.Minute, 2*time.Minute, 1) + // create daemon manager for the above daemon + testDaemonManager := NewDaemonManager(tmd) + resultDaemonTask, _ := testDaemonManager.CreateDaemonTask() + // validate daemon task configs + assert.Equal(t, fmt.Sprintf("arn:::::/%s-", c.testDaemonName), resultDaemonTask.Arn[:20], "Task Arn should prefix should match Image Name ") + assert.Equal(t, apitaskstatus.TaskRunning, resultDaemonTask.DesiredStatusUnsafe, "Task DesiredStatus should be running") + assert.Equal(t, apitask.HostNetworkMode, resultDaemonTask.NetworkMode, "Task NetworkMode should be Host") + assert.Equal(t, "EC2", resultDaemonTask.LaunchType, "Task LaunchType should be EC2") + assert.Equal(t, true, resultDaemonTask.IsInternal, "Task IsInteral should be true") + // validate task container + assert.Equal(t, c.testImageRef, resultDaemonTask.Containers[0].Image, "Task Container Image Name should match image ref") + // validate daemon container configs + configRaw := resultDaemonTask.Containers[0].DockerConfig.Config + hostConfigRaw := resultDaemonTask.Containers[0].DockerConfig.HostConfig + var configMap map[string]interface{} + var hostConfigMap map[string]interface{} + json.Unmarshal([]byte(aws.StringValue(configRaw)), &configMap) + json.Unmarshal([]byte(aws.StringValue(hostConfigRaw)), &hostConfigMap) + // validate mount points + containerBinds := hostConfigMap["Binds"].([]interface{}) + assert.Equal(t, true, containsString(containerBinds, "/var/ecs/other/:/container/other/"), "Container Missing Optional Container Bind") + // validate healthcheck + containerHealthCheck := configMap["Healthcheck"].(map[string]interface{}) + containerHealthCheckTest := 
containerHealthCheck["Test"].([]interface{}) + assert.Equal(t, c.testHealthCheck[0], containerHealthCheckTest[0].(string), "Container health check has changed") + mkdirAllAndChown = origMkdir + }) + } +} + +func TestFailCreateDaemonTask_MissingMount(t *testing.T) { + // mock mkdirAllAndChown + origMkdir := mkdirAllAndChown + mkdirAllAndChown = func(path string, perm fs.FileMode, uid, gid int) error { + return nil + } + // set up test managed daemon + tmd := md.NewManagedDaemon(TestDaemonName, TestImageRef, TestImageTag) + tmd.SetLoadedDaemonImageRef(TestImageRef) + // test failure with a missing applicationLogMount + testAgentCommunicationMount := &md.MountPoint{SourceVolumeID: "agentCommunicationMount", ContainerPath: "/container/run/"} + testOtherMount := &md.MountPoint{SourceVolumeID: TestOtherVolumeID, ContainerPath: "/container/other/", SourceVolumeHostPath: "/var/ecs/other/"} + testMountPoints := []*md.MountPoint{} + testMountPoints = append(testMountPoints, testAgentCommunicationMount, testOtherMount) + tmd.SetMountPoints(testMountPoints) + testDaemonManager := NewDaemonManager(tmd) + _, err := testDaemonManager.CreateDaemonTask() + assert.EqualError(t, err, fmt.Sprintf("%s is an invalid managed daemon", TestDaemonName)) + + // add required log mount but no healthcheck + testApplicationLogMount := &md.MountPoint{SourceVolumeID: "applicationLogMount", ContainerPath: "/container/log/"} + testMountPoints = append(testMountPoints, testApplicationLogMount) + tmd.SetMountPoints(testMountPoints) + testDaemonManager = NewDaemonManager(tmd) + _, err = testDaemonManager.CreateDaemonTask() + assert.EqualError(t, err, fmt.Sprintf("%s is an invalid managed daemon", TestDaemonName)) + + // add required healthcheck + testHealthCheck := []string{"test"} + tmd.SetHealthCheck(testHealthCheck, 2*time.Minute, 2*time.Minute, 1) + testDaemonManager = NewDaemonManager(tmd) + resultDaemonTask, err := testDaemonManager.CreateDaemonTask() + + // validate daemon task configs + 
assert.Equal(t, fmt.Sprintf("arn:::::/%s-", TestDaemonName), resultDaemonTask.Arn[:20], "Task Arn should prefix should match Image Name ") + assert.Equal(t, apitaskstatus.TaskRunning, resultDaemonTask.DesiredStatusUnsafe, "Task DesiredStatus should be running") + assert.Equal(t, apitask.HostNetworkMode, resultDaemonTask.NetworkMode, "Task NetworkMode should be Host") + assert.Equal(t, "EC2", resultDaemonTask.LaunchType, "Task LaunchType should be EC2") + assert.Equal(t, true, resultDaemonTask.IsInternal, "Task IsInteral should be true") + + // validate task container + assert.Equal(t, TestImageRef, resultDaemonTask.Containers[0].Image, "Task Container Image Name should match image ref") + + // validate daemon container configs + configRaw := resultDaemonTask.Containers[0].DockerConfig.Config + hostConfigRaw := resultDaemonTask.Containers[0].DockerConfig.HostConfig + var configMap map[string]interface{} + var hostConfigMap map[string]interface{} + json.Unmarshal([]byte(aws.StringValue(configRaw)), &configMap) + json.Unmarshal([]byte(aws.StringValue(hostConfigRaw)), &hostConfigMap) + + // validate mount point count + containerBinds := hostConfigMap["Binds"].([]interface{}) + assert.Equal(t, len(containerBinds), 3, "Task should have Required container binds (2) + 1 other bind") + // validate healthcheck + containerHealthCheck := configMap["Healthcheck"].(map[string]interface{}) + containerHealthCheckTest := containerHealthCheck["Test"].([]interface{}) + assert.Equal(t, testHealthCheck[0], containerHealthCheckTest[0].(string), "Container health check has changed") + mkdirAllAndChown = origMkdir +} + +// containsString will typecast elements to strings and compare to the target +func containsString(arr []interface{}, target string) bool { + for _, val := range arr { + if val.(string) == target { + return true + } + } + return false +} diff --git a/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon/managed_daemon.go 
b/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon/managed_daemon.go new file mode 100644 index 00000000000..7c34cae5f7e --- /dev/null +++ b/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon/managed_daemon.go @@ -0,0 +1,330 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. + +package manageddaemon + +import ( + "fmt" + "time" + + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + imageTarPath = "/var/lib/ecs/deps/daemons" + imageTagDefault = "latest" + defaultAgentCommunicationPathHostRoot = "/var/run/ecs" + defaultApplicationLogPathHostRoot = "/var/log/ecs" + defaultAgentCommunicationMount = "agentCommunicationMount" + defaultApplicationLogMount = "applicationLogMount" +) + +type ManagedDaemon struct { + imageName string + imageRef string + imageTag string + + healthCheckTest []string + healthCheckInterval time.Duration + healthCheckTimeout time.Duration + healthCheckRetries int + + // Daemons require an agent <-> daemon mount + // identified by the volume name `agentCommunicationMount` + // the SourceVolumeHostPath will always be overridden to + // /var/run/ecs/ + agentCommunicationMount *MountPoint + + // Daemons require an application log mount + // identified by the volume name `applicationLogMount` + // the SourceVolumeHostPath will always be overridden to + // /var/log/ecs/ + applicationLogMount *MountPoint + + mountPoints []*MountPoint + environment map[string]string + 
+ loadedDaemonImageRef string +} + +// A valid managed daemon will require +// healthcheck and mount points to be added +func NewManagedDaemon( + imageName string, + imageRef string, + imageTag string, +) *ManagedDaemon { + if imageTag == "" { + imageTag = imageTagDefault + } + // health check retries 0 is valid + // we'll override this default to -1 + newManagedDaemon := &ManagedDaemon{ + imageName: imageName, + imageRef: imageRef, + imageTag: imageTag, + healthCheckRetries: -1, + } + return newManagedDaemon +} + +// ImportAll function will parse/validate all managed daemon definitions +// defined in /var/lib/ecs/deps/daemons and will return an array +// of valid ManagedDeamon objects +func ImportAll() []*ManagedDaemon { + // TODO parse files in /deps/daemons + // TODO validate that each daemon has an image tar + // TODO validate that there is one MountPoint with + // SourceVolume: 'agentCommunicationMount' + // TODO validate that there is one MountPoint with + // SourceVolume: 'applicationLogMount' + ebsManagedDaemon := NewManagedDaemon("ebs-csi-driver", + "public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver", + "v1.20.0") + // todo add healthcheck + // todo add mount points + return []*ManagedDaemon{ebsManagedDaemon} +} + +func (md *ManagedDaemon) SetHealthCheck( + healthCheckTest []string, + healthCheckInterval time.Duration, + healthCheckTimeout time.Duration, + healthCheckRetries int) { + md.healthCheckInterval = healthCheckInterval + md.healthCheckTimeout = healthCheckTimeout + md.healthCheckRetries = healthCheckRetries + md.healthCheckTest = make([]string, len(healthCheckTest)) + copy(md.healthCheckTest, healthCheckTest) +} + +func (md *ManagedDaemon) GetImageName() string { + return md.imageName +} + +func (md *ManagedDaemon) GetImageRef() string { + return md.imageRef +} + +func (md *ManagedDaemon) GetImageTag() string { + return md.imageTag +} + +func (md *ManagedDaemon) GetImageCanonicalRef() string { + return (fmt.Sprintf("%s:%s", md.imageRef, 
md.imageTag)) +} + +func (md *ManagedDaemon) GetImageTarPath() string { + return (fmt.Sprintf("%s/%s", imageTarPath, md.imageName)) +} + +func (md *ManagedDaemon) GetAgentCommunicationMount() *MountPoint { + return md.agentCommunicationMount +} + +func (md *ManagedDaemon) GetApplicationLogMount() *MountPoint { + return md.applicationLogMount +} + +// returns list of mountpoints without the +// agentCommunicationMount and applicationLogMount +func (md *ManagedDaemon) GetFilteredMountPoints() []*MountPoint { + filteredMounts := make([]*MountPoint, len(md.mountPoints)) + copy(filteredMounts, md.mountPoints) + return filteredMounts +} + +// returns list of mountpoints which (re)integrates +// agentCommunicationMount and applicationLogMount +// these will always include host mount file overrides +func (md *ManagedDaemon) GetMountPoints() []*MountPoint { + allMounts := make([]*MountPoint, len(md.mountPoints)) + copy(allMounts, md.mountPoints) + allMounts = append(allMounts, md.agentCommunicationMount) + allMounts = append(allMounts, md.applicationLogMount) + return allMounts +} + +func (md *ManagedDaemon) GetEnvironment() map[string]string { + return md.environment +} + +func (md *ManagedDaemon) GetLoadedDaemonImageRef() string { + return md.loadedDaemonImageRef +} + +// filter mount points for agentCommunicationMount +// set required mounts +// and override host paths in favor of agent defaults +// when a duplicate SourceVolumeID is given, the last Mount wins +func (md *ManagedDaemon) SetMountPoints(mountPoints []*MountPoint) error { + var mountPointMap = make(map[string]*MountPoint) + for _, mp := range mountPoints { + if mp.SourceVolumeID == defaultAgentCommunicationMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultAgentCommunicationPathHostRoot, md.imageName) + md.agentCommunicationMount = mp + } else if mp.SourceVolumeID == defaultApplicationLogMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultApplicationLogPathHostRoot, md.imageName) + 
md.applicationLogMount = mp + } else { + mountPointMap[mp.SourceVolumeID] = mp + } + } + mountResult := []*MountPoint{} + for _, mp := range mountPointMap { + mountResult = append(mountResult, mp) + } + md.mountPoints = mountResult + return nil +} + +// Used to set or to update the agentCommunicationMount +func (md *ManagedDaemon) SetAgentCommunicationMount(mp *MountPoint) error { + if mp.SourceVolumeID == defaultAgentCommunicationMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultAgentCommunicationPathHostRoot, md.imageName) + md.agentCommunicationMount = mp + return nil + } else { + return fmt.Errorf("AgentCommunicationMount %s must have a SourceVolumeID of %s", mp.SourceVolumeID, defaultAgentCommunicationMount) + } +} + +// Used to set or to update the applicationLogMount +func (md *ManagedDaemon) SetApplicationLogMount(mp *MountPoint) error { + if mp.SourceVolumeID == defaultApplicationLogMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultApplicationLogPathHostRoot, md.imageName) + md.applicationLogMount = mp + return nil + } else { + return fmt.Errorf("ApplicationLogMount %s must have a SourceVolumeID of %s", mp.SourceVolumeID, defaultApplicationLogMount) + } +} + +func (md *ManagedDaemon) SetEnvironment(environment map[string]string) { + md.environment = make(map[string]string) + for key, val := range environment { + md.environment[key] = val + } +} + +func (md *ManagedDaemon) SetLoadedDaemonImageRef(loadedImageRef string) { + md.loadedDaemonImageRef = loadedImageRef +} + +// AddMountPoint will add by MountPoint.SourceVolume +// which is unique to the task and is a required field +// and will throw an error if an existing +// MountPoint.SourceVolume is found +func (md *ManagedDaemon) AddMountPoint(mp *MountPoint) error { + mountIndex := md.GetMountPointIndex(mp) + if mountIndex != -1 { + return fmt.Errorf("MountPoint already exists at index %d", mountIndex) + } + md.mountPoints = append(md.mountPoints, mp) + return nil +} + +// 
UpdateMountPointBySourceVolume will update by
+// MountPoint.SourceVolume which is unique to the task
+// and will throw an error if the MountPoint.SourceVolume
+// is not found
+func (md *ManagedDaemon) UpdateMountPointBySourceVolume(mp *MountPoint) error {
+	mountIndex := md.GetMountPointIndex(mp)
+	if mountIndex < 0 {
+		return fmt.Errorf("MountPoint %s not found; will not update", mp.SourceVolume)
+	}
+	md.mountPoints[mountIndex] = mp
+	return nil
+}
+
+// DeleteMountPoint will delete by
+// MountPoint.SourceVolume which is unique to the task
+// and will throw an error if the MountPoint.SourceVolume
+// is not found
+func (md *ManagedDaemon) DeleteMountPoint(mp *MountPoint) error {
+	mountIndex := md.GetMountPointIndex(mp)
+	if mountIndex < 0 {
+		return fmt.Errorf("MountPoint %s not found; will not delete", mp.SourceVolume)
+	}
+	md.mountPoints = append(md.mountPoints[:mountIndex], md.mountPoints[mountIndex+1:]...)
+	return nil
+}
+
+// GetMountPointIndex will return index of a mountpoint or -1
+// search by the unique MountPoint.SourceVolume field
+func (md *ManagedDaemon) GetMountPointIndex(mp *MountPoint) int {
+	sourceVolume := mp.SourceVolume
+	for i, mount := range md.mountPoints {
+		if mount.SourceVolume == sourceVolume {
+			return i
+		}
+	}
+	return -1
+}
+
+// AddEnvVar will add by envKey
+// and will throw an error if an existing
+// envKey is found
+func (md *ManagedDaemon) AddEnvVar(envKey string, envVal string) error {
+	_, exists := md.environment[envKey]
+	if !exists {
+		md.environment[envKey] = envVal
+		return nil
+	}
+	return fmt.Errorf("EnvKey: %s already exists; will not add EnvVal: %s", envKey, envVal)
+}
+
+// Updates environment variable by envKey
+// and will throw an error if the envKey
+// is not found
+func (md *ManagedDaemon) UpdateEnvVar(envKey string, envVal string) error {
+	_, ok := md.environment[envKey]
+	if !ok {
+		return fmt.Errorf("EnvKey: %s not found; will not update EnvVal: %s", envKey, envVal)
+	}
+	md.environment[envKey] = envVal
+
return nil +} + +// Deletes environment variable by envKey +// and will throw an error if the envKey +// is not found +func (md *ManagedDaemon) DeleteEnvVar(envKey string) error { + _, ok := md.environment[envKey] + if !ok { + return fmt.Errorf("EnvKey: %s not found; will not delete", envKey) + } + delete(md.environment, envKey) + return nil +} + +// Generates a DockerHealthConfig object from the +// ManagedDaeemon Health Check fields +func (md *ManagedDaemon) GetDockerHealthConfig() *dockercontainer.HealthConfig { + return &dockercontainer.HealthConfig{ + Test: md.healthCheckTest, + Interval: md.healthCheckInterval, + Timeout: md.healthCheckTimeout, + Retries: md.healthCheckRetries, + } +} + +// Validates that all required fields are present and valid +func (md *ManagedDaemon) IsValidManagedDaemon() bool { + isValid := true + isValid = isValid && (md.agentCommunicationMount != nil) + isValid = isValid && (md.applicationLogMount != nil) + isValid = isValid && (len(md.healthCheckTest) != 0) + isValid = isValid && (md.healthCheckRetries != -1) + return isValid +} diff --git a/ecs-agent/managed_daemon/mountpoint.go b/agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon/mountpoint.go similarity index 100% rename from ecs-agent/managed_daemon/mountpoint.go rename to agent/vendor/github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon/mountpoint.go diff --git a/agent/vendor/modules.txt b/agent/vendor/modules.txt index 9a6ff2b7892..263c06a56a3 100644 --- a/agent/vendor/modules.txt +++ b/agent/vendor/modules.txt @@ -28,6 +28,7 @@ github.com/aws/amazon-ecs-agent/ecs-agent/logger/audit github.com/aws/amazon-ecs-agent/ecs-agent/logger/audit/mocks github.com/aws/amazon-ecs-agent/ecs-agent/logger/audit/request github.com/aws/amazon-ecs-agent/ecs-agent/logger/field +github.com/aws/amazon-ecs-agent/ecs-agent/manageddaemon github.com/aws/amazon-ecs-agent/ecs-agent/metrics github.com/aws/amazon-ecs-agent/ecs-agent/stats 
github.com/aws/amazon-ecs-agent/ecs-agent/tcs/client diff --git a/ecs-agent/managed_daemon/managed_daemon.go b/ecs-agent/managed_daemon/managed_daemon.go deleted file mode 100644 index d01a19e8e07..00000000000 --- a/ecs-agent/managed_daemon/managed_daemon.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. - -package manageddaemon - -import ( - "fmt" - "time" - - dockercontainer "github.com/docker/docker/api/types/container" -) - -type ManagedDaemon struct { - imageName string - imageCanonicalRef string - imagePath string - - healthCheckTest []string - healthCheckInterval time.Duration - healthCheckTimeout time.Duration - healthCheckRetries int - - // Daemons will have a primary agent <-> daemon mount - // this may either be implictly the first mount in the MountPoints array - // or explicitly a separate mount - agentCommunicationMount *MountPoint - agentCommunicationFileName string - - mountPoints []*MountPoint - environment map[string]string -} - -func NewManagedDaemon( - imageName string, - imageCanonicalRef string, - imagePath string, - agentCommunicationMount *MountPoint, - agentCommunicationFileName string, -) *ManagedDaemon { - newManagedDaemon := &ManagedDaemon{ - imageName: imageName, - imageCanonicalRef: imageCanonicalRef, - imagePath: imagePath, - agentCommunicationMount: agentCommunicationMount, - agentCommunicationFileName: agentCommunicationFileName, - } - return newManagedDaemon -} - -func (md 
*ManagedDaemon) SetHealthCheck( - healthCheckTest []string, - healthCheckInterval time.Duration, - healthCheckTimeout time.Duration, - healthCheckRetries int) { - md.healthCheckInterval = healthCheckInterval - md.healthCheckTimeout = healthCheckTimeout - md.healthCheckRetries = healthCheckRetries - md.healthCheckTest = make([]string, len(healthCheckTest)) - copy(md.healthCheckTest, healthCheckTest) -} - -func (md *ManagedDaemon) GetImageName() string { - return md.imageName -} - -func (md *ManagedDaemon) GetImageCanonicalRef() string { - return md.imageCanonicalRef -} - -func (md *ManagedDaemon) GetImagePath() string { - return md.imagePath -} - -func (md *ManagedDaemon) GetAgentCommunicationMount() *MountPoint { - return md.agentCommunicationMount -} - -func (md *ManagedDaemon) GetAgentCommunicationFileName() string { - return md.agentCommunicationFileName -} - -func (md *ManagedDaemon) GetMountPoints() []*MountPoint { - return md.mountPoints -} - -func (md *ManagedDaemon) GetEnvironment() map[string]string { - return md.environment -} - -func (md *ManagedDaemon) SetMountPoints(mountPoints []*MountPoint) { - md.mountPoints = make([]*MountPoint, len(mountPoints)) - copy(md.mountPoints, mountPoints) -} - -func (md *ManagedDaemon) SetEnvironment(environment map[string]string) { - md.environment = make(map[string]string) - for key, val := range environment { - md.environment[key] = val - } -} - -// AddMountPoint will add by MountPoint.SourceVolume -// which is unique to the task and is a required field -// and will throw an error if an existing -// MountPoint.SourceVolume is found -func (md *ManagedDaemon) AddMountPoint(mp *MountPoint) error { - mountIndex := md.GetMountPointIndex(mp) - if mountIndex != -1 { - return fmt.Errorf("MountPoint already exists at index %d", mountIndex) - } - md.mountPoints = append(md.mountPoints, mp) - return nil -} - -// UpdateMountPoint will update by -// MountPoint.SourceVolume which is unique to the task -// and will throw an error if 
the MountPoint.SourceVolume -// is not found -func (md *ManagedDaemon) UpdateMountPointBySourceVolume(mp *MountPoint) error { - mountIndex := md.GetMountPointIndex(mp) - if mountIndex < 0 { - return fmt.Errorf("MountPoint %s not found; will not update", mp.SourceVolume) - } - md.mountPoints[mountIndex] = mp - return nil -} - -// UpdateMountPoint will delete by -// MountPoint.SourceVolume which is unique to the task -// and will throw an error if the MountPoint.SourceVolume -// is not found -func (md *ManagedDaemon) DeleteMountPoint(mp *MountPoint) error { - mountIndex := md.GetMountPointIndex(mp) - if mountIndex < 0 { - return fmt.Errorf("MountPoint %s not found; will not delete", mp.SourceVolume) - } - md.mountPoints = append(md.mountPoints[:mountIndex], md.mountPoints[mountIndex+1:]...) - return nil -} - -// GetMountPointIndex will return index of a mountpoint or -1 -// search by the unique MountPoint.SourceVolume field -func (md *ManagedDaemon) GetMountPointIndex(mp *MountPoint) int { - sourceVolume := mp.SourceVolume - for i, mount := range md.mountPoints { - if mount.SourceVolume == sourceVolume { - return i - } - } - return -1 -} - -// AddEnvVar will add by envKey -// and will throw an error if an existing -// envKey is found -func (md *ManagedDaemon) AddEnvVar(envKey string, envVal string) error { - _, exists := md.environment[envKey] - if !exists { - md.environment[envKey] = envVal - return nil - } - return fmt.Errorf("EnvKey: %s already exists; will not add EnvVal: %s", envKey, envVal) -} - -// Updates environment varable by evnKey -// and will throw an error if the envKey -// is not found -func (md *ManagedDaemon) UpdateEnvVar(envKey string, envVal string) error { - _, ok := md.environment[envKey] - if !ok { - return fmt.Errorf("EnvKey: %s not found; will not update EnvVal: %s", envKey, envVal) - } - md.environment[envKey] = envVal - return nil -} - -// Deletes environment variable by envKey -// and will throw an error if the envKey -// is not found -func 
(md *ManagedDaemon) DeleteEnvVar(envKey string) error { - _, ok := md.environment[envKey] - if !ok { - return fmt.Errorf("EnvKey: %s not found; will not delete", envKey) - } - delete(md.environment, envKey) - return nil -} - -// Generates a DockerHealthConfig object from the -// ManagedDaeemon Health Check fields -func (md *ManagedDaemon) GetDockerHealthConfig() *dockercontainer.HealthConfig { - return &dockercontainer.HealthConfig{ - Test: md.healthCheckTest, - Interval: md.healthCheckInterval, - Timeout: md.healthCheckTimeout, - Retries: md.healthCheckRetries, - } -} diff --git a/ecs-agent/managed_daemon/managed_daemon_test.go b/ecs-agent/managed_daemon/managed_daemon_test.go deleted file mode 100644 index b9e4c5170a6..00000000000 --- a/ecs-agent/managed_daemon/managed_daemon_test.go +++ /dev/null @@ -1,289 +0,0 @@ -//go:build unit -// +build unit - -// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"). You may -// not use this file except in compliance with the License. A copy of the -// License is located at -// -// http://aws.amazon.com/apache2.0/ -// -// or in the "license" file accompanying this file. This file is distributed -// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -// express or implied. See the License for the specific language governing -// permissions and limitations under the License. 
- -package manageddaemon - -import ( - "testing" - "time" - - dockercontainer "github.com/docker/docker/api/types/container" - "github.com/stretchr/testify/assert" -) - -const ( - TestImageName = "TestImage" - TestImageCanonicalRef = "TestRef:latest" - TestImagePath = "/test/image/path" - TestAgentPath = "/test/agent/path" - TestMountPointVolume = "testVolume" -) - -func TestNewManagedDaemon(t *testing.T) { - cases := []struct { - TestName string - TestImageName string - TestAgentMount *MountPoint - ExpectedDaemonName string - ExpectedMountVolume string - ExpectedMountReadOnly bool - }{ - { - TestName: "All Fields", - TestImageName: TestImageName, - TestAgentMount: &MountPoint{SourceVolume: TestMountPointVolume, ReadOnly: true}, - ExpectedDaemonName: TestImageName, - ExpectedMountVolume: TestMountPointVolume, - ExpectedMountReadOnly: true, - }, - { - TestName: "Empty Image Name", - TestImageName: "", - TestAgentMount: &MountPoint{SourceVolume: TestMountPointVolume, ReadOnly: true}, - ExpectedDaemonName: "", - ExpectedMountVolume: TestMountPointVolume, - ExpectedMountReadOnly: true, - }, - { - TestName: "Missing Mount ReadOnly Field", - TestImageName: TestImageName, - TestAgentMount: &MountPoint{SourceVolume: TestMountPointVolume}, - ExpectedDaemonName: TestImageName, - ExpectedMountVolume: TestMountPointVolume, - ExpectedMountReadOnly: false, - }, - } - - for _, c := range cases { - t.Run(c.TestName, func(t *testing.T) { - tmd := NewManagedDaemon( - c.TestImageName, - TestImageCanonicalRef, - TestImagePath, - c.TestAgentMount, - "") - assert.Equal(t, c.ExpectedDaemonName, tmd.GetImageName(), "Wrong value for Managed Daemon Image Name") - assert.Equal(t, c.ExpectedMountVolume, tmd.GetAgentCommunicationMount().SourceVolume, "Wrong value for Managed Daemon Mount Source Volume") - assert.Equal(t, c.ExpectedMountReadOnly, tmd.GetAgentCommunicationMount().ReadOnly, "Wrong value for Managed Daemon ReadOnly") - assert.Equal(t, "", tmd.GetAgentCommunicationFileName(), 
"Wrong value for Managed Daemon Agent Communication File Name") - }) - } -} - -func TestSetMountPoints(t *testing.T) { - cases := []struct { - TestName string - TestAgentMount *MountPoint - TestMountCount int - }{ - { - TestName: "All Fields", - TestAgentMount: &MountPoint{SourceVolume: TestMountPointVolume, ReadOnly: true}, - TestMountCount: 0, - }, - { - TestName: "Empty Image Name", - TestAgentMount: &MountPoint{SourceVolume: TestMountPointVolume, ReadOnly: true}, - TestMountCount: 1, - }, - { - TestName: "Missing Mount ReadOnly Field", - TestAgentMount: &MountPoint{SourceVolume: TestMountPointVolume}, - TestMountCount: 2, - }, - } - - for _, c := range cases { - t.Run(c.TestName, func(t *testing.T) { - tmd := NewManagedDaemon( - TestImageName, - TestImageCanonicalRef, - TestImagePath, - c.TestAgentMount, - "") - mountPoints := []*MountPoint{} - for i := 0; i < c.TestMountCount; i++ { - mountPoints = append(mountPoints, c.TestAgentMount) - } - tmd.SetMountPoints(mountPoints) - assert.Equal(t, c.TestMountCount, len(tmd.GetMountPoints()), "Wrong value for Set Managed Daemon Mounts") - }) - } -} - -func TestAddMountPoint(t *testing.T) { - testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} - testMountPoint2 := &MountPoint{SourceVolume: "TestMountPointVolume2"} - mountPoints := []*MountPoint{} - mountPoints = append(mountPoints, testMountPoint1) - tmd := NewManagedDaemon(TestImageName, TestImageCanonicalRef, TestImagePath, testMountPoint1, "") - tmd.SetMountPoints(mountPoints) - // test add valid mount point - errResult := tmd.AddMountPoint(testMountPoint2) - assert.Equal(t, 2, len(tmd.GetMountPoints())) - assert.Equal(t, "TestMountPointVolume2", tmd.GetMountPoints()[1].SourceVolume) - assert.Nil(t, errResult) - - // test add existing mount point -- should fail - errResult = tmd.AddMountPoint(testMountPoint2) - expectedErrorMsg := "MountPoint already exists at index 1" - assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: 
%v", expectedErrorMsg, errResult) -} - -func TestUpdateMountPoint(t *testing.T) { - testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} - mountPoints := []*MountPoint{} - mountPoints = append(mountPoints, testMountPoint1) - tmd := NewManagedDaemon(TestImageName, TestImageCanonicalRef, TestImagePath, testMountPoint1, "") - tmd.SetMountPoints(mountPoints) - assert.Equal(t, 1, len(tmd.GetMountPoints())) - assert.False(t, tmd.GetMountPoints()[0].ReadOnly) - - // test update existing mount point - updatedMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1", ReadOnly: true} - errResult := tmd.UpdateMountPointBySourceVolume(updatedMountPoint1) - assert.Equal(t, 1, len(tmd.GetMountPoints())) - assert.True(t, tmd.GetMountPoints()[0].ReadOnly) - - // test update non-existing mount point - testMountPoint2 := &MountPoint{SourceVolume: "TestMountPointVolume2"} - errResult = tmd.UpdateMountPointBySourceVolume(testMountPoint2) - expectedErrorMsg := "MountPoint TestMountPointVolume2 not found; will not update" - assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult) -} - -func TestDeleteMountPoint(t *testing.T) { - testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} - mountPoints := []*MountPoint{} - mountPoints = append(mountPoints, testMountPoint1) - tmd := NewManagedDaemon(TestImageName, TestImageCanonicalRef, TestImagePath, testMountPoint1, "") - tmd.SetMountPoints(mountPoints) - assert.Equal(t, 1, len(tmd.GetMountPoints())) - assert.False(t, tmd.GetMountPoints()[0].ReadOnly) - - // test delete non-existing mount point - testMountPoint2 := &MountPoint{SourceVolume: "TestMountPointVolume2"} - errResult := tmd.DeleteMountPoint(testMountPoint2) - assert.Equal(t, 1, len(tmd.GetMountPoints())) - expectedErrorMsg := "MountPoint TestMountPointVolume2 not found; will not delete" - assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, 
errResult) - - // test delete existing mount point - errResult = tmd.DeleteMountPoint(testMountPoint1) - assert.Equal(t, 0, len(tmd.mountPoints)) -} - -func TestSetEnvironment(t *testing.T) { - cases := []struct { - TestName string - TestEnvironmentMap map[string]string - }{ - { - TestName: "Missing Map", - TestEnvironmentMap: nil, - }, - { - TestName: "Single Element Map", - TestEnvironmentMap: map[string]string{"testKey1": "testVal1"}, - }, - { - TestName: "Multi Map", - TestEnvironmentMap: map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"}, - }, - } - - for _, c := range cases { - t.Run(c.TestName, func(t *testing.T) { - tmd := NewManagedDaemon( - TestImageName, - TestImageCanonicalRef, - TestImagePath, - &MountPoint{SourceVolume: TestMountPointVolume}, - "") - tmd.SetEnvironment(c.TestEnvironmentMap) - assert.Equal(t, len(c.TestEnvironmentMap), len(tmd.GetEnvironment()), "Wrong value for Set Environment") - }) - } -} - -func TestAddEnvVar(t *testing.T) { - testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} - tmd := NewManagedDaemon(TestImageName, TestImageCanonicalRef, TestImagePath, testMountPoint1, "") - tmd.SetEnvironment(map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"}) - // test add new EnvKey - errResult := tmd.AddEnvVar("testKey3", "testVal3") - assert.Nil(t, errResult) - assert.Equal(t, 3, len(tmd.GetEnvironment())) - assert.Equal(t, "testVal3", tmd.GetEnvironment()["testKey3"]) - - // test add existing EnvKey -- should fail - errResult = tmd.AddEnvVar("testKey3", "nope") - assert.Equal(t, 3, len(tmd.GetEnvironment())) - assert.Equal(t, "testVal3", tmd.GetEnvironment()["testKey3"]) - expectedErrorMsg := "EnvKey: testKey3 already exists; will not add EnvVal: nope" - assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult) -} - -func TestUpdateEnvVar(t *testing.T) { - testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} - tmd := 
NewManagedDaemon(TestImageName, TestImageCanonicalRef, TestImagePath, testMountPoint1, "") - tmd.SetEnvironment(map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"}) - // test update EnvKey - errResult := tmd.UpdateEnvVar("TestKey2", "TestValNew") - assert.Nil(t, errResult) - assert.Equal(t, 2, len(tmd.GetEnvironment())) - assert.Equal(t, "TestValNew", tmd.GetEnvironment()["TestKey2"]) - - // test update non-existing EnvKey -- should fail - errResult = tmd.UpdateEnvVar("testKey3", "nope") - assert.Equal(t, 2, len(tmd.GetEnvironment())) - expectedErrorMsg := "EnvKey: testKey3 not found; will not update EnvVal: nope" - assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult) -} - -func TestDeleteEnvVar(t *testing.T) { - testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} - tmd := NewManagedDaemon(TestImageName, TestImageCanonicalRef, TestImagePath, testMountPoint1, "") - tmd.SetEnvironment(map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"}) - // test delete EnvKey - errResult := tmd.DeleteEnvVar("TestKey2") - assert.Nil(t, errResult) - assert.Equal(t, 1, len(tmd.GetEnvironment())) - - // test delete non-existing EnvKey -- should fail - errResult = tmd.DeleteEnvVar("testKey3") - assert.Equal(t, 1, len(tmd.GetEnvironment())) - expectedErrorMsg := "EnvKey: testKey3 not found; will not delete" - assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult) -} - -func TestGetDockerHealthCheckConfig(t *testing.T) { - testHealthCheck := []string{"echo", "test"} - testHealthInterval := 1 * time.Minute - testHealthTimeout := 2 * time.Minute - testHealthRetries := 5 - expectedDockerCheck := &dockercontainer.HealthConfig{ - Test: testHealthCheck, - Interval: testHealthInterval, - Timeout: testHealthTimeout, - Retries: testHealthRetries, - } - testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} - tmd := 
NewManagedDaemon(TestImageName, TestImageCanonicalRef, TestImagePath, testMountPoint1, "") - tmd.SetHealthCheck(testHealthCheck, testHealthInterval, testHealthTimeout, testHealthRetries) - dockerCheck := tmd.GetDockerHealthConfig() - assert.Equal(t, expectedDockerCheck, dockerCheck) -} diff --git a/ecs-agent/manageddaemon/managed_daemon.go b/ecs-agent/manageddaemon/managed_daemon.go new file mode 100644 index 00000000000..23f3558314f --- /dev/null +++ b/ecs-agent/manageddaemon/managed_daemon.go @@ -0,0 +1,326 @@ +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package manageddaemon + +import ( + "fmt" + "time" + + dockercontainer "github.com/docker/docker/api/types/container" +) + +const ( + imageTarPath = "/var/lib/ecs/deps/daemons" + imageTagDefault = "latest" + defaultAgentCommunicationPathHostRoot = "/var/run/ecs" + defaultApplicationLogPathHostRoot = "/var/log/ecs" + defaultAgentCommunicationMount = "agentCommunicationMount" + defaultApplicationLogMount = "applicationLogMount" +) + +type ManagedDaemon struct { + imageName string + imageRef string + imageTag string + + healthCheckTest []string + healthCheckInterval time.Duration + healthCheckTimeout time.Duration + healthCheckRetries int + + // Daemons require an agent <-> daemon mount + // identified by the volume name `agentCommunicationMount` + // the SourceVolumeHostPath will always be overridden to + // /var/run/ecs/ + agentCommunicationMount *MountPoint + + // Daemons require an application log mount + // identified by the volume name `applicationLogMount` + // the SourceVolumeHostPath will always be overridden to + // /var/log/ecs/ + applicationLogMount *MountPoint + + mountPoints []*MountPoint + environment map[string]string + + loadedDaemonImageRef string +} + +// A valid managed daemon will require +// healthcheck and mount points to be added +func NewManagedDaemon( + imageName string, + imageRef string, + imageTag string, +) *ManagedDaemon { + if imageTag == "" { + imageTag = imageTagDefault + } + // health check retries 0 is valid + // we'll override this default to -1 + newManagedDaemon := &ManagedDaemon{ + imageName: imageName, + imageRef: imageRef, + imageTag: imageTag, + healthCheckRetries: -1, + } + return newManagedDaemon +} + +// ImportAll function will parse/validate all managed daemon definitions +// defined in /var/lib/ecs/deps/daemons and will return an array +// of valid ManagedDeamon objects +func ImportAll() []*ManagedDaemon { + // TODO parse taskdef json files in /deps/daemons + // TODO validate that each daemon has a corresponding image 
tar + ebsManagedDaemon := NewManagedDaemon("ebs-csi-driver", + "public.ecr.aws/ebs-csi-driver/aws-ebs-csi-driver", + "v1.20.0") + // TODO add healthcheck + // TODO add mount points + return []*ManagedDaemon{ebsManagedDaemon} +} + +func (md *ManagedDaemon) SetHealthCheck( + healthCheckTest []string, + healthCheckInterval time.Duration, + healthCheckTimeout time.Duration, + healthCheckRetries int) { + md.healthCheckInterval = healthCheckInterval + md.healthCheckTimeout = healthCheckTimeout + md.healthCheckRetries = healthCheckRetries + md.healthCheckTest = make([]string, len(healthCheckTest)) + copy(md.healthCheckTest, healthCheckTest) +} + +func (md *ManagedDaemon) GetImageName() string { + return md.imageName +} + +func (md *ManagedDaemon) GetImageRef() string { + return md.imageRef +} + +func (md *ManagedDaemon) GetImageTag() string { + return md.imageTag +} + +func (md *ManagedDaemon) GetImageCanonicalRef() string { + return (fmt.Sprintf("%s:%s", md.imageRef, md.imageTag)) +} + +func (md *ManagedDaemon) GetImageTarPath() string { + return (fmt.Sprintf("%s/%s", imageTarPath, md.imageName)) +} + +func (md *ManagedDaemon) GetAgentCommunicationMount() *MountPoint { + return md.agentCommunicationMount +} + +func (md *ManagedDaemon) GetApplicationLogMount() *MountPoint { + return md.applicationLogMount +} + +// returns list of mountpoints without the +// agentCommunicationMount and applicationLogMount +func (md *ManagedDaemon) GetFilteredMountPoints() []*MountPoint { + filteredMounts := make([]*MountPoint, len(md.mountPoints)) + copy(filteredMounts, md.mountPoints) + return filteredMounts +} + +// returns list of mountpoints which (re)integrates +// agentCommunicationMount and applicationLogMount +// these will always include host mount file overrides +func (md *ManagedDaemon) GetMountPoints() []*MountPoint { + allMounts := make([]*MountPoint, len(md.mountPoints)) + copy(allMounts, md.mountPoints) + allMounts = append(allMounts, md.agentCommunicationMount) + allMounts 
= append(allMounts, md.applicationLogMount) + return allMounts +} + +func (md *ManagedDaemon) GetEnvironment() map[string]string { + return md.environment +} + +func (md *ManagedDaemon) GetLoadedDaemonImageRef() string { + return md.loadedDaemonImageRef +} + +// filter mount points for agentCommunicationMount +// set required mounts +// and override host paths in favor of agent defaults +// when a duplicate SourceVolumeID is given, the last Mount wins +func (md *ManagedDaemon) SetMountPoints(mountPoints []*MountPoint) error { + var mountPointMap = make(map[string]*MountPoint) + for _, mp := range mountPoints { + if mp.SourceVolumeID == defaultAgentCommunicationMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultAgentCommunicationPathHostRoot, md.imageName) + md.agentCommunicationMount = mp + } else if mp.SourceVolumeID == defaultApplicationLogMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultApplicationLogPathHostRoot, md.imageName) + md.applicationLogMount = mp + } else { + mountPointMap[mp.SourceVolumeID] = mp + } + } + mountResult := []*MountPoint{} + for _, mp := range mountPointMap { + mountResult = append(mountResult, mp) + } + md.mountPoints = mountResult + return nil +} + +// Used to set or to update the agentCommunicationMount +func (md *ManagedDaemon) SetAgentCommunicationMount(mp *MountPoint) error { + if mp.SourceVolumeID == defaultAgentCommunicationMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultAgentCommunicationPathHostRoot, md.imageName) + md.agentCommunicationMount = mp + return nil + } else { + return fmt.Errorf("AgentCommunicationMount %s must have a SourceVolumeID of %s", mp.SourceVolumeID, defaultAgentCommunicationMount) + } +} + +// Used to set or to update the applicationLogMount +func (md *ManagedDaemon) SetApplicationLogMount(mp *MountPoint) error { + if mp.SourceVolumeID == defaultApplicationLogMount { + mp.SourceVolumeHostPath = fmt.Sprintf("%s/%s/", defaultApplicationLogPathHostRoot, 
md.imageName) + md.applicationLogMount = mp + return nil + } else { + return fmt.Errorf("ApplicationLogMount %s must have a SourceVolumeID of %s", mp.SourceVolumeID, defaultApplicationLogMount) + } +} + +func (md *ManagedDaemon) SetEnvironment(environment map[string]string) { + md.environment = make(map[string]string) + for key, val := range environment { + md.environment[key] = val + } +} + +func (md *ManagedDaemon) SetLoadedDaemonImageRef(loadedImageRef string) { + md.loadedDaemonImageRef = loadedImageRef +} + +// AddMountPoint will add by MountPoint.SourceVolume +// which is unique to the task and is a required field +// and will throw an error if an existing +// MountPoint.SourceVolume is found +func (md *ManagedDaemon) AddMountPoint(mp *MountPoint) error { + mountIndex := md.GetMountPointIndex(mp) + if mountIndex != -1 { + return fmt.Errorf("MountPoint already exists at index %d", mountIndex) + } + md.mountPoints = append(md.mountPoints, mp) + return nil +} + +// UpdateMountPoint will update by +// MountPoint.SourceVolume which is unique to the task +// and will throw an error if the MountPoint.SourceVolume +// is not found +func (md *ManagedDaemon) UpdateMountPointBySourceVolume(mp *MountPoint) error { + mountIndex := md.GetMountPointIndex(mp) + if mountIndex < 0 { + return fmt.Errorf("MountPoint %s not found; will not update", mp.SourceVolume) + } + md.mountPoints[mountIndex] = mp + return nil +} + +// UpdateMountPoint will delete by +// MountPoint.SourceVolume which is unique to the task +// and will throw an error if the MountPoint.SourceVolume +// is not found +func (md *ManagedDaemon) DeleteMountPoint(mp *MountPoint) error { + mountIndex := md.GetMountPointIndex(mp) + if mountIndex < 0 { + return fmt.Errorf("MountPoint %s not found; will not delete", mp.SourceVolume) + } + md.mountPoints = append(md.mountPoints[:mountIndex], md.mountPoints[mountIndex+1:]...) 
+ return nil +} + +// GetMountPointIndex will return index of a mountpoint or -1 +// search by the unique MountPoint.SourceVolume field +func (md *ManagedDaemon) GetMountPointIndex(mp *MountPoint) int { + sourceVolume := mp.SourceVolume + for i, mount := range md.mountPoints { + if mount.SourceVolume == sourceVolume { + return i + } + } + return -1 +} + +// AddEnvVar will add by envKey +// and will throw an error if an existing +// envKey is found +func (md *ManagedDaemon) AddEnvVar(envKey string, envVal string) error { + _, exists := md.environment[envKey] + if !exists { + md.environment[envKey] = envVal + return nil + } + return fmt.Errorf("EnvKey: %s already exists; will not add EnvVal: %s", envKey, envVal) +} + +// Updates environment varable by evnKey +// and will throw an error if the envKey +// is not found +func (md *ManagedDaemon) UpdateEnvVar(envKey string, envVal string) error { + _, ok := md.environment[envKey] + if !ok { + return fmt.Errorf("EnvKey: %s not found; will not update EnvVal: %s", envKey, envVal) + } + md.environment[envKey] = envVal + return nil +} + +// Deletes environment variable by envKey +// and will throw an error if the envKey +// is not found +func (md *ManagedDaemon) DeleteEnvVar(envKey string) error { + _, ok := md.environment[envKey] + if !ok { + return fmt.Errorf("EnvKey: %s not found; will not delete", envKey) + } + delete(md.environment, envKey) + return nil +} + +// Generates a DockerHealthConfig object from the +// ManagedDaeemon Health Check fields +func (md *ManagedDaemon) GetDockerHealthConfig() *dockercontainer.HealthConfig { + return &dockercontainer.HealthConfig{ + Test: md.healthCheckTest, + Interval: md.healthCheckInterval, + Timeout: md.healthCheckTimeout, + Retries: md.healthCheckRetries, + } +} + +// Validates that all required fields are present and valid +func (md *ManagedDaemon) IsValidManagedDaemon() bool { + isValid := true + isValid = isValid && (md.agentCommunicationMount != nil) + isValid = isValid && 
(md.applicationLogMount != nil) + isValid = isValid && (len(md.healthCheckTest) != 0) + isValid = isValid && (md.healthCheckRetries != -1) + return isValid +} diff --git a/ecs-agent/manageddaemon/managed_daemon_test.go b/ecs-agent/manageddaemon/managed_daemon_test.go new file mode 100644 index 00000000000..a9974430e69 --- /dev/null +++ b/ecs-agent/manageddaemon/managed_daemon_test.go @@ -0,0 +1,362 @@ +//go:build unit +// +build unit + +// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"). You may +// not use this file except in compliance with the License. A copy of the +// License is located at +// +// http://aws.amazon.com/apache2.0/ +// +// or in the "license" file accompanying this file. This file is distributed +// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +// express or implied. See the License for the specific language governing +// permissions and limitations under the License. 
+ +package manageddaemon + +import ( + "fmt" + "testing" + "time" + + dockercontainer "github.com/docker/docker/api/types/container" + "github.com/stretchr/testify/assert" +) + +const ( + TestImageName = "TestDaemon" + TestImageRef = "TestImage" + TestImageTag = "testTag" + TestImagePath = "/test/image/path" + TestAgentPath = "/test/agent/path" + TestMountPointVolume = "testVolume" +) + +func TestNewManagedDaemon(t *testing.T) { + cases := []struct { + testName string + testImageName string + testImageTag string + testImageRef string + expectedDaemon *ManagedDaemon + }{ + { + testName: "All Fields", + testImageName: TestImageName, + testImageTag: TestImageTag, + testImageRef: TestImageRef, + expectedDaemon: &ManagedDaemon{imageName: TestImageName, imageTag: TestImageTag, imageRef: TestImageRef}, + }, + { + testName: "Missing Image Name", + testImageName: "", + testImageTag: TestImageTag, + testImageRef: TestImageRef, + expectedDaemon: &ManagedDaemon{imageName: "", imageTag: TestImageTag, imageRef: TestImageRef}, + }, + { + testName: "Missing Image Tag", + testImageName: TestImageName, + testImageTag: "", + testImageRef: TestImageRef, + expectedDaemon: &ManagedDaemon{imageName: TestImageName, imageTag: "", imageRef: TestImageRef}, + }, + { + testName: "Missing Image Ref", + testImageName: TestImageName, + testImageTag: TestImageTag, + testImageRef: "", + expectedDaemon: &ManagedDaemon{imageName: TestImageName, imageTag: TestImageTag, imageRef: ""}, + }, + } + + for _, c := range cases { + t.Run(c.testName, func(t *testing.T) { + assert.Equal(t, c.expectedDaemon.GetImageName(), c.testImageName, "Wrong value for Managed Daemon Image Name") + assert.Equal(t, c.expectedDaemon.GetImageTag(), c.testImageTag, "Wrong value for Managed Daemon Image Tag") + assert.Equal(t, c.expectedDaemon.GetImageRef(), c.testImageRef, "Wrong value for Managed Daemon Image Ref") + }) + } +} + +func TestSetMountPoints(t *testing.T) { + cases := []struct { + TestName string + TestAgentMount 
*MountPoint + TestMountCount int + ExpectedMountCount int + }{ + { + TestName: "No Mounts", + TestAgentMount: &MountPoint{SourceVolumeID: TestMountPointVolume, ReadOnly: true}, + TestMountCount: 0, + ExpectedMountCount: 0, + }, + { + TestName: "Single Mount", + TestAgentMount: &MountPoint{SourceVolumeID: TestMountPointVolume, ReadOnly: true}, + TestMountCount: 1, + ExpectedMountCount: 1, + }, + { + TestName: "Duplicate SourceVolumeID Mounts Last Mount Wins", + TestAgentMount: &MountPoint{SourceVolumeID: TestMountPointVolume}, + TestMountCount: 2, + ExpectedMountCount: 1, + }, + { + TestName: "Duplicate SourceVolumeID applicationLogMount Last Mount Wins", + TestAgentMount: &MountPoint{SourceVolumeID: "applicationLogMount"}, + TestMountCount: 2, + ExpectedMountCount: 0, + }, + } + + for _, c := range cases { + t.Run(c.TestName, func(t *testing.T) { + tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag) + mountPoints := []*MountPoint{} + mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "agentCommunicationMount"}) + mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "applicationLogMount"}) + for i := 0; i < c.TestMountCount; i++ { + mountPoints = append(mountPoints, c.TestAgentMount) + } + tmd.SetMountPoints(mountPoints) + assert.Equal(t, c.ExpectedMountCount, len(tmd.GetFilteredMountPoints()), "Wrong value for Set Managed Daemon Mounts") + // validate required mount points + expectedAgentCommunicationMount := fmt.Sprintf("/var/run/ecs/%s/", TestImageName) + expectedApplicationLogMount := fmt.Sprintf("/var/log/ecs/%s/", TestImageName) + assert.Equal(t, expectedAgentCommunicationMount, tmd.GetAgentCommunicationMount().SourceVolumeHostPath) + assert.Equal(t, expectedApplicationLogMount, tmd.GetApplicationLogMount().SourceVolumeHostPath) + }) + } +} + +func TestAddMountPoint(t *testing.T) { + testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} + testMountPoint2 := &MountPoint{SourceVolume: "TestMountPointVolume2"} + 
mountPoints := []*MountPoint{} + mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "agentCommunicationMount"}) + mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "applicationLogMount"}) + mountPoints = append(mountPoints, testMountPoint1) + tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag) + tmd.SetMountPoints(mountPoints) + // test add valid mount point + errResult := tmd.AddMountPoint(testMountPoint2) + assert.Equal(t, 2, len(tmd.GetFilteredMountPoints())) + assert.Equal(t, "TestMountPointVolume2", tmd.GetFilteredMountPoints()[1].SourceVolume) + assert.Nil(t, errResult) + + // test add existing mount point -- should fail + errResult = tmd.AddMountPoint(testMountPoint2) + expectedErrorMsg := "MountPoint already exists at index 1" + assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult) +} + +func TestGetMountPointsFilteredUnfiltered(t *testing.T) { + testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"} + testMountPoint2 := &MountPoint{SourceVolume: "TestMountPointVolume2"} + mountPoints := []*MountPoint{} + mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "agentCommunicationMount"}) + mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "applicationLogMount"}) + mountPoints = append(mountPoints, testMountPoint1) + tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag) + tmd.SetMountPoints(mountPoints) + // test add valid mount point + errResult := tmd.AddMountPoint(testMountPoint2) + assert.Equal(t, 2, len(tmd.GetFilteredMountPoints())) + assert.Equal(t, 4, len(tmd.GetMountPoints())) + assert.Nil(t, errResult) + + // test add existing mount point -- should fail + errResult = tmd.AddMountPoint(testMountPoint2) + expectedErrorMsg := "MountPoint already exists at index 1" + assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult) +} + +func TestUpdateMountPoint(t 
*testing.T) {
+	testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"}
+	mountPoints := []*MountPoint{}
+	mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "agentCommunicationMount"})
+	mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "applicationLogMount"})
+	mountPoints = append(mountPoints, testMountPoint1)
+	tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag)
+	tmd.SetMountPoints(mountPoints)
+	assert.Equal(t, 1, len(tmd.GetFilteredMountPoints()))
+	assert.False(t, tmd.GetFilteredMountPoints()[0].ReadOnly)
+
+	// test update existing mount point
+	// NOTE(review): this first errResult is reassigned below without being
+	// asserted nil; consider adding assert.Nil(t, errResult) here.
+	updatedMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1", ReadOnly: true}
+	errResult := tmd.UpdateMountPointBySourceVolume(updatedMountPoint1)
+	assert.Equal(t, 1, len(tmd.GetFilteredMountPoints()))
+	assert.True(t, tmd.GetMountPoints()[0].ReadOnly)
+
+	// test update non-existing mount point
+	testMountPoint2 := &MountPoint{SourceVolume: "TestMountPointVolume2"}
+	errResult = tmd.UpdateMountPointBySourceVolume(testMountPoint2)
+	expectedErrorMsg := "MountPoint TestMountPointVolume2 not found; will not update"
+	assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult)
+}
+
+// TestDeleteMountPoint verifies that deleting a missing mount point returns
+// a not-found error and that deleting an existing one removes it.
+func TestDeleteMountPoint(t *testing.T) {
+	testMountPoint1 := &MountPoint{SourceVolume: "TestMountPointVolume1"}
+	mountPoints := []*MountPoint{}
+	mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "agentCommunicationMount"})
+	mountPoints = append(mountPoints, &MountPoint{SourceVolumeID: "applicationLogMount"})
+	mountPoints = append(mountPoints, testMountPoint1)
+	tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag)
+	tmd.SetMountPoints(mountPoints)
+	assert.Equal(t, 1, len(tmd.GetFilteredMountPoints()))
+	assert.False(t, tmd.GetMountPoints()[0].ReadOnly)
+	// test delete non-existing mount point
+	testMountPoint2 := &MountPoint{SourceVolume: "TestMountPointVolume2"}
+	errResult := 
tmd.DeleteMountPoint(testMountPoint2)
+	assert.Equal(t, 1, len(tmd.GetFilteredMountPoints()))
+	expectedErrorMsg := "MountPoint TestMountPointVolume2 not found; will not delete"
+	assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult)
+
+	// test delete existing mount point
+	// NOTE(review): errResult from this delete is never asserted nil, and the
+	// length check below reaches into the unexported mountPoints field
+	// instead of using the GetMountPoints accessor used elsewhere.
+	errResult = tmd.DeleteMountPoint(testMountPoint1)
+	assert.Equal(t, 0, len(tmd.mountPoints))
+}
+
+// TestSetEnvironment verifies that SetEnvironment stores environment maps of
+// various sizes, including a nil map (zero-length environment).
+func TestSetEnvironment(t *testing.T) {
+	cases := []struct {
+		TestName           string
+		TestEnvironmentMap map[string]string
+	}{
+		{
+			TestName:           "Missing Map",
+			TestEnvironmentMap: nil,
+		},
+		{
+			TestName:           "Single Element Map",
+			TestEnvironmentMap: map[string]string{"testKey1": "testVal1"},
+		},
+		{
+			TestName:           "Multi Map",
+			TestEnvironmentMap: map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"},
+		},
+	}
+
+	for _, c := range cases {
+		t.Run(c.TestName, func(t *testing.T) {
+			tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag)
+			tmd.SetEnvironment(c.TestEnvironmentMap)
+			assert.Equal(t, len(c.TestEnvironmentMap), len(tmd.GetEnvironment()), "Wrong value for Set Environment")
+		})
+	}
+}
+
+// TestAddEnvVar verifies adding a new environment variable and that adding a
+// duplicate key fails without overwriting the existing value.
+func TestAddEnvVar(t *testing.T) {
+	tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag)
+	tmd.SetEnvironment(map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"})
+	// test add new EnvKey
+	errResult := tmd.AddEnvVar("testKey3", "testVal3")
+	assert.Nil(t, errResult)
+	assert.Equal(t, 3, len(tmd.GetEnvironment()))
+	assert.Equal(t, "testVal3", tmd.GetEnvironment()["testKey3"])
+
+	// test add existing EnvKey -- should fail
+	errResult = tmd.AddEnvVar("testKey3", "nope")
+	assert.Equal(t, 3, len(tmd.GetEnvironment()))
+	assert.Equal(t, "testVal3", tmd.GetEnvironment()["testKey3"])
+	expectedErrorMsg := "EnvKey: testKey3 already exists; will not add EnvVal: nope"
+	assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult)
+}
+
+// TestUpdateEnvVar verifies updating the value of an existing key and that
+// updating a missing key returns a not-found error.
+func
TestUpdateEnvVar(t *testing.T) {
+	tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag)
+	tmd.SetEnvironment(map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"})
+	// test update EnvKey
+	errResult := tmd.UpdateEnvVar("TestKey2", "TestValNew")
+	assert.Nil(t, errResult)
+	assert.Equal(t, 2, len(tmd.GetEnvironment()))
+	assert.Equal(t, "TestValNew", tmd.GetEnvironment()["TestKey2"])
+
+	// test update non-existing EnvKey -- should fail
+	errResult = tmd.UpdateEnvVar("testKey3", "nope")
+	assert.Equal(t, 2, len(tmd.GetEnvironment()))
+	expectedErrorMsg := "EnvKey: testKey3 not found; will not update EnvVal: nope"
+	assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult)
+}
+
+// TestDeleteEnvVar verifies removing an existing key and that deleting a
+// missing key returns a not-found error.
+func TestDeleteEnvVar(t *testing.T) {
+	tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag)
+	tmd.SetEnvironment(map[string]string{"testKey1": "testVal1", "TestKey2": "TestVal2"})
+	// test delete EnvKey
+	errResult := tmd.DeleteEnvVar("TestKey2")
+	assert.Nil(t, errResult)
+	assert.Equal(t, 1, len(tmd.GetEnvironment()))
+
+	// test delete non-existing EnvKey -- should fail
+	errResult = tmd.DeleteEnvVar("testKey3")
+	assert.Equal(t, 1, len(tmd.GetEnvironment()))
+	expectedErrorMsg := "EnvKey: testKey3 not found; will not delete"
+	assert.EqualErrorf(t, errResult, expectedErrorMsg, "Error should be: %v, got: %v", expectedErrorMsg, errResult)
+}
+
+// TestGetDockerHealthCheckConfig verifies that health check settings applied
+// via SetHealthCheck round-trip into an equivalent
+// dockercontainer.HealthConfig from GetDockerHealthConfig.
+func TestGetDockerHealthCheckConfig(t *testing.T) {
+	testHealthCheck := []string{"echo", "test"}
+	testHealthInterval := 1 * time.Minute
+	testHealthTimeout := 2 * time.Minute
+	testHealthRetries := 5
+	expectedDockerCheck := &dockercontainer.HealthConfig{
+		Test:     testHealthCheck,
+		Interval: testHealthInterval,
+		Timeout:  testHealthTimeout,
+		Retries:  testHealthRetries,
+	}
+	tmd := NewManagedDaemon(TestImageName, TestImageRef, TestImageTag)
+	tmd.SetHealthCheck(testHealthCheck, testHealthInterval, testHealthTimeout, testHealthRetries)
+	dockerCheck := 
tmd.GetDockerHealthConfig()
+	assert.Equal(t, expectedDockerCheck, dockerCheck)
+}
+
+// TestIsValidManagedDaemon table-tests the validity check: per the cases
+// below, both required internal mounts and a non-empty health check command
+// must be present for a daemon to be valid.
+func TestIsValidManagedDaemon(t *testing.T) {
+	testAgentCommunicationMount := &MountPoint{SourceVolumeID: "agentCommunicationMount"}
+	testApplicationLogMount := &MountPoint{SourceVolumeID: "applicationLogMount"}
+	testHealthCheck := []string{"test"}
+	cases := []struct {
+		TestName       string
+		TestDaemon     *ManagedDaemon
+		ExpectedResult bool
+	}{
+		{
+			TestName: "All Valid",
+			TestDaemon: &ManagedDaemon{agentCommunicationMount: testAgentCommunicationMount,
+				applicationLogMount: testApplicationLogMount,
+				healthCheckTest:     testHealthCheck,
+				healthCheckRetries:  0},
+			ExpectedResult: true,
+		},
+		{
+			TestName: "Missing Required Agent communication Mount",
+			TestDaemon: &ManagedDaemon{applicationLogMount: testApplicationLogMount,
+				healthCheckTest:    testHealthCheck,
+				healthCheckRetries: 0},
+			ExpectedResult: false,
+		},
+		{
+			TestName: "Missing Required Log Mount",
+			TestDaemon: &ManagedDaemon{agentCommunicationMount: testAgentCommunicationMount,
+				healthCheckTest:    testHealthCheck,
+				healthCheckRetries: 0},
+			ExpectedResult: false,
+		},
+		{
+			TestName: "Missing health check",
+			TestDaemon: &ManagedDaemon{agentCommunicationMount: testAgentCommunicationMount,
+				applicationLogMount: testApplicationLogMount,
+				healthCheckRetries:  0},
+			ExpectedResult: false,
+		},
+	}
+	for _, c := range cases {
+		t.Run(c.TestName, func(t *testing.T) {
+			// NOTE(review): arguments are passed as (actual, expected);
+			// testify's convention is (t, expected, actual). Harmless for an
+			// equality check but inconsistent with the rest of this file.
+			assert.Equal(t, c.TestDaemon.IsValidManagedDaemon(), c.ExpectedResult, "Wrong result for valid daemon check")
+		})
+	}
+}
diff --git a/ecs-agent/manageddaemon/mountpoint.go b/ecs-agent/manageddaemon/mountpoint.go
new file mode 100644
index 00000000000..e3f48f1b5ad
--- /dev/null
+++ b/ecs-agent/manageddaemon/mountpoint.go
@@ -0,0 +1,29 @@
+// Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License"). You may
+// not use this file except in compliance with the License. 
A copy of the
+// License is located at
+//
+// http://aws.amazon.com/apache2.0/
+//
+// or in the "license" file accompanying this file. This file is distributed
+// on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+// express or implied. See the License for the specific language governing
+// permissions and limitations under the License.
+
+package manageddaemon
+
+// MountPoint describes a single volume mount attached to a managed daemon
+// container, serialized with PascalCase JSON keys.
+type MountPoint struct {
+	// SourceVolumeID is used to identify the task volume globally; it's empty
+	// for internal mount points.
+	SourceVolumeID string `json:"SourceVolumeID"`
+	// SourceVolume is the name of the source volume, it's unique within the task.
+	SourceVolume string `json:"SourceVolume,omitempty"`
+	// SourceVolumeType is the type (EFS, EBS, or host) of the volume. EBS Volumes in particular are
+	// treated differently within the microVM, and need special consideration in determining the microVM path.
+	SourceVolumeType string `json:"SourceVolumeType,omitempty"`
+	// SourceVolumeHostPath is the path backing the volume on the host
+	// (e.g. /var/run/ecs/<daemon>/ for the agent communication mount).
+	SourceVolumeHostPath string `json:"SourceVolumeHostPath,omitempty"`
+	// ContainerPath is the path at which the volume is mounted inside the
+	// container.
+	ContainerPath string `json:"ContainerPath,omitempty"`
+	// ReadOnly mounts the volume read-only when true.
+	ReadOnly bool `json:"ReadOnly,omitempty"`
+	// Internal marks mounts managed internally by the agent rather than
+	// declared by the task — presumably what the filtered accessors exclude;
+	// verify against GetFilteredMountPoints.
+	Internal bool `json:"Internal,omitempty"`
+}