diff --git a/.buildkite/integration.pipeline.yml b/.buildkite/integration.pipeline.yml
index 7d1b9616868..7b20d688c01 100644
--- a/.buildkite/integration.pipeline.yml
+++ b/.buildkite/integration.pipeline.yml
@@ -115,7 +115,9 @@ steps:
         - "build/diagnostics/*"
     agents:
       provider: "gcp"
+      machineType: "c2-standard-16"
       image: "family/core-ubuntu-2204"
+      diskSizeGb: 400
     notify:
       - github_commit_status:
           context: "buildkite/elastic-agent-extended-testing - Kubernetes Integration tests"
diff --git a/dev-tools/mage/checksums.go b/dev-tools/mage/checksums.go
index 318974be8d7..100d138195c 100644
--- a/dev-tools/mage/checksums.go
+++ b/dev-tools/mage/checksums.go
@@ -108,9 +108,9 @@ func ChecksumsWithManifest(requiredPackage string, versionedFlatPath string, ver
         // Only care about packages that match the required package constraint (os/arch)
         if strings.Contains(pkgName, requiredPackage) {
             // Iterate over the external binaries that we care about for packaging agent
-            for binary := range manifest.ExpectedBinaries {
+            for _, spec := range manifest.ExpectedBinaries {
                 // If the individual package doesn't match the expected prefix, then continue
-                if !strings.HasPrefix(pkgName, binary) {
+                if !strings.HasPrefix(pkgName, spec.BinaryName) {
                     continue
                 }
 
@@ -215,14 +215,14 @@ func getComponentVersion(componentName string, requiredPackage string, component
     // Iterate over all the packages in the component project
     for pkgName := range componentProject.Packages {
         // Only care about the external binaries that we want to package
-        for binary, project := range manifest.ExpectedBinaries {
+        for _, spec := range manifest.ExpectedBinaries {
             // If the given component name doesn't match the external binary component, skip
-            if componentName != project.Name {
+            if componentName != spec.ProjectName {
                 continue
             }
 
             // Split the package name on the binary name prefix plus a dash
-            firstSplit := strings.Split(pkgName, binary+"-")
+            firstSplit := strings.Split(pkgName, spec.BinaryName+"-")
             if len(firstSplit) < 2 {
                 continue
             }
diff --git a/dev-tools/mage/dockerbuilder.go b/dev-tools/mage/dockerbuilder.go
index f99f4f1dcdb..0e0bd2078ca 100644
--- a/dev-tools/mage/dockerbuilder.go
+++ b/dev-tools/mage/dockerbuilder.go
@@ -46,7 +46,7 @@ func (b *dockerBuilder) Build() error {
     }
 
     if err := b.copyFiles(); err != nil {
-        return err
+        return fmt.Errorf("error copying files for docker variant %q: %w", b.DockerVariant, err)
     }
 
     if err := b.prepareBuild(); err != nil {
diff --git a/dev-tools/mage/dockervariants.go b/dev-tools/mage/dockervariants.go
index da8f777d059..40b49be181d 100644
--- a/dev-tools/mage/dockervariants.go
+++ b/dev-tools/mage/dockervariants.go
@@ -17,6 +17,7 @@ const (
     complete      = "complete"
     completeWolfi = "complete-wolfi"
     cloud         = "cloud"
+    service       = "service"
 )
 
 // DockerVariant defines the docker variant to build.
@@ -31,6 +32,7 @@ const (
     WolfiComplete
     Complete
     Cloud
+    Service
 )
 
 // String returns the name of the docker variant type.
@@ -50,6 +52,8 @@ func (typ DockerVariant) String() string {
         return complete
     case Cloud:
         return cloud
+    case Service:
+        return service
     default:
         return invalid
     }
@@ -77,6 +81,8 @@ func (typ *DockerVariant) UnmarshalText(text []byte) error {
         *typ = Complete
     case cloud:
         *typ = Cloud
+    case service:
+        *typ = Service
     default:
         return fmt.Errorf("unknown docker variant: %v", string(text))
     }
diff --git a/dev-tools/mage/manifest/manifest.go b/dev-tools/mage/manifest/manifest.go
index 6efc989ba1a..09ff95e27e6 100644
--- a/dev-tools/mage/manifest/manifest.go
+++ b/dev-tools/mage/manifest/manifest.go
@@ -94,21 +94,23 @@ var PlatformPackages = map[string]string{
 // ExpectedBinaries is a map of binaries agent needs to their project in the unified-release manager.
 // The project names are those used in the "projects" list in the unified release manifest.
 // See the sample manifests in the testdata directory.
-var ExpectedBinaries = map[string]BinarySpec{
-    "agentbeat":             {Name: "beats", Platforms: AllPlatforms},
-    "apm-server":            {Name: "apm-server", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}, {"windows", "x86_64"}, {"darwin", "x86_64"}}},
-    "cloudbeat":             {Name: "cloudbeat", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
-    "cloud-defend":          {Name: "cloud-defend", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
-    "endpoint-security":     {Name: "endpoint-dev", Platforms: AllPlatforms},
-    "fleet-server":          {Name: "fleet-server", Platforms: AllPlatforms},
-    "pf-elastic-collector":  {Name: "prodfiler", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
-    "pf-elastic-symbolizer": {Name: "prodfiler", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
-    "pf-host-agent":         {Name: "prodfiler", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
+var ExpectedBinaries = []BinarySpec{
+    {BinaryName: "agentbeat", ProjectName: "beats", Platforms: AllPlatforms},
+    {BinaryName: "apm-server", ProjectName: "apm-server", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}, {"windows", "x86_64"}, {"darwin", "x86_64"}}},
+    {BinaryName: "cloudbeat", ProjectName: "cloudbeat", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
+    {BinaryName: "connectors", ProjectName: "connectors", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}, PythonWheel: true},
+    {BinaryName: "endpoint-security", ProjectName: "endpoint-dev", Platforms: AllPlatforms},
+    {BinaryName: "fleet-server", ProjectName: "fleet-server", Platforms: AllPlatforms},
+    {BinaryName: "pf-elastic-collector", ProjectName: "prodfiler", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
+    {BinaryName: "pf-elastic-symbolizer", ProjectName: "prodfiler", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
+    {BinaryName: "pf-host-agent", ProjectName: "prodfiler", Platforms: []Platform{{"linux", "x86_64"}, {"linux", "arm64"}}},
 }
 
 type BinarySpec struct {
-    Name      string
-    Platforms []Platform
+    BinaryName  string
+    ProjectName string
+    Platforms   []Platform
+    PythonWheel bool
 }
 
 func (proj BinarySpec) SupportsPlatform(platform string) bool {
@@ -120,6 +122,13 @@ func (proj BinarySpec) SupportsPlatform(platform string) bool {
     return false
 }
 
+func (proj BinarySpec) GetPackageName(version string, platform string) string {
+    if proj.PythonWheel {
+        return fmt.Sprintf("%s-%s.zip", proj.BinaryName, version)
+    }
+    return fmt.Sprintf("%s-%s-%s", proj.BinaryName, version, PlatformPackages[platform])
+}
+
 type Platform struct {
     OS   string
     Arch string
@@ -188,27 +197,27 @@ func DownloadComponents(ctx context.Context, manifest string, platforms []string
     errGrp, downloadsCtx := errgroup.WithContext(ctx)
     // for project, pkgs := range expectedProjectPkgs() {
-    for binary, project := range ExpectedBinaries {
+    for _, spec := range ExpectedBinaries {
         for _, platform := range platforms {
             targetPath := filepath.Join(dropPath)
             err := os.MkdirAll(targetPath, 0755)
             if err != nil {
                 return fmt.Errorf("failed to create directory %s", targetPath)
             }
-            log.Printf("+++ Prepare to download project [%s] for [%s]", project.Name, platform)
+            log.Printf("+++ Prepare to download [%s] project [%s] for [%s]", spec.BinaryName, spec.ProjectName, platform)
 
-            if !project.SupportsPlatform(platform) {
-                log.Printf(">>>>>>>>> Binary [%s] does not support platform [%s] ", binary, platform)
+            if !spec.SupportsPlatform(platform) {
+                log.Printf(">>>>>>>>> Binary [%s] does not support platform [%s] ", spec.BinaryName, platform)
                 continue
             }
 
-            pkgURL, err := resolveManifestPackage(projects[project.Name], binary, PlatformPackages[platform], majorMinorPatchVersion)
+            pkgURL, err := resolveManifestPackage(projects[spec.ProjectName], spec, majorMinorPatchVersion, platform)
             if err != nil {
                 return err
             }
             for _, p := range pkgURL {
-                log.Printf(">>>>>>>>> Downloading [%s] [%s] ", binary, p)
+                log.Printf(">>>>>>>>> Downloading [%s] [%s] ", spec.BinaryName, p)
                 pkgFilename := path.Base(p)
                 downloadTarget := filepath.Join(targetPath, pkgFilename)
                 if _, err := os.Stat(downloadTarget); err != nil {
@@ -229,35 +238,35 @@ func DownloadComponents(ctx context.Context, manifest string, platforms []string
     return nil
 }
 
-func resolveManifestPackage(project Project, binary string, platformPkg string, version string) ([]string, error) {
+func resolveManifestPackage(project Project, spec BinarySpec, version string, platform string) ([]string, error) {
     var val Package
     var ok bool
 
     // Try the normal/easy case first
-    packageName := fmt.Sprintf("%s-%s-%s", binary, version, platformPkg)
+    packageName := spec.GetPackageName(version, platform)
     val, ok = project.Packages[packageName]
     if !ok {
         // If we didn't find it, it may be an Independent Agent Release, where
         // the opted-in projects will have a patch version one higher than
         // the rest of the projects, so we need to seek that out
         if mg.Verbose() {
-            log.Printf(">>>>>>>>>>> Looking for package [%s] of type [%s]", binary, platformPkg)
+            log.Printf(">>>>>>>>>>> Looking for package [%s] of type [%s]", spec.BinaryName, PlatformPackages[platform])
         }
 
         var foundIt bool
         for pkgName := range project.Packages {
-            if strings.HasPrefix(pkgName, binary) {
-                firstSplit := strings.Split(pkgName, binary+"-")
+            if strings.HasPrefix(pkgName, spec.BinaryName) {
+                firstSplit := strings.Split(pkgName, spec.BinaryName+"-")
                 if len(firstSplit) < 2 {
                     continue
                 }
                 secondHalf := firstSplit[1]
                 // Make sure we're finding one w/ the same required package type
-                if strings.Contains(secondHalf, platformPkg) {
+                if strings.Contains(secondHalf, PlatformPackages[platform]) {
                     // Split again after the version with the required package string
-                    secondSplit := strings.Split(secondHalf, "-"+platformPkg)
+                    secondSplit := strings.Split(secondHalf, "-"+PlatformPackages[platform])
                     if len(secondSplit) < 2 {
                         continue
                     }
@@ -269,7 +278,7 @@ func resolveManifestPackage(project Project, binary string, platformPkg string,
                     }
 
                     // Create a project/package key with the package, derived version, and required package
-                    foundPkgKey := fmt.Sprintf("%s-%s-%s", binary, pkgVersion, platformPkg)
+                    foundPkgKey := fmt.Sprintf("%s-%s-%s", spec.BinaryName, pkgVersion, PlatformPackages[platform])
                     if mg.Verbose() {
                         log.Printf(">>>>>>>>>>> Looking for project package key: [%s]", foundPkgKey)
                     }
diff --git a/dev-tools/mage/manifest/manifest_test.go b/dev-tools/mage/manifest/manifest_test.go
index 11f6e34d5f1..b975149aa27 100644
--- a/dev-tools/mage/manifest/manifest_test.go
+++ b/dev-tools/mage/manifest/manifest_test.go
@@ -135,15 +135,15 @@ func TestResolveManifestPackage(t *testing.T) {
             projects := manifestJson.Projects
 
             // Verify the component name is in the list of expected packages.
-            project, ok := ExpectedBinaries[tc.binary]
+            spec, ok := findBinarySpec(tc.binary)
             assert.True(t, ok)
-            if !project.SupportsPlatform(tc.platform) {
-                t.Logf("Project %s does not support platform %s", project.Name, tc.platform)
+            if !spec.SupportsPlatform(tc.platform) {
+                t.Logf("Project %s does not support platform %s", spec.ProjectName, tc.platform)
                 return
             }
 
-            urlList, err := resolveManifestPackage(projects[tc.projectName], tc.binary, PlatformPackages[tc.platform], manifestJson.Version)
+            urlList, err := resolveManifestPackage(projects[tc.projectName], spec, manifestJson.Version, tc.platform)
             require.NoError(t, err)
 
             assert.Len(t, urlList, 3)
@@ -153,3 +153,12 @@ func TestResolveManifestPackage(t *testing.T) {
         })
     }
 }
+
+func findBinarySpec(name string) (BinarySpec, bool) {
+    for _, spec := range ExpectedBinaries {
+        if spec.BinaryName == name {
+            return spec, true
+        }
+    }
+    return BinarySpec{}, false
+}
diff --git a/dev-tools/packaging/files/linux/connectors.sh b/dev-tools/packaging/files/linux/connectors.sh
new file mode 100755
index 00000000000..81f95e24372
--- /dev/null
+++ b/dev-tools/packaging/files/linux/connectors.sh
@@ -0,0 +1,6 @@
+#!/usr/bin/env bash
+
+PY_AGENT_CLIENT_PATH=/usr/share/connectors
+PYTHON_PATH=$PY_AGENT_CLIENT_PATH/.venv/bin/python
+COMPONENT_PATH=$PY_AGENT_CLIENT_PATH/connectors/agent/cli.py
+$PYTHON_PATH $COMPONENT_PATH
diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml
index 813152fe056..ec7008748a9 100644
--- a/dev-tools/packaging/packages.yml
+++ b/dev-tools/packaging/packages.yml
@@ -291,6 +291,20 @@ shared:
         source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/agentbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz'
         mode: 0755
 
+  # the service build is based on the previous cloud variant
+  - &agent_docker_service_spec
+    docker_variant: 'service'
+    files:
+      'data/service/connectors-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}.zip':
+        source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/connectors-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}.zip'
+        mode: 0755
+      'data/{{.BeatName}}-{{ commit_short }}/components/connectors':
+        source: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/connectors.sh'
+        mode: 0755
+      'data/{{.BeatName}}-{{ commit_short }}/components/connectors.spec.yml':
+        source: '{{ elastic_beats_dir }}/specs/connectors.spec.yml'
+        mode: 0644
+
   # includes nodejs with @elastic/synthetics
   - &agent_docker_complete_spec
     <<: *agent_docker_spec
@@ -1025,6 +1039,35 @@ specs:
      files:
        '{{.BeatName}}{{.BinaryExt}}':
          source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+  #### Service specific docker images ####
+  - os: linux
+    arch: amd64
+    types: [ docker ]
+    spec:
+      <<: *agent_docker_spec
+      # The service image is always based on Wolfi
+      <<: *docker_wolfi_spec
+      <<: *docker_builder_spec
+      <<: *agent_docker_cloud_spec
+      <<: *agent_docker_service_spec
+      <<: *elastic_license_for_binaries
+      files:
+        '{{.BeatName}}{{.BinaryExt}}':
+          source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+  - os: linux
+    arch: arm64
+    types: [ docker ]
+    spec:
+      <<: *agent_docker_spec
+      # The service image is always based on Wolfi
+      <<: *docker_wolfi_arm_spec
+      <<: *docker_builder_arm_spec
+      <<: *agent_docker_cloud_spec
+      <<: *agent_docker_service_spec
+      <<: *elastic_license_for_binaries
+      files:
+        '{{.BeatName}}{{.BinaryExt}}':
+          source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
   - os: linux
     arch: amd64
     types: [docker]
diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
index b7e2ce14a8d..027a4241267 100644
--- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
+++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
@@ -43,7 +43,7 @@ RUN true && \
     chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \
 {{- end }}
 
-{{- if eq .Variant "cloud" }}
+{{- if or (eq .Variant "cloud") (eq .Variant "service") }}
     mkdir -p /opt/agentbeat /opt/filebeat /opt/metricbeat && \
     cp -f {{ $beatHome }}/data/cloud_downloads/filebeat.sh /opt/filebeat/filebeat && \
     chmod +x /opt/filebeat/filebeat && \
@@ -173,13 +173,21 @@ RUN mkdir /licenses
 COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses
 COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses
 
-{{- if eq .Variant "cloud" }}
+{{- if or (eq .Variant "cloud") (eq .Variant "service") }}
 COPY --from=home /opt /opt
 # Generate folder for a stub command that will be overwritten at runtime
 RUN mkdir /app && \
     chown {{ .user }}:{{ .user }} /app
 {{- end }}
 
+{{- if eq .Variant "service" }}
+RUN apk add --no-cache git make python-3.11 py3.11-pip && \
+    unzip {{ $beatHome }}/data/service/connectors-*.zip -d {{ $beatHome }}/data/service && \
+    mv {{ $beatHome }}/data/service/elasticsearch_connectors-* /usr/share/connectors && \
+    PYTHON=python3.11 make -C /usr/share/connectors clean install install-agent && \
+    chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/connectors
+{{- end }}
+
 {{- if (and (eq .Variant "complete") (contains .from "ubuntu")) }}
 USER root
 ENV NODE_PATH={{ $beatHome }}/.node
@@ -287,7 +295,7 @@ ENV LIBBEAT_MONITORING_CGROUPS_HIERARCHY_OVERRIDE=/
 
 WORKDIR {{ $beatHome }}
 
-{{- if eq .Variant "cloud" }}
+{{- if or (eq .Variant "cloud") (eq .Variant "service") }}
 ENTRYPOINT ["/usr/bin/tini", "--"]
 CMD ["/app/apm.sh"]
 # Generate a stub command that will be overwritten at runtime
diff --git a/magefile.go b/magefile.go
index 8cec1fbd29c..4ab9db4743a 100644
--- a/magefile.go
+++ b/magefile.go
@@ -93,6 +93,8 @@ const (
     agentCoreProjectName = "elastic-agent-core"
 
     helmChartPath = "./deploy/helm/elastic-agent"
+
+    sha512FileExt = ".sha512"
 )
 
 var (
@@ -533,11 +535,6 @@ func DownloadManifest(ctx context.Context) error {
         return errAtLeastOnePlatform
     }
 
-    var requiredPackages []string
-    for _, p := range platforms {
-        requiredPackages = append(requiredPackages, manifest.PlatformPackages[p])
-    }
-
     if e := manifest.DownloadComponents(ctx, devtools.ManifestURL, platforms, dropPath); e != nil {
         return fmt.Errorf("failed to download the manifest file, %w", e)
     }
@@ -595,16 +592,9 @@ func FixDRADockerArtifacts() error {
     return nil
 }
 
-func getPackageName(beat, version, pkg string) (string, string) {
-    if hasSnapshotEnv() {
-        version += "-SNAPSHOT"
-    }
-    return version, fmt.Sprintf("%s-%s-%s", beat, version, pkg)
-}
-
 func requiredPackagesPresent(basePath, beat, version string, requiredPackages []string) bool {
     for _, pkg := range requiredPackages {
-        _, packageName := getPackageName(beat, version, pkg)
+        packageName := fmt.Sprintf("%s-%s-%s", beat, version, pkg)
         path := filepath.Join(basePath, "build", "distributions", packageName)
 
         if _, err := os.Stat(path); err != nil {
@@ -959,16 +949,16 @@ func runAgent(ctx context.Context, env map[string]string) error {
 func packageAgent(ctx context.Context, platforms []string, dependenciesVersion string, manifestResponse *manifest.Build, agentPackaging, agentBinaryTarget mg.Fn) error {
     fmt.Println("--- Package Elastic-Agent")
 
-    requiredPackages := []string{}
+    platformPackageSuffixes := []string{}
     for _, p := range platforms {
-        requiredPackages = append(requiredPackages, manifest.PlatformPackages[p])
+        platformPackageSuffixes = append(platformPackageSuffixes, manifest.PlatformPackages[p])
     }
     if mg.Verbose() {
-        log.Printf("--- Packaging dependenciesVersion[%s], %+v \n", dependenciesVersion, requiredPackages)
+        log.Printf("--- Packaging dependenciesVersion[%s], %+v \n", dependenciesVersion, platformPackageSuffixes)
     }
 
     // download/copy all the necessary dependencies for packaging elastic-agent
-    archivePath, dropPath := collectPackageDependencies(platforms, dependenciesVersion, requiredPackages)
+    archivePath, dropPath := collectPackageDependencies(platforms, dependenciesVersion, platformPackageSuffixes)
 
     // cleanup after build
     defer os.RemoveAll(archivePath)
@@ -984,7 +974,7 @@ func packageAgent(ctx context.Context, platforms []string, dependenciesVersion s
     defer os.RemoveAll(flatPath)
 
     // extract all dependencies from their archives into flat dir
-    flattenDependencies(requiredPackages, dependenciesVersion, archivePath, dropPath, flatPath, manifestResponse)
+    flattenDependencies(platformPackageSuffixes, dependenciesVersion, archivePath, dropPath, flatPath, manifestResponse)
 
     // package agent
     log.Println("--- Running packaging function")
@@ -1002,7 +992,7 @@ func packageAgent(ctx context.Context, platforms []string, dependenciesVersion s
 // NOTE: after the build is done the caller must:
 // - delete archivePath and dropPath contents
 // - unset AGENT_DROP_PATH environment variable
-func collectPackageDependencies(platforms []string, packageVersion string, requiredPackages []string) (archivePath string, dropPath string) {
+func collectPackageDependencies(platforms []string, packageVersion string, platformPackageSuffixes []string) (archivePath string, dropPath string) {
     dropPath, found := os.LookupEnv(agentDropPath)
 
@@ -1021,7 +1011,11 @@ func collectPackageDependencies(platforms []string, packageVersion string, requi
         if mg.Verbose() {
             log.Printf(">> Creating drop-in folder %+v \n", dropPath)
         }
-        archivePath = movePackagesToArchive(dropPath, requiredPackages)
+        archivePath = movePackagesToArchive(dropPath, platformPackageSuffixes, packageVersion)
+
+        if hasSnapshotEnv() {
+            packageVersion = fmt.Sprintf("%s-SNAPSHOT", packageVersion)
+        }
 
         os.Setenv(agentDropPath, dropPath)
 
@@ -1038,17 +1032,16 @@ func collectPackageDependencies(platforms []string, packageVersion string, requi
         errGroup, ctx := errgroup.WithContext(context.Background())
 
         completedDownloads := &atomic.Int32{}
-        for binary, project := range manifest.ExpectedBinaries {
+        for _, spec := range manifest.ExpectedBinaries {
             for _, platform := range platforms {
-                if !project.SupportsPlatform(platform) {
-                    fmt.Printf("--- Binary %s does not support %s, download skipped\n", binary, platform)
+                if !spec.SupportsPlatform(platform) {
+                    fmt.Printf("--- Binary %s does not support %s, download skipped\n", spec.BinaryName, platform)
                     continue
                 }
-                reqPackage := manifest.PlatformPackages[platform]
-                targetPath := filepath.Join(archivePath, reqPackage)
+                targetPath := filepath.Join(archivePath, manifest.PlatformPackages[platform])
                 os.MkdirAll(targetPath, 0755)
-                newVersion, packageName := getPackageName(binary, packageVersion, reqPackage)
-                errGroup.Go(downloadBinary(ctx, project.Name, packageName, binary, platform, newVersion, targetPath, completedDownloads))
+                packageName := spec.GetPackageName(packageVersion, platform)
+                errGroup.Go(downloadBinary(ctx, spec.ProjectName, packageName, spec.BinaryName, platform, packageVersion, targetPath, completedDownloads))
             }
         }
 
@@ -1070,7 +1063,7 @@ func collectPackageDependencies(platforms []string, packageVersion string, requi
 
             packagesCopied := 0
 
-            if !requiredPackagesPresent(pwd, b, packageVersion, requiredPackages) {
+            if !requiredPackagesPresent(pwd, b, packageVersion, platformPackageSuffixes) {
                 fmt.Printf("--- Package %s\n", pwd)
                 cmd := exec.Command("mage", "package")
                 cmd.Dir = pwd
@@ -1088,7 +1081,7 @@ func collectPackageDependencies(platforms []string, packageVersion string, requi
             // copy to new drop
             sourcePath := filepath.Join(pwd, "build", "distributions")
-            for _, rp := range requiredPackages {
+            for _, rp := range platformPackageSuffixes {
                 files, err := filepath.Glob(filepath.Join(sourcePath, "*"+rp+"*"))
                 if err != nil {
                     panic(err)
@@ -1121,11 +1114,32 @@ func collectPackageDependencies(platforms []string, packageVersion string, requi
             }
         }
     } else {
-        archivePath = movePackagesToArchive(dropPath, requiredPackages)
+        archivePath = movePackagesToArchive(dropPath, platformPackageSuffixes, packageVersion)
     }
     return archivePath, dropPath
 }
 
+func removePythonWheels(matches []string, version string) []string {
+    if hasSnapshotEnv() {
+        version = fmt.Sprintf("%s-SNAPSHOT", version)
+    }
+
+    var wheels []string
+    for _, spec := range manifest.ExpectedBinaries {
+        if spec.PythonWheel {
+            wheels = append(wheels, spec.GetPackageName(version, ""))
+        }
+    }
+
+    cleaned := make([]string, 0, len(matches))
+    for _, path := range matches {
+        if !slices.Contains(wheels, filepath.Base(path)) {
+            cleaned = append(cleaned, path)
+        }
+    }
+    return cleaned
+}
+
 // flattenDependencies will extract all the required packages collected in archivePath and dropPath in flatPath and
 // regenerate checksums
 func flattenDependencies(requiredPackages []string, packageVersion, archivePath, dropPath, flatPath string, manifestResponse *manifest.Build) {
@@ -1149,6 +1163,14 @@ func flattenDependencies(requiredPackages []string, packageVersion, archivePath,
     }
     matches = append(matches, zipMatches...)
+    if mg.Verbose() {
+        log.Printf("--- Unfiltered dependencies to flatten in %s : %v", targetPath, matches)
+    }
+
+    // never flatten any python wheels; packages.yml and the docker build handle
+    // those specifically so that the python wheels are installed into the container
+    matches = removePythonWheels(matches, packageVersion)
+
     if mg.Verbose() {
         log.Printf("--- Extracting into the flat dir: %v", matches)
     }
@@ -1410,7 +1432,7 @@ func downloadDRAArtifacts(ctx context.Context, manifestUrl string, downloadDir s
     }
 
     // download the SHA to check integrity
-    artifactSHADownloadPath := filepath.Join(draDownloadDir, pkgName+".sha512")
+    artifactSHADownloadPath := filepath.Join(draDownloadDir, pkgName+sha512FileExt)
     err = manifest.DownloadPackage(errCtx, pkgDesc.ShaURL, artifactSHADownloadPath)
     if err != nil {
         return fmt.Errorf("downloading SHA for %q: %w", pkgName, err)
@@ -1515,7 +1537,7 @@ func downloadBinary(ctx context.Context, project string, packageName string, bin
         }
 
         compl.Add(1)
-        fmt.Printf("Done downloading %s\n", packageName)
+        fmt.Printf("Done downloading %s into %s\n", packageName, targetPath)
 
         return nil
     }
 }
@@ -1548,7 +1570,7 @@ func appendComponentChecksums(versionedDropPath string, checksums map[string]str
 }
 
 // movePackagesToArchive Create archive folder and move any pre-existing artifacts into it.
-func movePackagesToArchive(dropPath string, requiredPackages []string) string {
+func movePackagesToArchive(dropPath string, platformPackageSuffixes []string, packageVersion string) string {
     archivePath := filepath.Join(dropPath, "archives")
     os.MkdirAll(archivePath, 0755)
 
@@ -1564,8 +1586,15 @@ func movePackagesToArchive(dropPath string, requiredPackages []string) string {
     matches = append(matches, zipMatches...)
 
     for _, f := range matches {
-        for _, rp := range requiredPackages {
-            if !strings.Contains(f, rp) {
+        for _, packageSuffix := range platformPackageSuffixes {
+            if mg.Verbose() {
+                log.Printf("--- Evaluating moving dependency %s to archive path %s\n", f, archivePath)
+            }
+            // if the matched file name does not contain the platform suffix and it's not a platform-independent package, skip it
+            if !strings.Contains(f, packageSuffix) && !isPlatformIndependentPackage(f, packageVersion) {
+                if mg.Verbose() {
+                    log.Printf("--- Skipped moving dependency %s to archive path\n", f)
+                }
                 continue
             }
 
@@ -1580,13 +1609,25 @@ func movePackagesToArchive(dropPath string, requiredPackages []string) string {
                 continue
             }
 
-            targetPath := filepath.Join(archivePath, rp, filepath.Base(f))
+            targetPath := filepath.Join(archivePath, packageSuffix, filepath.Base(f))
             targetDir := filepath.Dir(targetPath)
             if err := os.MkdirAll(targetDir, 0750); err != nil {
                 fmt.Printf("warning: failed to create directory %s: %s", targetDir, err)
             }
-            if err := os.Rename(f, targetPath); err != nil {
-                panic(fmt.Errorf("failed renaming file: %w", err))
+
+            // Platform-independent packages need to be placed in the archive sub-folders for all platforms; copy instead of moving
+            if isPlatformIndependentPackage(f, packageVersion) {
+                if err := copyFile(f, targetPath); err != nil {
+                    panic(fmt.Errorf("failed copying file: %w", err))
+                }
+            } else {
+                if err := os.Rename(f, targetPath); err != nil {
+                    panic(fmt.Errorf("failed renaming file: %w", err))
+                }
+            }
+
+            if mg.Verbose() {
+                log.Printf("--- Moved dependency in archive path %s => %s\n", f, targetPath)
             }
         }
     }
@@ -1594,6 +1635,44 @@ func movePackagesToArchive(dropPath string, requiredPackages []string) string {
     return archivePath
 }
 
+func copyFile(src, dst string) error {
+    srcStat, err := os.Stat(src)
+    if err != nil {
+        return fmt.Errorf("stat source file %q: %w", src, err)
+    }
+
+    srcF, err := os.Open(src)
+    if err != nil {
+        return fmt.Errorf("opening source file %q: %w", src, err)
+    }
+    defer srcF.Close()
+
+    dstF, err := os.OpenFile(dst, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, srcStat.Mode()|os.ModePerm)
+    if err != nil {
+        return fmt.Errorf("opening/creating destination file %q: %w", dst, err)
+    }
+    defer dstF.Close()
+
+    _, err = io.Copy(dstF, srcF)
+    if err != nil {
+        return fmt.Errorf("copying file %q to %q: %w", src, dst, err)
+    }
+
+    return nil
+}
+
+func isPlatformIndependentPackage(f string, packageVersion string) bool {
+    fileBaseName := filepath.Base(f)
+    for _, spec := range manifest.ExpectedBinaries {
+        packageName := spec.GetPackageName(packageVersion, "")
+        // as of now only python wheel packages are platform-independent
+        if spec.PythonWheel && (fileBaseName == packageName || fileBaseName == packageName+sha512FileExt) {
+            return true
+        }
+    }
+    return false
+}
+
 func selectedPackageTypes() string {
     if len(devtools.SelectedPackageTypes) == 0 {
         return ""
diff --git a/pkg/testing/kubernetes/supported.go b/pkg/testing/kubernetes/supported.go
index 26f1bef3e6d..e7db5ba71c3 100644
--- a/pkg/testing/kubernetes/supported.go
+++ b/pkg/testing/kubernetes/supported.go
@@ -72,6 +72,10 @@ var variants = []struct {
         Name:  "cloud",
         Image: "docker.elastic.co/beats-ci/elastic-agent-cloud",
     },
+    {
+        Name:  "service",
+        Image: "docker.elastic.co/beats-ci/elastic-agent-service",
+    },
 }
 
 // GetSupported returns the list of supported OS types for Kubernetes.
diff --git a/specs/connectors.spec.yml b/specs/connectors.spec.yml
new file mode 100644
index 00000000000..0fb61f0bba2
--- /dev/null
+++ b/specs/connectors.spec.yml
@@ -0,0 +1,17 @@
+version: 2
+inputs:
+  - name: connectors-py
+    description: "Connectors Python"
+    platforms:
+      - linux/amd64
+      - linux/arm64
+      - container/amd64
+      - container/arm64
+    outputs:
+      - elasticsearch
+    command:
+      restart_monitoring_period: 5s
+      maximum_restarts_per_period: 1
+      timeouts:
+        restart: 1s
+      args: []
diff --git a/testing/integration/kubernetes_agent_service_test.go b/testing/integration/kubernetes_agent_service_test.go
new file mode 100644
index 00000000000..4a5ebdda2ad
--- /dev/null
+++ b/testing/integration/kubernetes_agent_service_test.go
@@ -0,0 +1,129 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License 2.0;
+// you may not use this file except in compliance with the Elastic License 2.0.
+
+//go:build integration
+
+package integration
+
+import (
+    "bufio"
+    "bytes"
+    "context"
+    "crypto/sha256"
+    "encoding/base64"
+    "fmt"
+    "os"
+    "path/filepath"
+    "strings"
+    "testing"
+
+    "github.com/stretchr/testify/require"
+    corev1 "k8s.io/api/core/v1"
+
+    "github.com/elastic/elastic-agent/pkg/testing/define"
+)
+
+func TestKubernetesAgentService(t *testing.T) {
+    info := define.Require(t, define.Requirements{
+        Stack: &define.Stack{},
+        Local: false,
+        Sudo:  false,
+        OS: []define.OS{
+            // only test the service container
+            {Type: define.Kubernetes, DockerVariant: "service"},
+        },
+        Group: define.Kubernetes,
+    })
+
+    agentImage := os.Getenv("AGENT_IMAGE")
+    require.NotEmpty(t, agentImage, "AGENT_IMAGE must be set")
+
+    client, err := info.KubeClient()
+    require.NoError(t, err)
+    require.NotNil(t, client)
+
+    testLogsBasePath := os.Getenv("K8S_TESTS_POD_LOGS_BASE")
+    require.NotEmpty(t, testLogsBasePath, "K8S_TESTS_POD_LOGS_BASE must be set")
+
+    err = os.MkdirAll(filepath.Join(testLogsBasePath, t.Name()), 0755)
+    require.NoError(t, err, "failed to create test logs directory")
+
+    namespace := info.Namespace
+
+    esHost := os.Getenv("ELASTICSEARCH_HOST")
+    require.NotEmpty(t, esHost, "ELASTICSEARCH_HOST must be set")
+
+    esAPIKey, err := generateESAPIKey(info.ESClient, namespace)
+    require.NoError(t, err, "failed to generate ES API key")
+    require.NotEmpty(t, esAPIKey, "failed to generate ES API key")
+
+    renderedManifest, err := renderKustomize(agentK8SKustomize)
+    require.NoError(t, err, "failed to render kustomize")
+
+    hasher := sha256.New()
+    hasher.Write([]byte(t.Name()))
+    testNamespace := strings.ToLower(base64.URLEncoding.EncodeToString(hasher.Sum(nil)))
+    testNamespace = noSpecialCharsRegexp.ReplaceAllString(testNamespace, "")
+
+    k8sObjects, err := yamlToK8SObjects(bufio.NewReader(bytes.NewReader(renderedManifest)))
+    require.NoError(t, err, "failed to convert yaml to k8s objects")
+
+    adjustK8SAgentManifests(k8sObjects, testNamespace, "elastic-agent-standalone",
+        func(container *corev1.Container) {
+            // set agent image
+            container.Image = agentImage
+            // set ImagePullPolicy to "Never" to avoid pulling the image
+            // as the image is already loaded by the kubernetes provisioner
+            container.ImagePullPolicy = "Never"
+
+            // set Elasticsearch host and API key
+            for idx, env := range container.Env {
+                if env.Name == "ES_HOST" {
+                    container.Env[idx].Value = esHost
+                    container.Env[idx].ValueFrom = nil
+                }
+                if env.Name == "API_KEY" {
+                    container.Env[idx].Value = esAPIKey
+                    container.Env[idx].ValueFrom = nil
+                }
+            }
+
+            // has a unique entrypoint and command because it's run in the cloud;
+            // adjust the spec to run it correctly
+            container.Command = []string{"elastic-agent"}
+            container.Args = []string{"-c", "/etc/elastic-agent/agent.yml", "-e"}
+        },
+        func(pod *corev1.PodSpec) {
+            for volumeIdx, volume := range pod.Volumes {
+                // need to update the volume path of the state directory
+                // to match the test namespace
+                if volume.Name == "elastic-agent-state" {
+                    hostPathType := corev1.HostPathDirectoryOrCreate
+                    pod.Volumes[volumeIdx].VolumeSource.HostPath = &corev1.HostPathVolumeSource{
+                        Type: &hostPathType,
+                        Path: fmt.Sprintf("/var/lib/elastic-agent-standalone/%s/state", testNamespace),
+                    }
+                }
+            }
+        })
+
+    // update the configmap to only run the connectors input
+    serviceAgentYAML, err := os.ReadFile(filepath.Join("testdata", "connectors.agent.yml"))
+    require.NoError(t, err)
+    for _, obj := range k8sObjects {
+        switch objWithType := obj.(type) {
+        case *corev1.ConfigMap:
+            _, ok := objWithType.Data["agent.yml"]
+            if ok {
+                objWithType.Data["agent.yml"] = string(serviceAgentYAML)
+            }
+        }
+    }
+
+    ctx := context.Background()
+
+    deployK8SAgent(t, ctx, client, k8sObjects, testNamespace, false, testLogsBasePath, map[string]bool{
+        "connectors-py": true,
+    })
+}
diff --git a/testing/integration/kubernetes_agent_standalone_test.go b/testing/integration/kubernetes_agent_standalone_test.go
index 417a36c30c3..ddcbb559cca 100644
--- a/testing/integration/kubernetes_agent_standalone_test.go
+++ b/testing/integration/kubernetes_agent_standalone_test.go
@@ -15,6 +15,7 @@ import (
     "encoding/json"
     "errors"
     "fmt"
+    "io"
     "os"
     "path/filepath"
 
@@ -25,8 +26,6 @@ import (
     "github.com/stretchr/testify/require"
 
-    "github.com/elastic/elastic-agent/pkg/testing/define"
-    "github.com/elastic/elastic-agent/pkg/testing/tools/fleettools"
     "github.com/elastic/go-elasticsearch/v8"
 
     appsv1 "k8s.io/api/apps/v1"
@@ -47,6 +46,11 @@ import (
     "helm.sh/helm/v3/pkg/action"
     "helm.sh/helm/v3/pkg/chart/loader"
     "helm.sh/helm/v3/pkg/cli"
+
+    aclient "github.com/elastic/elastic-agent/pkg/control/v2/client"
+    atesting "github.com/elastic/elastic-agent/pkg/testing"
+    "github.com/elastic/elastic-agent/pkg/testing/define"
+    "github.com/elastic/elastic-agent/pkg/testing/tools/fleettools"
 )
 
 const (
@@ -657,46 +661,71 @@ func deployK8SAgent(t *testing.T, ctx context.Context, client klient.Client, obj
 
     require.NotEmpty(t, agentPodName, "agent pod name is empty")
 
-    command := []string{"elastic-agent", "status"}
+    command := []string{"elastic-agent", "status", "--output=json"}
+    var status atesting.AgentStatusOutput
     var stdout, stderr bytes.Buffer
     var agentHealthyErr error
     // we will wait maximum 120 seconds for the agent to report healthy
     for i := 0; i < 120; i++ {
+        status = atesting.AgentStatusOutput{} // clear status output
         stdout.Reset()
        stderr.Reset()
         agentHealthyErr = client.Resources().ExecInPod(ctx, namespace, agentPodName, "elastic-agent-standalone", command, &stdout, &stderr)
         if agentHealthyErr == nil {
-            break
+            if uerr := json.Unmarshal(stdout.Bytes(), &status); uerr == nil {
+                if status.State == int(aclient.Healthy) {
+                    // agent is healthy, inner tests should now pass
+                    if runInnerK8STests {
+                        err := client.Resources().ExecInPod(ctx, namespace, agentPodName, "elastic-agent-standalone",
+                            []string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr)
+                        t.Log(stdout.String())
+                        if err != nil {
+                            t.Log(stderr.String())
+                        }
+                        require.NoError(t, err, "error at k8s inner tests execution")
+                    }
+
+                    // validate that the components defined are also healthy if they should exist
+                    componentsCorrect := true
+                    for component, shouldBePresent := range componentPresence {
+                        compState, ok := getComponentState(status, component)
+                        if shouldBePresent {
+                            if !ok {
+                                // doesn't exist
+                                componentsCorrect = false
+                            } else if compState != int(aclient.Healthy) {
+                                // not healthy
+                                componentsCorrect = false
+                            }
+                        } else if ok {
+                            // should not be present
+                            // break instantly and fail (as it existing should never happen)
+                            break
+                        }
+                    }
+                    if componentsCorrect {
+                        // agent health and components are correct
+                        return
+                    }
+                }
+            }
         }
         time.Sleep(time.Second * 1)
     }
 
-    statusString := stdout.String()
-    if agentHealthyErr != nil {
-        t.Errorf("elastic-agent never reported healthy: %v", agentHealthyErr)
-        t.Logf("stdout: %s\n", statusString)
-        t.Logf("stderr: %s\n", stderr.String())
-        t.FailNow()
-        return
-    }
-
-    stdout.Reset()
-    stderr.Reset()
-
-    for component, shouldBePresent := range componentPresence {
-        isPresent := strings.Contains(statusString, component)
-        require.Equal(t, shouldBePresent, isPresent)
-    }
+    t.Errorf("elastic-agent never reported healthy: %+v", status)
+    t.Logf("stdout: %s\n", stdout.String())
+    t.Logf("stderr: %s\n", stderr.String())
+    t.FailNow()
+}
 
-    if runInnerK8STests {
-        err := client.Resources().ExecInPod(ctx, namespace, agentPodName, "elastic-agent-standalone",
-            []string{"/usr/share/elastic-agent/k8s-inner-tests", "-test.v"}, &stdout, &stderr)
-        t.Log(stdout.String())
-        if err != nil {
-            t.Log(stderr.String())
+func getComponentState(status atesting.AgentStatusOutput, componentName string) (int, bool) {
+    for _, comp := range status.Components {
+        if comp.Name == componentName {
+            return comp.State, true
         }
-        require.NoError(t, err, "error at k8s inner tests execution")
     }
+    return -1, false
 }
 
 // dumpLogs dumps the logs of all pods in the given namespace to the given target directory
diff --git a/testing/integration/testdata/connectors.agent.yml b/testing/integration/testdata/connectors.agent.yml
new file mode 100644
index 00000000000..5c3466ae8ae
--- /dev/null
+++ b/testing/integration/testdata/connectors.agent.yml
@@ -0,0 +1,13 @@
+outputs:
+  default:
+    type: elasticsearch
+    hosts:
+      - >-
+        ${ES_HOST}
+    api_key: ${API_KEY}
+agent:
+  monitoring:
+    enabled: false
+inputs:
+  - id: connectors
+    type: connectors-py
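
Reviewer note (not part of the diff): the packaging changes above hinge on the new BinarySpec.GetPackageName rule, so the minimal, self-contained sketch below mirrors that rule to show why the connectors wheel resolves to a single platform-independent zip while agentbeat keeps a per-platform suffix. The struct and map are abridged copies of what the diff adds to dev-tools/mage/manifest; the version string and package main wrapper are only illustrative assumptions.

// Standalone sketch mirroring BinarySpec.GetPackageName from this diff.
package main

import "fmt"

// platformPackages mirrors a subset of manifest.PlatformPackages.
var platformPackages = map[string]string{
    "linux/amd64": "linux-x86_64.tar.gz",
    "linux/arm64": "linux-arm64.tar.gz",
}

type binarySpec struct {
    BinaryName  string
    ProjectName string
    PythonWheel bool
}

// getPackageName follows the same rule as the diff: python wheels are versioned
// zips with no platform suffix; everything else appends the platform package suffix.
func (s binarySpec) getPackageName(version, platform string) string {
    if s.PythonWheel {
        return fmt.Sprintf("%s-%s.zip", s.BinaryName, version)
    }
    return fmt.Sprintf("%s-%s-%s", s.BinaryName, version, platformPackages[platform])
}

func main() {
    agentbeat := binarySpec{BinaryName: "agentbeat", ProjectName: "beats"}
    connectors := binarySpec{BinaryName: "connectors", ProjectName: "connectors", PythonWheel: true}

    for _, platform := range []string{"linux/amd64", "linux/arm64"} {
        // agentbeat-8.16.0-SNAPSHOT-linux-x86_64.tar.gz, then ...-linux-arm64.tar.gz
        fmt.Println(agentbeat.getPackageName("8.16.0-SNAPSHOT", platform))
        // connectors-8.16.0-SNAPSHOT.zip for every platform, which is why
        // movePackagesToArchive copies (rather than moves) the wheel into each
        // platform archive and flattenDependencies filters it out again.
        fmt.Println(connectors.getPackageName("8.16.0-SNAPSHOT", platform))
    }
}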