diff --git a/.ci/.e2e-tests.yaml b/.ci/.e2e-tests.yaml
index c9613c115b..02a4399f08 100644
--- a/.ci/.e2e-tests.yaml
+++ b/.ci/.e2e-tests.yaml
@@ -17,9 +17,6 @@ SUITES:
   - name: "Fleet"
     pullRequestFilter: " && ~debian"
     tags: "fleet_mode_agent"
-  - name: "Fleet Server"
-    pullRequestFilter: " && ~debian"
-    tags: "fleet_server"
   - name: "Endpoint Integration"
     pullRequestFilter: " && ~debian"
     tags: "agent_endpoint_integration"
diff --git a/.ci/build-docker-images.groovy b/.ci/build-docker-images.groovy
new file mode 100644
index 0000000000..b93fd619b0
--- /dev/null
+++ b/.ci/build-docker-images.groovy
@@ -0,0 +1,94 @@
+#!/usr/bin/env groovy
+
+@Library('apm@current') _
+
+pipeline {
+  agent { label 'ubuntu-20' }
+  environment {
+    REPO = 'e2e-testing'
+    BASE_DIR = "src/github.com/elastic/${env.REPO}"
+    DOCKER_REGISTRY = 'docker.elastic.co'
+    DOCKER_ELASTIC_SECRET = 'secret/observability-team/ci/docker-registry/prod'
+    HOME = "${env.WORKSPACE}"
+    NOTIFY_TO = credentials('notify-to')
+    PIPELINE_LOG_LEVEL = 'INFO'
+    JOB_GIT_CREDENTIALS = "f6c7695a-671e-4f4f-a331-acdce44ff9ba"
+  }
+  options {
+    timeout(time: 1, unit: 'HOURS')
+    buildDiscarder(logRotator(numToKeepStr: '20', artifactNumToKeepStr: '20'))
+    timestamps()
+    ansiColor('xterm')
+    disableResume()
+    durabilityHint('PERFORMANCE_OPTIMIZED')
+    rateLimitBuilds(throttle: [count: 60, durationName: 'hour', userBoost: true])
+    quietPeriod(10)
+  }
+  triggers {
+    cron 'H H(0-5) * * 1-5'
+  }
+  stages {
+    stage('Checkout') {
+      steps {
+        deleteDir()
+        gitCheckout(basedir: "${BASE_DIR}",
+          branch: "${params.BRANCH_REFERENCE}",
+          repo: "https://github.com/elastic/${REPO}.git",
+          credentialsId: "${JOB_GIT_CREDENTIALS}"
+        )
+        stash allowEmpty: true, name: 'source', useDefaultExcludes: false
+      }
+    }
+    stage('Build AMD Docker images'){
+      agent { label 'ubuntu-20 && immutable && docker' }
+      environment {
+        HOME = "${env.WORKSPACE}/${BASE_DIR}"
+      }
+      steps {
+        deleteDir()
+        unstash 'source'
+        dockerLogin(secret: "${DOCKER_ELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}")
+        dir("${BASE_DIR}") {
+          withEnv(["ARCH=amd64"]) {
+            sh(label: 'Build AMD images', script: '.ci/scripts/build-docker-images.sh')
+          }
+        }
+      }
+    }
+    stage('Build ARM Docker images'){
+      agent { label 'arm && immutable && docker' }
+      environment {
+        HOME = "${env.WORKSPACE}/${BASE_DIR}"
+      }
+      steps {
+        deleteDir()
+        unstash 'source'
+        dockerLogin(secret: "${DOCKER_ELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}")
+        dir("${BASE_DIR}") {
+          withEnv(["ARCH=arm64"]) {
+            sh(label: 'Build ARM images', script: '.ci/scripts/build-docker-images.sh')
+          }
+        }
+      }
+    }
+    stage('Push multiplatform manifest'){
+      agent { label 'ubuntu-20 && immutable && docker' }
+      environment {
+        HOME = "${env.WORKSPACE}/${BASE_DIR}"
+      }
+      steps {
+        deleteDir()
+        unstash 'source'
+        dockerLogin(secret: "${DOCKER_ELASTIC_SECRET}", registry: "${DOCKER_REGISTRY}")
+        dir("${BASE_DIR}") {
+          sh(label: 'Push multiplatform manifest', script: '.ci/scripts/push-multiplatform-manifest.sh')
+        }
+      }
+    }
+  }
+  post {
+    cleanup {
+      notifyBuildResult()
+    }
+  }
+}
diff --git a/.ci/docker/centos-systemd/Dockerfile b/.ci/docker/centos-systemd/Dockerfile
new file mode 100644
index 0000000000..4b0bc4b483
--- /dev/null
+++ b/.ci/docker/centos-systemd/Dockerfile
@@ -0,0 +1,18 @@
+FROM centos:7
+
+ENV container docker
+
+LABEL maintainer="manuel.delapena@elastic.co"
+
+RUN (cd /lib/systemd/system/sysinit.target.wants/; for i in *; do [ $i == systemd-tmpfiles-setup.service ] || rm -f $i; done); \
+rm -f /lib/systemd/system/multi-user.target.wants/*;\
+rm -f /etc/systemd/system/*.wants/*;\
+rm -f /lib/systemd/system/local-fs.target.wants/*; \
+rm -f /lib/systemd/system/sockets.target.wants/*udev*; \
+rm -f /lib/systemd/system/sockets.target.wants/*initctl*; \
+rm -f /lib/systemd/system/basic.target.wants/*;\
+rm -f /lib/systemd/system/anaconda.target.wants/*;
+
+VOLUME [ "/sys/fs/cgroup" ]
+
+CMD ["/usr/sbin/init"]
diff --git a/.ci/docker/debian-systemd/Dockerfile b/.ci/docker/debian-systemd/Dockerfile
new file mode 100644
index 0000000000..a503483689
--- /dev/null
+++ b/.ci/docker/debian-systemd/Dockerfile
@@ -0,0 +1,141 @@
+# This file is part of docker-debian-systemd.
+#
+# Copyright (c)
+#   2018-2019 Alexander Haase
+#
+# Permission is hereby granted, free of charge, to any person obtaining a copy
+# of this software and associated documentation files (the "Software"), to deal
+# in the Software without restriction, including without limitation the rights
+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+# copies of the Software, and to permit persons to whom the Software is
+# furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in all
+# copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+# This image is based on the regular Debian image. By default the 'latest' tag
+# (pointing to the current stable release) of the parent image will be used.
+# However, an alternate parent tag may be set by defining the 'TAG' build
+# argument to a specific Debian release, e.g. 'stretch' or 'buster'.
+ARG TAG=latest
+FROM debian:${TAG}
+LABEL maintainer="manuel.delapena@elastic.co"
+
+# Configure the debconf frontend.
+#
+# This image doesn't include whiptail, dialog, nor the readline perl module.
+# Therefore, the debconf frontend will be set to 'teletype' to avoid error
+# messages that no dialog frontend could be found.
+RUN echo 'debconf debconf/frontend select teletype' | debconf-set-selections
+
+
+# Install the necessary packages.
+#
+# In addition to the regular Debian base image, a BASIC set of packages from the
+# Debian minimal configuration will be installed. After all packages have been
+# installed, the apt caches and some log files will be removed to minimize the
+# image.
+#
+# NOTE: An upgrade will be performed to include updates and security fixes of
+#       installed packages that received updates in the Debian repository after
+#       the upstream image has been created.
+#
+# NOTE: No syslog daemon will be installed, as systemd's journald should fit
+#       most needs. Please file an issue if you think this should be changed.
+RUN apt-get update
+RUN apt-get dist-upgrade -y
+RUN apt-get install -y --no-install-recommends \
+        systemd \
+        systemd-sysv \
+        cron \
+        anacron
+
+RUN apt-get clean
+RUN rm -rf \
+        /var/lib/apt/lists/* \
+        /var/log/alternatives.log \
+        /var/log/apt/history.log \
+        /var/log/apt/term.log \
+        /var/log/dpkg.log
+
+
+# Configure systemd.
+#
+# For running systemd inside a Docker container, some additional tweaks are
+# required. For a detailed list see:
+#
+# https://developers.redhat.com/blog/2016/09/13/ \
+#   running-systemd-in-a-non-privileged-container/
+#
+# Additional tweaks will be applied in the final image below.

+# To avoid ugly warnings when running this image on a host running systemd, the
+# following units will be masked.
+#
+# NOTE: This will not remove ALL warnings in all Debian releases, but seems to
+#       work for stretch.
+RUN systemctl mask -- \
+    dev-hugepages.mount \
+    sys-fs-fuse-connections.mount
+
+# The machine-id should be generated when creating the container. This will be
+# done automatically if the file is not present, so let's delete it.
+RUN rm -f \
+    /etc/machine-id \
+    /var/lib/dbus/machine-id
+
+
+
+
+# Build the final image.
+#
+# To get a minimal image without deleted files in intermediate layers, the
+# contents of the image previously built will be copied into a second version of
+# the parent image.
+#
+# NOTE: This method requires buildkit, as the differ of buildkit will copy
+#       changed files only and we'll get a minimal image with just the changed
+#       files in a single new layer.
+#
+# NOTE: All settings related to the image's environment (e.g. CMD, ENV and
+#       VOLUME settings) need to be set in the following image definition to be
+#       used by child images and containers.
+
+FROM debian:${TAG}
+COPY --from=0 / /
+
+
+# Configure systemd.
+#
+# For running systemd inside a Docker container, some additional tweaks are
+# required. Some of them have already been applied above.
+#
+# The 'container' environment variable tells systemd that it's running inside a
+# Docker container environment.
+ENV container docker
+
+# A different stop signal is required, so systemd will initiate a shutdown when
+# running 'docker stop <container>'.
+STOPSIGNAL SIGRTMIN+3
+
+# The host's cgroup filesystem needs to be mounted (read-only) in the
+# container. '/run', '/run/lock' and '/tmp' need to be tmpfs filesystems when
+# running the container without 'CAP_SYS_ADMIN'.
+#
+# NOTE: For running Debian stretch, 'CAP_SYS_ADMIN' still needs to be added, as
+#       stretch's version of systemd is not recent enough. Buster will run just
+#       fine without 'CAP_SYS_ADMIN'.
+VOLUME [ "/sys/fs/cgroup", "/run", "/run/lock", "/tmp" ]
+
+# As this image should run systemd, the default command will be changed to start
+# the init system. CMD is used in favor of ENTRYPOINT, so one may override it
+# when creating the container, e.g. to run a bash console instead.
+CMD [ "/sbin/init" ]
diff --git a/.ci/jobs/build-docker-images.yml b/.ci/jobs/build-docker-images.yml
new file mode 100644
index 0000000000..dcadbb2915
--- /dev/null
+++ b/.ci/jobs/build-docker-images.yml
@@ -0,0 +1,27 @@
+---
+- job:
+    name: Beats/build-docker-images
+    display-name: E2E Tests Docker images
+    description: Job to pre-build docker images used in E2E tests.
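+    # NOTE: the pipeline executed by this job lives in .ci/build-docker-images.groovy
+    # (see script-path below); it pre-builds the centos/debian systemd images for
+    # amd64 and arm64 and pushes a multiplatform manifest.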
+    view: Beats
+    project-type: pipeline
+    parameters:
+      - string:
+          name: BRANCH_REFERENCE
+          default: master
+          description: the Git branch specifier
+    pipeline-scm:
+      script-path: .ci/build-docker-images.groovy
+      scm:
+        - git:
+            url: git@github.com:elastic/e2e-testing.git
+            refspec: +refs/heads/*:refs/remotes/origin/*
+            wipe-workspace: true
+            name: origin
+            shallow-clone: true
+            credentials-id: f6c7695a-671e-4f4f-a331-acdce44ff9ba
+            reference-repo: /var/lib/jenkins/.git-references/e2e-testing.git
+            branches:
+              - $BRANCH_REFERENCE
+    triggers:
+      - timed: 'H H(0-5) * * 1-5'
diff --git a/.ci/scripts/build-docker-images.sh b/.ci/scripts/build-docker-images.sh
new file mode 100755
index 0000000000..98d4d09997
--- /dev/null
+++ b/.ci/scripts/build-docker-images.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+
+## Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+## or more contributor license agreements. Licensed under the Elastic License;
+## you may not use this file except in compliance with the Elastic License.
+
+set -euxo pipefail
+
+ARCH="${ARCH:-amd64}"
+
+readonly ELASTIC_REGISTRY="docker.elastic.co"
+readonly OBSERVABILITY_CI_REGISTRY="${ELASTIC_REGISTRY}/observability-ci"
+
+main() {
+    _build_and_push "centos-systemd"
+    _build_and_push "debian-systemd"
+}
+
+_build_and_push() {
+    local image="${1}"
+
+    local platformSpecificImage="${OBSERVABILITY_CI_REGISTRY}/${image}-${ARCH}:latest"
+
+    docker build -t "${platformSpecificImage}" ".ci/docker/${image}"
+
+    docker push "${platformSpecificImage}"
+}
+
+main "$@"
diff --git a/.ci/scripts/push-multiplatform-manifest.sh b/.ci/scripts/push-multiplatform-manifest.sh
new file mode 100755
index 0000000000..56f5771b0e
--- /dev/null
+++ b/.ci/scripts/push-multiplatform-manifest.sh
@@ -0,0 +1,34 @@
+#!/usr/bin/env bash
+
+## Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+## or more contributor license agreements. Licensed under the Elastic License;
+## you may not use this file except in compliance with the Elastic License.
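+
+# Combines the per-architecture systemd images pushed by
+# .ci/scripts/build-docker-images.sh into a single multiplatform manifest
+# (linux/amd64 and linux/arm64), using Elastic's manifest-tool image.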
+
+set -euxo pipefail
+
+readonly ELASTIC_REGISTRY="docker.elastic.co"
+readonly MANIFEST_TOOL_IMAGE="${ELASTIC_REGISTRY}/infra/manifest-tool:latest"
+readonly OBSERVABILITY_CI_REGISTRY="${ELASTIC_REGISTRY}/observability-ci"
+
+main() {
+    _push_multiplatform_manifest "centos-systemd"
+    _push_multiplatform_manifest "debian-systemd"
+}
+
+_push_multiplatform_manifest() {
+    local image="${1}"
+
+    local fqn="${OBSERVABILITY_CI_REGISTRY}/${image}:latest"
+    # the '-ARCH' placeholder will be replaced with the values in the '--platforms' argument
+    local templateFqn="${OBSERVABILITY_CI_REGISTRY}/${image}-ARCH:latest"
+
+    docker run --rm \
+        --mount "src=${HOME}/.docker,target=/docker-config,type=bind" \
+        "${MANIFEST_TOOL_IMAGE}" --docker-cfg "/docker-config" \
+        push from-args \
+        --platforms linux/amd64,linux/arm64 \
+        --template "${templateFqn}" \
+        --target "${fqn}"
+}
+
+main "$@"
diff --git a/cli/config/compose/profiles/fleet/configurations/kibana.config.yml b/cli/config/compose/profiles/fleet/configurations/kibana.config.yml
index 8c466c6da6..c093afe8b3 100644
--- a/cli/config/compose/profiles/fleet/configurations/kibana.config.yml
+++ b/cli/config/compose/profiles/fleet/configurations/kibana.config.yml
@@ -16,5 +16,5 @@ xpack.fleet.registryUrl: http://package-registry:8080
 xpack.fleet.agents.enabled: true
 xpack.fleet.agents.elasticsearch.host: http://elasticsearch:9200
 xpack.fleet.agents.fleet_server.hosts:
-  - http://kibana:5601
+  - http://fleet-server:8220
 xpack.fleet.agents.tlsCheckDisabled: true
diff --git a/cli/config/compose/profiles/fleet/docker-compose.yml b/cli/config/compose/profiles/fleet/docker-compose.yml
index c92eeeb695..8425136d49 100644
--- a/cli/config/compose/profiles/fleet/docker-compose.yml
+++ b/cli/config/compose/profiles/fleet/docker-compose.yml
@@ -40,3 +40,22 @@ services:
       test: ["CMD", "curl", "-f", "http://localhost:8080"]
       retries: 300
       interval: 1s
+
+  fleet-server:
+    image: "docker.elastic.co/beats/elastic-agent:${stackVersion:-8.0.0-SNAPSHOT}"
+    depends_on:
+      elasticsearch:
+        condition: service_healthy
+      kibana:
+        condition: service_healthy
+    healthcheck:
+      test: "curl -f http://127.0.0.1:8220/api/status | grep HEALTHY >/dev/null 2>&1"
+      retries: 12
+      interval: 5s
+    environment:
+      - "FLEET_SERVER_ENABLE=1"
+      - "FLEET_SERVER_INSECURE_HTTP=1"
+      - "KIBANA_FLEET_SETUP=1"
+      - "KIBANA_FLEET_HOST=http://kibana:5601"
+      - "FLEET_SERVER_HOST=0.0.0.0"
+      - "FLEET_SERVER_PORT=8220"
diff --git a/cli/config/compose/services/centos-systemd/docker-compose.yml b/cli/config/compose/services/centos-systemd/docker-compose.yml
index 1d3b77d01a..6721740dcb 100644
--- a/cli/config/compose/services/centos-systemd/docker-compose.yml
+++ b/cli/config/compose/services/centos-systemd/docker-compose.yml
@@ -4,6 +4,7 @@ services:
     image: centos/systemd:${centos_systemdTag:-latest}
     container_name: ${centos_systemdContainerName}
     entrypoint: "/usr/sbin/init"
+    platform: ${stackPlatform:-linux/amd64}
     privileged: true
     volumes:
       - ${centos_systemdAgentBinarySrcPath:-.}:${centos_systemdAgentBinaryTargetPath:-/tmp}
diff --git a/cli/config/compose/services/debian-systemd/docker-compose.yml b/cli/config/compose/services/debian-systemd/docker-compose.yml
index b6d15cd1b1..ae47b83aac 100644
--- a/cli/config/compose/services/debian-systemd/docker-compose.yml
+++ b/cli/config/compose/services/debian-systemd/docker-compose.yml
@@ -4,6 +4,7 @@ services:
     image: alehaa/debian-systemd:${debian_systemdTag:-stretch}
     container_name: ${debian_systemdContainerName}
     entrypoint: "/sbin/init"
+    platform: ${stackPlatform:-linux/amd64}
     privileged: true
     volumes:
       - ${debian_systemdAgentBinarySrcPath:-.}:${debian_systemdAgentBinaryTargetPath:-/tmp}
diff --git a/cli/config/compose/services/elastic-agent/apm-legacy/config/apm-server.yml b/cli/config/compose/services/elastic-agent/apm-legacy/config/apm-server.yml
new file mode 100644
index 0000000000..ac1a563a91
--- /dev/null
+++ b/cli/config/compose/services/elastic-agent/apm-legacy/config/apm-server.yml
@@ -0,0 +1,21 @@
+monitoring.enabled: true
+http.enabled: true
+http.port: 5067
+http.host: "0.0.0.0"
+apm-server:
+  host: "0.0.0.0:8200"
+  secret_token: "1234"
+  # Enable APM Server Golang expvar support (https://golang.org/pkg/expvar/).
+  expvar:
+    enabled: true
+    url: "/debug/vars"
+  kibana:
+    # For APM Agent configuration in Kibana, enabled must be true.
+    enabled: true
+    host: "kibana"
+    username: "elastic"
+    password: "changeme"
+output.elasticsearch:
+  hosts: ["http://elasticsearch:9200"]
+  username: "elastic"
+  password: "changeme"
diff --git a/cli/config/compose/services/elastic-agent/apm-legacy/config/capabilities.yml b/cli/config/compose/services/elastic-agent/apm-legacy/config/capabilities.yml
new file mode 100644
index 0000000000..e2ad548a4c
--- /dev/null
+++ b/cli/config/compose/services/elastic-agent/apm-legacy/config/capabilities.yml
@@ -0,0 +1,5 @@
+capabilities:
+- rule: allow
+  input: fleet-server
+- rule: deny
+  input: "*"
diff --git a/cli/config/compose/services/elastic-agent/apm-legacy/config/credentials.yml b/cli/config/compose/services/elastic-agent/apm-legacy/config/credentials.yml
new file mode 100644
index 0000000000..90e67b0a2a
--- /dev/null
+++ b/cli/config/compose/services/elastic-agent/apm-legacy/config/credentials.yml
@@ -0,0 +1,10 @@
+fleet_server:
+  elasticsearch:
+    host: "elasticsearch"
+    username: "elastic"
+    password: "changeme"
+kibana:
+  fleet:
+    host: "kibana"
+    username: "elastic"
+    password: "changeme"
diff --git a/cli/config/compose/services/elastic-agent/apm-legacy/config/fleet-setup.yml b/cli/config/compose/services/elastic-agent/apm-legacy/config/fleet-setup.yml
new file mode 100644
index 0000000000..30feaf7e85
--- /dev/null
+++ b/cli/config/compose/services/elastic-agent/apm-legacy/config/fleet-setup.yml
@@ -0,0 +1,9 @@
+fleet:
+  enroll: true
+  force: false
+  insecure: true
+fleet_server:
+  enable: true
+kibana:
+  fleet:
+    setup: true
diff --git a/cli/config/compose/services/elastic-agent/docker-compose-cloud.yml b/cli/config/compose/services/elastic-agent/docker-compose-cloud.yml
new file mode 100644
index 0000000000..e4613f7f0c
--- /dev/null
+++ b/cli/config/compose/services/elastic-agent/docker-compose-cloud.yml
@@ -0,0 +1,26 @@
+version: '2.4'
+services:
+  elastic-agent:
+    image: docker.elastic.co/${elasticAgentDockerNamespace:-beats}/elastic-agent${elasticAgentDockerImageSuffix}:${elasticAgentTag:-8.0.0-SNAPSHOT}
+    container_name: ${elasticAgentContainerName}
+    depends_on:
+      elasticsearch:
+        condition: service_healthy
+      kibana:
+        condition: service_healthy
+    environment:
+      - "FLEET_SERVER_ENABLE=1"
+      - "FLEET_SERVER_INSECURE_HTTP=1"
+      - "ELASTIC_AGENT_CLOUD=1"
+      - "APM_SERVER_PATH=/apm-legacy/apm-server/"
+      - "STATE_PATH=/apm-legacy/elastic-agent/"
+      - "CONFIG_PATH=/apm-legacy/config/"
+      - "DATA_PATH=/apm-legacy/data/"
+      - "LOGS_PATH=/apm-legacy/logs/"
+      - "HOME_PATH=/apm-legacy/"
+    volumes:
+      - "${apmVolume}:/apm-legacy"
+    ports:
+      - "127.0.0.1:8220:8220"
+      - "127.0.0.1:8200:8200"
+      - "127.0.0.1:5066:5066"
diff --git a/cli/config/compose/services/elastic-agent/docker-compose.yml 
b/cli/config/compose/services/elastic-agent/docker-compose.yml index 114a49f0d1..199768ec19 100644 --- a/cli/config/compose/services/elastic-agent/docker-compose.yml +++ b/cli/config/compose/services/elastic-agent/docker-compose.yml @@ -9,12 +9,8 @@ services: kibana: condition: service_healthy environment: - - "FLEET_SERVER_ELASTICSEARCH_HOST=http://${elasticsearchHost:-elasticsearch}:${elasticsearchPort:-9200}" - "FLEET_SERVER_ENABLE=${fleetServerMode:-0}" - "FLEET_SERVER_INSECURE_HTTP=${fleetServerMode:-0}" - - "FLEET_SERVER_HOST=0.0.0.0" - - "FLEET_SERVER_ELASTICSEARCH_USERNAME=elastic" - - "FLEET_SERVER_ELASTICSEARCH_PASSWORD=changeme" platform: ${elasticAgentPlatform:-linux/amd64} ports: - "127.0.0.1:8220:8220" diff --git a/cli/config/compose/services/fleet-server-centos/docker-compose.yml b/cli/config/compose/services/fleet-server-centos/docker-compose.yml new file mode 100644 index 0000000000..8492ce3947 --- /dev/null +++ b/cli/config/compose/services/fleet-server-centos/docker-compose.yml @@ -0,0 +1,10 @@ +version: '2.4' +services: + fleet-server-centos: + image: centos/systemd:${fleet_server_centosTag:-latest} + container_name: ${fleet_server_centosContainerName} + entrypoint: "/usr/sbin/init" + privileged: true + volumes: + - ${fleet_server_centosAgentBinarySrcPath:-.}:${fleet_server_centosAgentBinaryTargetPath:-/tmp} + - /sys/fs/cgroup:/sys/fs/cgroup:ro diff --git a/cli/config/compose/services/fleet-server-debian/docker-compose.yml b/cli/config/compose/services/fleet-server-debian/docker-compose.yml new file mode 100644 index 0000000000..bb86c67f83 --- /dev/null +++ b/cli/config/compose/services/fleet-server-debian/docker-compose.yml @@ -0,0 +1,10 @@ +version: '2.4' +services: + fleet-server-debian: + image: alehaa/debian-systemd:${fleet_server_debianTag:-stretch} + container_name: ${fleet_server_debianContainerName} + entrypoint: "/sbin/init" + privileged: true + volumes: + - ${fleet_server_debianAgentBinarySrcPath:-.}:${fleet_server_debianAgentBinaryTargetPath:-/tmp} + - /sys/fs/cgroup:/sys/fs/cgroup:ro diff --git a/cli/config/config.go b/cli/config/config.go index caafaa7575..a77d574251 100644 --- a/cli/config/config.go +++ b/cli/config/config.go @@ -73,14 +73,16 @@ func FileExists(configFile string) (bool, error) { // GetComposeFile returns the path of the compose file, looking up the // tool's workdir -func GetComposeFile(isProfile bool, composeName string) (string, error) { - composeFileName := "docker-compose.yml" +func GetComposeFile(isProfile bool, composeName string, composeFileName ...string) (string, error) { + if isProfile || composeFileName == nil || composeFileName[0] == "" { + composeFileName = []string{"docker-compose.yml"} + } serviceType := "services" if isProfile { serviceType = "profiles" } - composeFilePath := path.Join(Op.Workspace, "compose", serviceType, composeName, composeFileName) + composeFilePath := path.Join(Op.Workspace, "compose", serviceType, composeName, composeFileName[0]) found, err := io.Exists(composeFilePath) if found && err == nil { log.WithFields(log.Fields{ @@ -130,15 +132,19 @@ func Init() { } shell.CheckInstalledSoftware(binaries...) 
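+	// NOTE: initialisation is idempotent; initConfig returns early when the
+	// configuration has already been created, so it is safe to call from
+	// multiple entry points.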
-	InitConfig()
+	initConfig()
 }
 
-// InitConfig initialises configuration
-func InitConfig() {
+// initConfig initialises configuration
+func initConfig() {
 	if Op != nil {
 		return
 	}
 
+	newConfig(OpDir())
+}
 
+// OpDir returns the directory where the tool's configuration and compose files are copied (~/.op)
+func OpDir() string {
 	home, err := homedir.Dir()
 	if err != nil {
 		log.WithFields(log.Fields{
 			"error": err,
 		}).Fatal("Could not get current user's HOME dir")
 	}
@@ -146,9 +152,7 @@
 	}
 
-	w := filepath.Join(home, ".op")
-
-	newConfig(w)
+	return filepath.Join(home, ".op")
 }
 
 // PutServiceEnvironment puts the environment variables for the service, replacing "SERVICE_"
diff --git a/e2e/_suites/fleet/features/apm_integration.feature b/e2e/_suites/fleet/features/apm_integration.feature
new file mode 100644
index 0000000000..76b93879b0
--- /dev/null
+++ b/e2e/_suites/fleet/features/apm_integration.feature
@@ -0,0 +1,30 @@
+@apm_server
+Feature: APM Integration
+Scenarios for APM
+
+@install
+Scenario Outline: Deploying a stand-alone agent with fleet server mode
+  Given a "<image>" stand-alone agent is deployed with fleet server mode
+    And the stand-alone agent is listed in Fleet as "online"
+  When the "Elastic APM" integration is added to the policy
+  Then the "Elastic APM" datasource is shown in the policy
+    And the "apm-server" process is in the "started" state on the host
+
+
+@default
+Examples: default
+| image   |
+| default |
+
+
+
+@cloud
+Scenario Outline: Deploying a stand-alone agent with fleet server mode on cloud
+  When a "<image>" stand-alone agent is deployed with fleet server mode on cloud
+  Then the "apm-server" process is in the "started" state on the host
+
+
+@default
+Examples: default
+| image   |
+| default |
diff --git a/e2e/_suites/fleet/features/fleet_server.feature b/e2e/_suites/fleet/features/fleet_server.feature
deleted file mode 100644
index ff863764fd..0000000000
--- a/e2e/_suites/fleet/features/fleet_server.feature
+++ /dev/null
@@ -1,19 +0,0 @@
-@fleet_server
-Feature: Fleet Server
-  Scenarios for Fleet Server, where an Elasticsearch and a Kibana instance are already provisioned,
-  so that the Agent is able to communicate with them
-
-@start-fleet-server
-Scenario Outline: Deploying an Elastic Agent that starts Fleet Server
-  When a "<os>" agent is deployed to Fleet with "tar" installer in fleet-server mode
-  Then the agent is listed in Fleet as "online"
-
-@centos
-Examples: Centos
-| os     |
-| centos |
-
-@debian
-Examples: Debian
-| os     |
-| debian |
diff --git a/e2e/_suites/fleet/features/stand_alone_agent.feature b/e2e/_suites/fleet/features/stand_alone_agent.feature
index 171705c957..20a44ce177 100644
--- a/e2e/_suites/fleet/features/stand_alone_agent.feature
+++ b/e2e/_suites/fleet/features/stand_alone_agent.feature
@@ -51,8 +51,8 @@ Examples: Ubi8
 | image |
 | ubi8  |
 
-@run_fleet_server
-Scenario Outline: Deploying a stand-alone agent with fleet server mode
+@bootstrap-fleet-server
+Scenario Outline: Bootstrapping Fleet Server from a stand-alone Elastic Agent
   When a "<image>" stand-alone agent is deployed with fleet server mode
   Then the stand-alone agent is listed in Fleet as "online"
diff --git a/e2e/_suites/fleet/fleet.go b/e2e/_suites/fleet/fleet.go
index f632d9a21f..c69b936ef8 100644
--- a/e2e/_suites/fleet/fleet.go
+++ b/e2e/_suites/fleet/fleet.go
@@ -7,6 +7,7 @@ package main
 import (
 	"context"
 	"fmt"
+	"github.com/google/uuid"
 	"strings"
 	"time"
 
@@ -20,7 +21,6 @@ import (
 	"github.com/elastic/e2e-testing/internal/kibana"
 	"github.com/elastic/e2e-testing/internal/shell"
 	"github.com/elastic/e2e-testing/internal/utils"
-	"github.com/google/uuid"
"github.com/pkg/errors" log "github.com/sirupsen/logrus" ) @@ -41,21 +41,23 @@ type FleetTestSuite struct { Installers map[string]installer.ElasticAgentInstaller Integration kibana.IntegrationPackage // the installed integration Policy kibana.Policy - FleetPolicy kibana.Policy + FleetServerPolicy kibana.Policy PolicyUpdatedAt string // the moment the policy was updated Version string // current elastic-agent version kibanaClient *kibana.Client + // fleet server + FleetServerHostname string // hostname of the fleet server. If empty, it means the agent is the first one, bootstrapping fleet server } // afterScenario destroys the state created by a scenario func (fts *FleetTestSuite) afterScenario() { serviceManager := compose.NewServiceManager() - serviceName := fts.Image + agentInstaller := fts.getInstaller() - if log.IsLevelEnabled(log.DebugLevel) { - agentInstaller := fts.getInstaller() + serviceName := fts.getServiceName(agentInstaller) + if log.IsLevelEnabled(log.DebugLevel) { err := agentInstaller.PrintLogsFn(fts.Hostname) if err != nil { log.WithFields(log.Fields{ @@ -63,13 +65,13 @@ func (fts *FleetTestSuite) afterScenario() { "error": err, }).Warn("Could not get agent logs in the container") } + } - // only call it when the elastic-agent is present - if !fts.ElasticAgentStopped { - err := agentInstaller.UninstallFn() - if err != nil { - log.Warnf("Could not uninstall the agent after the scenario: %v", err) - } + // only call it when the elastic-agent is present + if !fts.ElasticAgentStopped { + err := agentInstaller.UninstallFn() + if err != nil { + log.Warnf("Could not uninstall the agent after the scenario: %v", err) } } @@ -83,7 +85,7 @@ func (fts *FleetTestSuite) afterScenario() { developerMode := shell.GetEnvBool("DEVELOPER_MODE") if !developerMode { - _ = serviceManager.RemoveServicesFromCompose(context.Background(), common.FleetProfileName, []string{serviceName + "-systemd"}, common.ProfileEnv) + _ = serviceManager.RemoveServicesFromCompose(context.Background(), common.FleetProfileName, []string{serviceName}, common.ProfileEnv) } else { log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped") } @@ -96,32 +98,14 @@ func (fts *FleetTestSuite) afterScenario() { }).Warn("The enrollment token could not be deleted") } - // Cleanup all package policies - packagePolicies, err := fts.kibanaClient.ListPackagePolicies() - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "policy": fts.FleetPolicy, - }).Error("The package policies could not be found") - } - for _, pkgPolicy := range packagePolicies { - // Do not remove the fleet server package integration otherwise fleet server fails to bootstrap - if !strings.Contains(pkgPolicy.Name, "fleet_server") && pkgPolicy.PolicyID == fts.FleetPolicy.ID { - err = fts.kibanaClient.DeleteIntegrationFromPolicy(pkgPolicy) - if err != nil { - log.WithFields(log.Fields{ - "err": err, - "packagePolicy": pkgPolicy, - }).Error("The integration could not be deleted from the configuration") - } - } - } + fts.kibanaClient.DeleteAllPolicies(fts.FleetServerPolicy) // clean up fields fts.CurrentTokenID = "" fts.CurrentToken = "" fts.Image = "" fts.Hostname = "" + fts.FleetServerHostname = "" } // beforeScenario creates the state needed by a scenario @@ -140,18 +124,18 @@ func (fts *FleetTestSuite) beforeScenario() { } fts.Policy = policy - fleetPolicy, err := fts.kibanaClient.GetDefaultPolicy(true) + fleetServerPolicy, err := fts.kibanaClient.GetDefaultPolicy(true) if err != nil { 
log.WithFields(log.Fields{ "err": err, }).Warn("The default fleet server policy could not be obtained") } - fts.FleetPolicy = fleetPolicy + fts.FleetServerPolicy = fleetServerPolicy } func (fts *FleetTestSuite) contributeSteps(s *godog.ScenarioContext) { - s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer$`, fts.anAgentIsDeployedToFleetWithInstallerInFleetMode) + s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer$`, fts.anAgentIsDeployedToFleetWithInstaller) s.Step(`^a "([^"]*)" agent "([^"]*)" is deployed to Fleet with "([^"]*)" installer$`, fts.anStaleAgentIsDeployedToFleetWithInstaller) s.Step(`^agent is in version "([^"]*)"$`, fts.agentInVersion) s.Step(`^agent is upgraded to version "([^"]*)"$`, fts.anAgentIsUpgraded) @@ -176,9 +160,6 @@ func (fts *FleetTestSuite) contributeSteps(s *godog.ScenarioContext) { s.Step(`^the policy response will be shown in the Security App$`, fts.thePolicyResponseWillBeShownInTheSecurityApp) s.Step(`^the policy is updated to have "([^"]*)" in "([^"]*)" mode$`, fts.thePolicyIsUpdatedToHaveMode) s.Step(`^the policy will reflect the change in the Security App$`, fts.thePolicyWillReflectTheChangeInTheSecurityApp) - - // fleet server steps - s.Step(`^a "([^"]*)" agent is deployed to Fleet with "([^"]*)" installer in fleet-server mode$`, fts.anAgentIsDeployedToFleetWithInstallerInFleetMode) } func (fts *FleetTestSuite) anStaleAgentIsDeployedToFleetWithInstaller(image, version, installerType string) error { @@ -215,7 +196,7 @@ func (fts *FleetTestSuite) anStaleAgentIsDeployedToFleetWithInstaller(image, ver // prepare installer for stale version if fts.Version != agentVersionBackup { - i := installer.GetElasticAgentInstaller(image, installerType, fts.Version) + i := installer.GetElasticAgentInstaller(image, installerType, fts.Version, "") fts.Installers[fmt.Sprintf("%s-%s-%s", image, installerType, version)] = i } @@ -311,13 +292,15 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(i agentInstaller := fts.getInstaller() - profile := agentInstaller.Profile // name of the runtime dependencies compose file - - serviceName := common.ElasticAgentServiceName // name of the service - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", serviceName, 1) // name of the container + containerName := fts.getContainerName(agentInstaller, 1) // name of the container // enroll the agent with a new token - enrollmentKey, err := fts.kibanaClient.CreateEnrollmentAPIKey(fts.FleetPolicy) + policy := fts.Policy + if bootstrapFleetServer { + policy = fts.FleetServerPolicy + } + + enrollmentKey, err := fts.kibanaClient.CreateEnrollmentAPIKey(policy) if err != nil { return err } @@ -325,7 +308,7 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(i fts.CurrentTokenID = enrollmentKey.ID var fleetConfig *kibana.FleetConfig - fleetConfig, err = deployAgentToFleet(agentInstaller, containerName, fts.CurrentToken, bootstrapFleetServer) + fleetConfig, err = deployAgentToFleet(agentInstaller, containerName, fts.CurrentToken, fts.FleetServerHostname) fts.Cleanup = true if err != nil { @@ -350,16 +333,31 @@ func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerAndFleetServer(i return err } +// getContainerName returns the current container name for the service: +// we are using the Docker client instead of docker-compose because it does not support +// returning the output of a command: it simply returns error level +func (fts *FleetTestSuite) 
getContainerName(i installer.ElasticAgentInstaller, index int) string { + return fmt.Sprintf("%s_%s_%s_%d", i.Profile, i.Image, common.ElasticAgentServiceName, index) +} + +// getServiceName returns the current service name, the one defined at the docker compose +func (fts *FleetTestSuite) getServiceName(i installer.ElasticAgentInstaller) string { + return i.Image +} + func (fts *FleetTestSuite) getInstaller() installer.ElasticAgentInstaller { + bootstrappedAgent := fts.FleetServerHostname == "" + + key := fmt.Sprintf("%s-%s-%s-%t", fts.Image, fts.InstallerType, fts.Version, bootstrappedAgent) // check if the agent is already cached - if i, exists := fts.Installers[fts.Image+"-"+fts.InstallerType+"-"+fts.Version]; exists { + if i, exists := fts.Installers[key]; exists { return i } - agentInstaller := installer.GetElasticAgentInstaller(fts.Image, fts.InstallerType, fts.Version) + agentInstaller := installer.GetElasticAgentInstaller(fts.Image, fts.InstallerType, fts.Version, fts.FleetServerHostname) // cache the new installer - fts.Installers[fts.Image+"-"+fts.InstallerType+"-"+fts.Version] = agentInstaller + fts.Installers[key] = agentInstaller return agentInstaller } @@ -419,11 +417,7 @@ func (fts *FleetTestSuite) processStateChangedOnTheHost(process string, state st return err } - // name of the container for the service: - // we are using the Docker client instead of docker-compose - // because it does not support returning the output of a - // command: it simply returns error level - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 1) + containerName := fts.getContainerName(agentInstaller, 1) return docker.CheckProcessStateOnTheHost(containerName, process, "stopped", common.TimeoutFactor) } @@ -520,13 +514,7 @@ func theAgentIsListedInFleetWithStatus(desiredStatus string, hostname string) er func (fts *FleetTestSuite) theFileSystemAgentFolderIsEmpty() error { agentInstaller := fts.getInstaller() - profile := agentInstaller.Profile // name of the runtime dependencies compose file - - // name of the container for the service: - // we are using the Docker client instead of docker-compose - // because it does not support returning the output of a - // command: it simply returns error level - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 1) + containerName := fts.getContainerName(agentInstaller, 1) content, err := agentInstaller.ListElasticAgentWorkingDirContent(containerName) if err != nil { @@ -543,16 +531,13 @@ func (fts *FleetTestSuite) theFileSystemAgentFolderIsEmpty() error { func (fts *FleetTestSuite) theHostIsRestarted() error { agentInstaller := fts.getInstaller() - profile := agentInstaller.Profile // name of the runtime dependencies compose file - image := agentInstaller.Image // image of the service - service := agentInstaller.Service // name of the service - - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 1) + containerName := fts.getContainerName(agentInstaller, 1) _, err := shell.Execute(context.Background(), ".", "docker", "stop", containerName) if err != nil { log.WithFields(log.Fields{ - "image": image, - "service": service, + "containerName": containerName, + "image": agentInstaller.Image, + "service": agentInstaller.Service, }).Error("Could not stop the service") } @@ -561,14 +546,16 @@ func (fts *FleetTestSuite) theHostIsRestarted() error { _, err = shell.Execute(context.Background(), ".", 
"docker", "start", containerName) if err != nil { log.WithFields(log.Fields{ - "image": image, - "service": service, + "containerName": containerName, + "image": agentInstaller.Image, + "service": agentInstaller.Service, }).Error("Could not start the service") } log.WithFields(log.Fields{ - "image": image, - "service": service, + "containerName": containerName, + "image": agentInstaller.Image, + "service": agentInstaller.Service, }).Debug("The service has been restarted") return nil } @@ -646,7 +633,7 @@ func (fts *FleetTestSuite) theAgentIsReenrolledOnTheHost() error { // during an unenroll the fleet server exits as there is no longer // and agent id associated with the enrollment. When fleet server // restarts it needs a new agent to associate with the boostrap - cfg, err := kibana.NewFleetConfig(fts.CurrentToken, true, false) + cfg, err := kibana.NewFleetConfig(fts.CurrentToken, fts.FleetServerHostname) if err != nil { return err } @@ -679,8 +666,12 @@ func (fts *FleetTestSuite) theEnrollmentTokenIsRevoked() error { } func (fts *FleetTestSuite) thePolicyShowsTheDatasourceAdded(packageName string) error { + return thePolicyShowsTheDatasourceAdded(fts.kibanaClient, fts.FleetServerPolicy, packageName) +} + +func thePolicyShowsTheDatasourceAdded(client *kibana.Client, policy kibana.Policy, packageName string) error { log.WithFields(log.Fields{ - "policyID": fts.FleetPolicy.ID, + "policyID": policy.ID, "package": packageName, }).Trace("Checking if the policy shows the package added") @@ -690,11 +681,11 @@ func (fts *FleetTestSuite) thePolicyShowsTheDatasourceAdded(packageName string) exp := common.GetExponentialBackOff(maxTimeout) configurationIsPresentFn := func() error { - packagePolicy, err := fts.kibanaClient.GetIntegrationFromAgentPolicy(packageName, fts.FleetPolicy) + packagePolicy, err := client.GetIntegrationFromAgentPolicy(packageName, policy) if err != nil { log.WithFields(log.Fields{ "packagePolicy": packagePolicy, - "policy": fts.FleetPolicy, + "policy": policy, "retry": retryCount, "error": err, }).Warn("The integration was not found in the policy") @@ -715,13 +706,17 @@ func (fts *FleetTestSuite) thePolicyShowsTheDatasourceAdded(packageName string) } func (fts *FleetTestSuite) theIntegrationIsOperatedInThePolicy(packageName string, action string) error { + return theIntegrationIsOperatedInThePolicy(fts.kibanaClient, fts.FleetServerPolicy, packageName, action) +} + +func theIntegrationIsOperatedInThePolicy(client *kibana.Client, policy kibana.Policy, packageName string, action string) error { log.WithFields(log.Fields{ "action": action, - "policy": fts.FleetPolicy, + "policy": policy, "package": packageName, }).Trace("Doing an operation for a package on a policy") - integration, err := fts.kibanaClient.GetIntegrationByPackageName(packageName) + integration, err := client.GetIntegrationByPackageName(packageName) if err != nil { return err } @@ -731,44 +726,20 @@ func (fts *FleetTestSuite) theIntegrationIsOperatedInThePolicy(packageName strin Name: integration.Name, Description: integration.Title, Namespace: "default", - PolicyID: fts.FleetPolicy.ID, + PolicyID: policy.ID, Enabled: true, Package: integration, Inputs: []kibana.Input{}, } + packageDataStream.Inputs = inputs(integration.Name) - if strings.EqualFold(integration.Name, "linux") { - packageDataStream.Inputs = []kibana.Input{ - { - Type: "linux/metrics", - Enabled: true, - Streams: []interface{}{ - map[string]interface{}{ - "id": "linux/metrics-linux.memory-" + uuid.New().String(), - "enabled": true, - "data_stream": 
map[string]interface{}{ - "dataset": "linux.memory", - "type": "metrics", - }, - }, - }, - Vars: map[string]kibana.Var{ - "period": { - Value: "1s", - Type: "string", - }, - }, - }, - } - } - - return fts.kibanaClient.AddIntegrationToPolicy(packageDataStream) + return client.AddIntegrationToPolicy(packageDataStream) } else if strings.ToLower(action) == actionREMOVED { - packageDataStream, err := fts.kibanaClient.GetIntegrationFromAgentPolicy(integration.Name, fts.FleetPolicy) + packageDataStream, err := client.GetIntegrationFromAgentPolicy(integration.Name, policy) if err != nil { return err } - return fts.kibanaClient.DeleteIntegrationFromPolicy(packageDataStream) + return client.DeleteIntegrationFromPolicy(packageDataStream) } return nil @@ -857,13 +828,8 @@ func (fts *FleetTestSuite) theHostNameIsShownInTheAdminViewInTheSecurityApp(stat return nil } -func (fts *FleetTestSuite) anIntegrationIsSuccessfullyDeployedWithAgentAndInstaller(integration string, image string, agentInstaller string) error { - err := fts.anAgentIsDeployedToFleetWithInstallerInFleetMode(image, agentInstaller) - if err != nil { - return err - } - - err = fts.theAgentIsListedInFleetWithStatus("online") +func (fts *FleetTestSuite) anIntegrationIsSuccessfullyDeployedWithAgentAndInstaller(integration string, image string, installerType string) error { + err := fts.anAgentIsDeployedToFleetWithInstaller(image, installerType) if err != nil { return err } @@ -937,7 +903,7 @@ func (fts *FleetTestSuite) thePolicyIsUpdatedToHaveMode(name string, mode string return godog.ErrPending } - packageDS, err := fts.kibanaClient.GetIntegrationFromAgentPolicy("endpoint", fts.FleetPolicy) + packageDS, err := fts.kibanaClient.GetIntegrationFromAgentPolicy("endpoint", fts.FleetServerPolicy) if err != nil { return err @@ -971,7 +937,7 @@ func (fts *FleetTestSuite) thePolicyWillReflectTheChangeInTheSecurityApp() error return err } - pkgPolicy, err := fts.kibanaClient.GetIntegrationFromAgentPolicy("endpoint", fts.FleetPolicy) + pkgPolicy, err := fts.kibanaClient.GetIntegrationFromAgentPolicy("endpoint", fts.FleetServerPolicy) if err != nil { return err } @@ -1035,11 +1001,9 @@ func (fts *FleetTestSuite) anAttemptToEnrollANewAgentFails() error { agentInstaller := fts.getInstaller() - profile := agentInstaller.Profile // name of the runtime dependencies compose file - - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, fts.Image+"-systemd", common.ElasticAgentServiceName, 2) // name of the new container + containerName := fts.getContainerName(agentInstaller, 2) // name of the new container - fleetConfig, err := deployAgentToFleet(agentInstaller, containerName, fts.CurrentToken, false) + fleetConfig, err := deployAgentToFleet(agentInstaller, containerName, fts.CurrentToken, fts.FleetServerHostname) // the installation process for TAR includes the enrollment if agentInstaller.InstallerType != "tar" { if err != nil { @@ -1177,7 +1141,7 @@ func (fts *FleetTestSuite) checkDataStream() error { return err } -func deployAgentToFleet(agentInstaller installer.ElasticAgentInstaller, containerName string, token string, bootstrapFleetServer bool) (*kibana.FleetConfig, error) { +func deployAgentToFleet(agentInstaller installer.ElasticAgentInstaller, containerName string, token string, fleetServerHost string) (*kibana.FleetConfig, error) { profile := agentInstaller.Profile // name of the runtime dependencies compose file service := agentInstaller.Service // name of the service serviceTag := agentInstaller.Tag // docker tag of the service @@ -1208,7 +1172,7 
@@ func deployAgentToFleet(agentInstaller installer.ElasticAgentInstaller, containe
 		return nil, err
 	}
 
-	cfg, cfgError := kibana.NewFleetConfig(token, bootstrapFleetServer, false)
+	cfg, cfgError := kibana.NewFleetConfig(token, fleetServerHost)
 	if cfgError != nil {
 		return nil, cfgError
 	}
@@ -1220,3 +1184,46 @@ func deployAgentToFleet(agentInstaller installer.ElasticAgentInstaller, containe
 
 	return cfg, agentInstaller.PostInstallFn()
 }
+
+func inputs(integration string) []kibana.Input {
+	switch integration {
+	case "apm":
+		return []kibana.Input{
+			{
+				Type:    "apm",
+				Enabled: true,
+				Streams: []interface{}{},
+				Vars: map[string]kibana.Var{
+					"host": {
+						Value: "localhost:8200",
+						Type:  "string",
+					},
+				},
+			},
+		}
+	case "linux":
+		return []kibana.Input{
+			{
+				Type:    "linux/metrics",
+				Enabled: true,
+				Streams: []interface{}{
+					map[string]interface{}{
+						"id":      "linux/metrics-linux.memory-" + uuid.New().String(),
+						"enabled": true,
+						"data_stream": map[string]interface{}{
+							"dataset": "linux.memory",
+							"type":    "metrics",
+						},
+					},
+				},
+				Vars: map[string]kibana.Var{
+					"period": {
+						Value: "1s",
+						Type:  "string",
+					},
+				},
+			},
+		}
+	}
+	return []kibana.Input{}
+}
diff --git a/e2e/_suites/fleet/fleet_server.go b/e2e/_suites/fleet/fleet_server.go
index e634ade08e..daecf45fd9 100644
--- a/e2e/_suites/fleet/fleet_server.go
+++ b/e2e/_suites/fleet/fleet_server.go
@@ -4,7 +4,46 @@
 package main
 
-func (fts *FleetTestSuite) anAgentIsDeployedToFleetWithInstallerInFleetMode(image string, installerType string) error {
+import log "github.com/sirupsen/logrus"
+
+func (fts *FleetTestSuite) bootstrapFleetServerWithInstaller(image string, installerType string) error {
 	fts.ElasticAgentStopped = true
-	return fts.anAgentIsDeployedToFleetWithInstallerAndFleetServer(image, installerType, true)
+
+	log.WithFields(log.Fields{
+		"image":     image,
+		"installer": installerType,
+	}).Trace("Bootstrapping fleet server for the agent")
+
+	err := fts.anAgentIsDeployedToFleetWithInstallerAndFleetServer(image, installerType, true)
+	if err != nil {
+		log.WithFields(log.Fields{
+			"error":     err,
+			"image":     image,
+			"installer": installerType,
+		}).Error("Fleet server could not be bootstrapped for the agent")
+		return err
+	}
+
+	log.WithFields(log.Fields{
+		"fleetServerHostname": fts.FleetServerHostname,
+		"image":               image,
+		"installer":           installerType,
+	}).Info("Fleet server was bootstrapped for the agent")
+
+	err = fts.theAgentIsListedInFleetWithStatus("online")
+	if err != nil {
+		log.WithFields(log.Fields{
+			"error":               err,
+			"fleetServerHostname": fts.FleetServerHostname,
+			"image":               image,
+			"installer":           installerType,
+		}).Error("Fleet server could not reach the online status")
+		return err
+	}
+
+	// the new compose files for fleet-server (centos/debian) set the hostname for
+	// the service; we store it here so that subsequent steps resolve their
+	// installer using the fleet-server host
+	fts.FleetServerHostname = "fleet-server-" + image
+
+	return nil
 }
diff --git a/e2e/_suites/fleet/ingest_manager_test.go b/e2e/_suites/fleet/ingest_manager_test.go
index 44c748d234..c2366e2785 100644
--- a/e2e/_suites/fleet/ingest_manager_test.go
+++ b/e2e/_suites/fleet/ingest_manager_test.go
@@ -146,6 +146,7 @@ func InitializeIngestManagerTestSuite(ctx *godog.TestSuiteContext) {
 	if err != nil {
 		log.WithFields(log.Fields{
 			"profile": profile,
+			"error":   err.Error(),
 		}).Fatal("Could not run the runtime dependencies for the profile.")
 	}
 
diff --git a/e2e/_suites/fleet/stand-alone.go b/e2e/_suites/fleet/stand-alone.go
index fd4d723cfe..e52db64915 100644
--- a/e2e/_suites/fleet/stand-alone.go +++ b/e2e/_suites/fleet/stand-alone.go @@ -7,6 +7,8 @@ package main import ( "context" "fmt" + "github.com/elastic/e2e-testing/cli/config" + "path" "strings" "time" @@ -26,9 +28,10 @@ import ( // StandAloneTestSuite represents the scenarios for Stand-alone-mode type StandAloneTestSuite struct { - Cleanup bool - Hostname string - Image string + Cleanup bool + Hostname string + Image string + FleetPolicy kibana.Policy // date controls for queries AgentStoppedDate time.Time RuntimeDependenciesStartDate time.Time @@ -50,15 +53,38 @@ func (sats *StandAloneTestSuite) afterScenario() { } else { log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped") } + + sats.kibanaClient.DeleteAllPolicies(sats.FleetPolicy) } func (sats *StandAloneTestSuite) contributeSteps(s *godog.ScenarioContext) { s.Step(`^a "([^"]*)" stand-alone agent is deployed$`, sats.aStandaloneAgentIsDeployed) - s.Step(`^a "([^"]*)" stand-alone agent is deployed with fleet server mode$`, sats.aStandaloneAgentIsDeployedWithFleetServerMode) + s.Step(`^a "([^"]*)" stand-alone agent is deployed with fleet server mode$`, sats.bootstrapFleetServerFromAStandaloneAgent) + s.Step(`^a "([^"]*)" stand-alone agent is deployed with fleet server mode on cloud$`, sats.aStandaloneAgentIsDeployedWithFleetServerModeOnCloud) s.Step(`^there is new data in the index from agent$`, sats.thereIsNewDataInTheIndexFromAgent) s.Step(`^the "([^"]*)" docker container is stopped$`, sats.theDockerContainerIsStopped) s.Step(`^there is no new data in the index after agent shuts down$`, sats.thereIsNoNewDataInTheIndexAfterAgentShutsDown) s.Step(`^the stand-alone agent is listed in Fleet as "([^"]*)"$`, sats.theStandaloneAgentIsListedInFleetWithStatus) + s.Step(`^the "([^"]*)" integration is added to the policy$`, sats.theIntegrationIsAddedToThePolicy) + s.Step(`^the "([^"]*)" datasource is shown in the policy$`, sats.thePolicyShowsTheDatasourceAdded) +} + +func (sats *StandAloneTestSuite) theIntegrationIsAddedToThePolicy(packageName string) error { + return theIntegrationIsOperatedInThePolicy(sats.kibanaClient, sats.FleetPolicy, packageName, "added") +} + +func (sats *StandAloneTestSuite) thePolicyShowsTheDatasourceAdded(packageName string) error { + return thePolicyShowsTheDatasourceAdded(sats.kibanaClient, sats.FleetPolicy, packageName) +} + +func (sats *StandAloneTestSuite) aStandaloneAgentIsDeployedWithFleetServerModeOnCloud(image string) error { + fleetPolicy, err := sats.kibanaClient.GetDefaultPolicy(true) + if err != nil { + return err + } + sats.FleetPolicy = fleetPolicy + volume := path.Join(config.OpDir(), "compose", "services", "elastic-agent", "apm-legacy") + return sats.startAgent(image, "docker-compose-cloud.yml", map[string]string{"apmVolume": volume}) } func (sats *StandAloneTestSuite) theStandaloneAgentIsListedInFleetWithStatus(desiredStatus string) error { @@ -87,15 +113,20 @@ func (sats *StandAloneTestSuite) theStandaloneAgentIsListedInFleetWithStatus(des return nil } -func (sats *StandAloneTestSuite) aStandaloneAgentIsDeployedWithFleetServerMode(image string) error { - return sats.startAgent(image, map[string]string{"fleetServerMode": "1"}) +func (sats *StandAloneTestSuite) bootstrapFleetServerFromAStandaloneAgent(image string) error { + fleetPolicy, err := sats.kibanaClient.GetDefaultPolicy(true) + if err != nil { + return err + } + sats.FleetPolicy = fleetPolicy + return sats.startAgent(image, "", map[string]string{"fleetServerMode": "1"}) } func 
(sats *StandAloneTestSuite) aStandaloneAgentIsDeployed(image string) error { - return sats.startAgent(image, nil) + return sats.startAgent(image, "", nil) } -func (sats *StandAloneTestSuite) startAgent(image string, env map[string]string) error { +func (sats *StandAloneTestSuite) startAgent(image string, composeFilename string, env map[string]string) error { log.Trace("Deploying an agent to Fleet") @@ -107,7 +138,7 @@ func (sats *StandAloneTestSuite) startAgent(image string, env map[string]string) // load the docker images that were already: // a. downloaded from the GCP bucket // b. fetched from the local beats binaries - dockerInstaller := installer.GetElasticAgentInstaller("docker", image, common.AgentVersion) + dockerInstaller := installer.GetElasticAgentInstaller("docker", image, common.AgentVersion, "") dockerInstaller.PreInstallFn() @@ -133,7 +164,8 @@ func (sats *StandAloneTestSuite) startAgent(image string, env map[string]string) common.ProfileEnv[k] = v } - err := serviceManager.AddServicesToCompose(context.Background(), common.FleetProfileName, []string{common.ElasticAgentServiceName}, common.ProfileEnv) + err := serviceManager.AddServicesToCompose(context.Background(), common.FleetProfileName, + []string{common.ElasticAgentServiceName}, common.ProfileEnv, composeFilename) if err != nil { log.Error("Could not deploy the elastic-agent") return err diff --git a/e2e/_suites/fleet/world.go b/e2e/_suites/fleet/world.go index 6fa758a44f..0560e1d2d8 100644 --- a/e2e/_suites/fleet/world.go +++ b/e2e/_suites/fleet/world.go @@ -19,11 +19,14 @@ type IngestManagerTestSuite struct { func (imts *IngestManagerTestSuite) processStateOnTheHost(process string, state string) error { profile := common.FleetProfileName - serviceName := common.ElasticAgentServiceName - containerName := fmt.Sprintf("%s_%s_%s_%d", profile, imts.Fleet.Image+"-systemd", serviceName, 1) + var containerName string + if imts.StandAlone.Hostname != "" { - containerName = fmt.Sprintf("%s_%s_%d", profile, serviceName, 1) + containerName = fmt.Sprintf("%s_%s_%d", profile, common.ElasticAgentServiceName, 1) + } else { + agentInstaller := imts.Fleet.getInstaller() + containerName = imts.Fleet.getContainerName(agentInstaller, 1) } return docker.CheckProcessStateOnTheHost(containerName, process, state, common.TimeoutFactor) diff --git a/e2e/_suites/helm/helm_charts_test.go b/e2e/_suites/helm/helm_charts_test.go index b0d81b35c6..38ce603d5d 100644 --- a/e2e/_suites/helm/helm_charts_test.go +++ b/e2e/_suites/helm/helm_charts_test.go @@ -675,7 +675,8 @@ func InitializeHelmChartTestSuite(ctx *godog.TestSuiteContext) { suiteContext = apm.ContextWithSpan(suiteContext, suiteParentSpan) defer suiteParentSpan.End() - if elasticAPMActive { + elasticAPMEnvironment := shell.GetEnv("ELASTIC_APM_ENVIRONMENT", "ci") + if elasticAPMActive && elasticAPMEnvironment == "local" { serviceManager := compose.NewServiceManager() env := map[string]string{ diff --git a/e2e/_suites/kubernetes-autodiscover/autodiscover_test.go b/e2e/_suites/kubernetes-autodiscover/autodiscover_test.go index 561cfb01ae..667db8be18 100644 --- a/e2e/_suites/kubernetes-autodiscover/autodiscover_test.go +++ b/e2e/_suites/kubernetes-autodiscover/autodiscover_test.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ package main import ( @@ -18,22 +22,42 @@ import ( messages "github.com/cucumber/messages-go/v10" log "github.com/sirupsen/logrus" + "github.com/elastic/e2e-testing/cli/config" + "github.com/elastic/e2e-testing/internal/common" + "github.com/elastic/e2e-testing/internal/docker" + "github.com/elastic/e2e-testing/internal/kubernetes" "github.com/elastic/e2e-testing/internal/shell" "github.com/elastic/e2e-testing/internal/utils" ) +var beatVersions = map[string]string{} + const defaultBeatVersion = "8.0.0-SNAPSHOT" -const defaultEventsWaitTimeout = 120 * time.Second -const defaultDeployWaitTimeout = 120 * time.Second + +var defaultEventsWaitTimeout = 60 * time.Second +var defaultDeployWaitTimeout = 60 * time.Second + +func init() { + // initialise timeout factor + common.TimeoutFactor = shell.GetEnvInteger("TIMEOUT_FACTOR", common.TimeoutFactor) + + defaultEventsWaitTimeout = defaultEventsWaitTimeout * time.Duration(common.TimeoutFactor) + defaultDeployWaitTimeout = defaultDeployWaitTimeout * time.Duration(common.TimeoutFactor) +} type podsManager struct { - kubectl kubernetesControl + kubectl kubernetes.Control ctx context.Context } func (m *podsManager) executeTemplateFor(podName string, writer io.Writer, options []string) error { path := filepath.Join("testdata/templates", sanitizeName(podName)+".yml.tmpl") + err := m.configureDockerImage(podName) + if err != nil { + return err + } + usedOptions := make(map[string]bool) funcs := template.FuncMap{ "option": func(o string) bool { @@ -49,7 +73,7 @@ func (m *podsManager) executeTemplateFor(podName string, writer io.Writer, optio return utils.GetDockerNamespaceEnvVar("beats") }, "beats_version": func() string { - return shell.GetEnv("GITHUB_CHECK_SHA1", shell.GetEnv("BEAT_VERSION", defaultBeatVersion)) + return beatVersions[podName] }, "namespace": func() string { return m.kubectl.Namespace @@ -85,6 +109,64 @@ func (m *podsManager) executeTemplateFor(podName string, writer io.Writer, optio return nil } +func (m *podsManager) configureDockerImage(podName string) error { + if podName != "filebeat" && podName != "heartbeat" && podName != "metricbeat" { + log.Debugf("Not processing custom binaries for pod: %s. 
Only [filebeat, heartbeat, metricbeat] will be processed", podName) + return nil + } + + // we are caching the versions by pod to avoid downloading and loading/tagging the Docker image multiple times + if beatVersions[podName] != "" { + log.Tracef("The beat version was already loaded: %s", beatVersions[podName]) + return nil + } + + beatVersion := shell.GetEnv("BEAT_VERSION", defaultBeatVersion) + + useCISnapshots := shell.GetEnvBool("BEATS_USE_CI_SNAPSHOTS") + beatsLocalPath := shell.GetEnv("BEATS_LOCAL_PATH", "") + if useCISnapshots || beatsLocalPath != "" { + log.Debugf("Configuring Docker image for %s", podName) + + // this method will detect if the GITHUB_CHECK_SHA1 variable is set + artifactName := utils.BuildArtifactName(podName, beatVersion, defaultBeatVersion, "linux", "amd64", "tar.gz", true) + + imagePath, err := utils.FetchBeatsBinary(artifactName, podName, beatVersion, defaultBeatVersion, common.TimeoutFactor, true) + if err != nil { + return err + } + + // load the TAR file into the docker host as a Docker image + err = docker.LoadImage(imagePath) + if err != nil { + return err + } + + beatVersion = beatVersion + "-amd64" + + // tag the image with the proper docker tag, including platform + err = docker.TagImage( + "docker.elastic.co/beats/"+podName+":"+defaultBeatVersion, + "docker.elastic.co/observability-ci/"+podName+":"+beatVersion, + ) + if err != nil { + return err + } + + // load PR image into kind + err = cluster.LoadImage(m.ctx, "docker.elastic.co/observability-ci/"+podName+":"+beatVersion) + if err != nil { + return err + } + + } + + log.Tracef("Caching beat version '%s' for %s", beatVersion, podName) + beatVersions[podName] = beatVersion + + return nil +} + func (m *podsManager) isDeleted(podName string, options []string) error { var buf bytes.Buffer err := m.executeTemplateFor(podName, &buf, options) @@ -380,24 +462,27 @@ func waitDuration(ctx context.Context, duration string) error { return nil } -var cluster kubernetesCluster +var cluster kubernetes.Cluster func InitializeTestSuite(ctx *godog.TestSuiteContext) { suiteContext, cancel := context.WithCancel(context.Background()) log.DeferExitHandler(cancel) ctx.BeforeSuite(func() { - err := cluster.initialize(suiteContext) + // init logger + config.Init() + + err := cluster.Initialize(suiteContext, "testdata/kind.yml") if err != nil { log.WithError(err).Fatal("Failed to initialize cluster") } log.DeferExitHandler(func() { - cluster.cleanup(suiteContext) + cluster.Cleanup(suiteContext) }) }) ctx.AfterSuite(func() { - cluster.cleanup(suiteContext) + cluster.Cleanup(suiteContext) cancel() }) } @@ -406,12 +491,12 @@ func InitializeScenario(ctx *godog.ScenarioContext) { scenarioCtx, cancel := context.WithCancel(context.Background()) log.DeferExitHandler(cancel) - var kubectl kubernetesControl + var kubectl kubernetes.Control var pods podsManager - ctx.BeforeScenario(func(*messages.Pickle) { + ctx.BeforeScenario(func(p *messages.Pickle) { kubectl = cluster.Kubectl().WithNamespace(scenarioCtx, "") if kubectl.Namespace != "" { - log.Debugf("Running scenario in namespace: %s", kubectl.Namespace) + log.Debugf("Running scenario %s in namespace: %s", p.Name, kubectl.Namespace) } pods.kubectl = kubectl pods.ctx = scenarioCtx diff --git a/e2e/_suites/kubernetes-autodiscover/testdata/templates/filebeat.yml.tmpl b/e2e/_suites/kubernetes-autodiscover/testdata/templates/filebeat.yml.tmpl index 70cfce3cb6..6bf3627d4d 100644 --- a/e2e/_suites/kubernetes-autodiscover/testdata/templates/filebeat.yml.tmpl +++ 
diff --git a/e2e/_suites/kubernetes-autodiscover/testdata/templates/filebeat.yml.tmpl b/e2e/_suites/kubernetes-autodiscover/testdata/templates/filebeat.yml.tmpl
index 70cfce3cb6..6bf3627d4d 100644
--- a/e2e/_suites/kubernetes-autodiscover/testdata/templates/filebeat.yml.tmpl
+++ b/e2e/_suites/kubernetes-autodiscover/testdata/templates/filebeat.yml.tmpl
@@ -45,6 +45,7 @@ spec:
       containers:
         - name: filebeat
           image: docker.elastic.co/{{ beats_namespace }}/filebeat:{{ beats_version }}
+          imagePullPolicy: IfNotPresent
           args: [
             "-c", "/etc/filebeat.yml",
             "-e",
diff --git a/e2e/_suites/kubernetes-autodiscover/testdata/templates/heartbeat.yml.tmpl b/e2e/_suites/kubernetes-autodiscover/testdata/templates/heartbeat.yml.tmpl
index aa02eca6ad..88e6e9dab4 100644
--- a/e2e/_suites/kubernetes-autodiscover/testdata/templates/heartbeat.yml.tmpl
+++ b/e2e/_suites/kubernetes-autodiscover/testdata/templates/heartbeat.yml.tmpl
@@ -64,6 +64,7 @@ spec:
       containers:
         - name: heartbeat
           image: docker.elastic.co/{{ beats_namespace }}/heartbeat:{{ beats_version }}
+          imagePullPolicy: IfNotPresent
           args: [
             "-c", "/etc/heartbeat.yml",
             "-e",
diff --git a/e2e/_suites/kubernetes-autodiscover/testdata/templates/metricbeat.yml.tmpl b/e2e/_suites/kubernetes-autodiscover/testdata/templates/metricbeat.yml.tmpl
index 042bb56bd7..a21a9ea43e 100644
--- a/e2e/_suites/kubernetes-autodiscover/testdata/templates/metricbeat.yml.tmpl
+++ b/e2e/_suites/kubernetes-autodiscover/testdata/templates/metricbeat.yml.tmpl
@@ -41,6 +41,7 @@ spec:
       containers:
         - name: metricbeat
          image: docker.elastic.co/{{ beats_namespace }}/metricbeat:{{ beats_version }}
+          imagePullPolicy: IfNotPresent
           args: [
             "-c", "/etc/metricbeat.yml",
             "-e",
diff --git a/e2e/_suites/metricbeat/metricbeat_test.go b/e2e/_suites/metricbeat/metricbeat_test.go
index 2a14879c65..de3ef0c8f5 100644
--- a/e2e/_suites/metricbeat/metricbeat_test.go
+++ b/e2e/_suites/metricbeat/metricbeat_test.go
@@ -281,7 +281,8 @@ func InitializeMetricbeatTestSuite(ctx *godog.TestSuiteContext) {
 		}).Fatal("The Elasticsearch cluster could not get the healthy status")
 	}
 
-	if elasticAPMActive {
+	elasticAPMEnvironment := shell.GetEnv("ELASTIC_APM_ENVIRONMENT", "ci")
+	if elasticAPMActive && elasticAPMEnvironment == "local" {
 		steps.AddAPMServicesForInstrumentation(suiteContext, "metricbeat", stackVersion, true, env)
 	}
 })
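The new guard in `metricbeat_test.go` defaults `ELASTIC_APM_ENVIRONMENT` to `ci`, so local APM services are only started when a developer explicitly opts in. A minimal sketch of the gate, where `getEnv` stands in for the suite's `shell.GetEnv`:

```go
package main

import (
	"fmt"
	"os"
)

// getEnv is a stand-in for the suite's shell.GetEnv helper.
func getEnv(name, fallback string) string {
	if v := os.Getenv(name); v != "" {
		return v
	}
	return fallback
}

func main() {
	elasticAPMActive := true // assume instrumentation was requested

	// defaulting to "ci" means APM services only start when a developer
	// explicitly exports ELASTIC_APM_ENVIRONMENT=local
	if elasticAPMActive && getEnv("ELASTIC_APM_ENVIRONMENT", "ci") == "local" {
		fmt.Println("starting local APM services for instrumentation")
	}
}
```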
diff --git a/internal/common/defaults.go b/internal/common/defaults.go
index dc98c83f59..664f0f99e4 100644
--- a/internal/common/defaults.go
+++ b/internal/common/defaults.go
@@ -19,6 +19,9 @@ const ElasticEndpointIntegrationTitle = "Endpoint Security"
 // FleetProfileName the name of the profile to run the runtime, backend services
 const FleetProfileName = "fleet"
 
+// FleetServerAgentServiceName the name of the service for the Elastic Agent acting as a Fleet Server
+const FleetServerAgentServiceName = "fleet-server"
+
 // AgentVersionBase is the base version of the agent to use
 var AgentVersionBase = "8.0.0-SNAPSHOT"
 
diff --git a/internal/compose/compose.go b/internal/compose/compose.go
index cc6374282a..a2f3e2cff5 100644
--- a/internal/compose/compose.go
+++ b/internal/compose/compose.go
@@ -19,7 +19,7 @@ import (
 
 // ServiceManager manages lifecycle of a service
 type ServiceManager interface {
-	AddServicesToCompose(ctx context.Context, profile string, composeNames []string, env map[string]string) error
+	AddServicesToCompose(ctx context.Context, profile string, composeNames []string, env map[string]string, composeFilename ...string) error
 	ExecCommandInService(profile string, image string, serviceName string, cmds []string, env map[string]string, detach bool) error
 	RemoveServicesFromCompose(ctx context.Context, profile string, composeNames []string, env map[string]string) error
 	RunCommand(profile string, composeNames []string, composeArgs []string, env map[string]string) error
@@ -37,7 +37,7 @@ func NewServiceManager() ServiceManager {
 }
 
 // AddServicesToCompose adds services to a running docker compose
-func (sm *DockerServiceManager) AddServicesToCompose(ctx context.Context, profile string, composeNames []string, env map[string]string) error {
+func (sm *DockerServiceManager) AddServicesToCompose(ctx context.Context, profile string, composeNames []string, env map[string]string, composeFilename ...string) error {
 	span, _ := apm.StartSpanOptions(ctx, "Add services to Docker Compose", "docker-compose.services.add", apm.SpanOptions{
 		Parent: apm.SpanFromContext(ctx).TraceContext(),
 	})
@@ -56,7 +56,7 @@ func (sm *DockerServiceManager) AddServicesToCompose(ctx context.Context, profil
 		persistedEnv[k] = v
 	}
 
-	err := executeCompose(sm, true, newComposeNames, []string{"up", "-d"}, persistedEnv)
+	err := executeCompose(true, newComposeNames, []string{"up", "-d"}, persistedEnv, composeFilename...)
 	if err != nil {
 		return err
 	}
@@ -115,7 +115,7 @@ func (sm *DockerServiceManager) RemoveServicesFromCompose(ctx context.Context, p
 		command := []string{"rm", "-fvs"}
 		command = append(command, composeName)
 
-		err := executeCompose(sm, true, newComposeNames, command, persistedEnv)
+		err := executeCompose(true, newComposeNames, command, persistedEnv)
 		if err != nil {
 			log.WithFields(log.Fields{
 				"command": command,
@@ -135,7 +135,7 @@ func (sm *DockerServiceManager) RemoveServicesFromCompose(ctx context.Context, p
 
 // RunCommand executes a docker-compose command in a running docker compose
 func (sm *DockerServiceManager) RunCommand(profile string, composeNames []string, composeArgs []string, env map[string]string) error {
-	return executeCompose(sm, true, composeNames, composeArgs, env)
+	return executeCompose(true, composeNames, composeArgs, env)
 }
 
 // RunCompose runs a docker compose by its name
@@ -145,7 +145,7 @@ func (sm *DockerServiceManager) RunCompose(ctx context.Context, isProfile bool,
 	})
 	defer span.End()
 
-	return executeCompose(sm, isProfile, composeNames, []string{"up", "-d"}, env)
+	return executeCompose(isProfile, composeNames, []string{"up", "-d"}, env)
 }
 
 // StopCompose stops a docker compose by its name
@@ -175,7 +175,7 @@ func (sm *DockerServiceManager) StopCompose(ctx context.Context, isProfile bool,
 	}
 
 	persistedEnv := state.Recover(ID, config.Op.Workspace)
-	err := executeCompose(sm, isProfile, composeNames, []string{"down", "--remove-orphans"}, persistedEnv)
+	err := executeCompose(isProfile, composeNames, []string{"down", "--remove-orphans"}, persistedEnv)
 	if err != nil {
 		return fmt.Errorf("Could not stop compose file: %v - %v", composeFilePaths, err)
 	}
@@ -189,7 +189,7 @@ func (sm *DockerServiceManager) StopCompose(ctx context.Context, isProfile bool,
 	return nil
 }
 
-func executeCompose(sm *DockerServiceManager, isProfile bool, composeNames []string, command []string, env map[string]string) error {
+func executeCompose(isProfile bool, composeNames []string, command []string, env map[string]string, composeFilename ...string) error {
 	composeFilePaths := make([]string, len(composeNames))
 	for i, composeName := range composeNames {
 		b := false
@@ -197,7 +197,7 @@ func executeCompose(sm *DockerServiceManager, isProfile bool, composeNames []str
 			b = true
 		}
 
-		composeFilePath, err := config.GetComposeFile(b, composeName)
+		composeFilePath, err := config.GetComposeFile(b, composeName, composeFilename...)
 		if err != nil {
 			return fmt.Errorf("Could not get compose file: %s - %v", composeFilePath, err)
 		}
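A hypothetical caller of the new variadic `AddServicesToCompose` signature might look like this; the profile, service names and the `fleet-server-docker-compose.yml` filename are illustrative, not taken from the change:

```go
package main

import (
	"context"

	"github.com/elastic/e2e-testing/internal/compose"
)

func main() {
	ctx := context.Background()
	sm := compose.NewServiceManager()
	env := map[string]string{}

	// default behaviour is unchanged: with no extra filename, the
	// standard docker-compose.yml of the service is resolved
	_ = sm.AddServicesToCompose(ctx, "fleet", []string{"elastic-agent"}, env)

	// the new optional argument resolves a service-specific compose file
	// (filename is hypothetical)
	_ = sm.AddServicesToCompose(ctx, "fleet", []string{"elastic-agent"}, env,
		"fleet-server-docker-compose.yml")
}
```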
diff --git a/internal/installer/elasticagent.go b/internal/installer/elasticagent.go
index 8399089158..ec32ea4f6b 100644
--- a/internal/installer/elasticagent.go
+++ b/internal/installer/elasticagent.go
@@ -106,20 +106,22 @@ func downloadAgentBinary(artifactName string, artifact string, version string) (
 }
 
 // GetElasticAgentInstaller returns an installer from a docker image
-func GetElasticAgentInstaller(image string, installerType string, version string) ElasticAgentInstaller {
+func GetElasticAgentInstaller(image string, installerType string, version string, fleetServerHost string) ElasticAgentInstaller {
 	log.WithFields(log.Fields{
-		"image":     image,
-		"installer": installerType,
+		"fleetServerHost": fleetServerHost,
+		"image":           image,
+		"installer":       installerType,
+		"version":         version,
 	}).Debug("Configuring installer for the agent")
 
 	var installer ElasticAgentInstaller
 	var err error
 	if "centos" == image && "tar" == installerType {
-		installer, err = newTarInstaller("centos", "latest", version)
+		installer, err = newTarInstaller("centos", "latest", version, fleetServerHost)
 	} else if "centos" == image && "systemd" == installerType {
 		installer, err = newCentosInstaller("centos", "latest", version)
 	} else if "debian" == image && "tar" == installerType {
-		installer, err = newTarInstaller("debian", "stretch", version)
+		installer, err = newTarInstaller("debian", "stretch", version, fleetServerHost)
 	} else if "debian" == image && "systemd" == installerType {
 		installer, err = newDebianInstaller("debian", "stretch", version)
 	} else if "docker" == image && "default" == installerType {
diff --git a/internal/installer/tar.go b/internal/installer/tar.go
index 26d0288165..72cc698a08 100644
--- a/internal/installer/tar.go
+++ b/internal/installer/tar.go
@@ -145,8 +145,12 @@ func (i *TARPackage) WithVersion(version string) *TARPackage {
 }
 
 // newTarInstaller returns an instance of the TAR installer for a specific version
-func newTarInstaller(image string, tag string, version string) (ElasticAgentInstaller, error) {
+func newTarInstaller(image string, tag string, version string, fleetServerHost string) (ElasticAgentInstaller, error) {
 	dockerImage := image + "-systemd" // we want to consume systemd boxes
+	if fleetServerHost == "" {
+		dockerImage = "fleet-server-" + image
+	}
+
 	service := dockerImage
 	profile := common.FleetProfileName
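The empty-host convention in `newTarInstaller` boils down to the following sketch; `imageFor` is a hypothetical helper that mirrors the selection logic above:

```go
package main

import "fmt"

// imageFor mirrors newTarInstaller: an empty fleetServerHost means this
// agent must bootstrap Fleet Server itself, so it runs on the dedicated
// fleet-server box instead of the plain systemd one.
func imageFor(image, fleetServerHost string) string {
	dockerImage := image + "-systemd"
	if fleetServerHost == "" {
		dockerImage = "fleet-server-" + image
	}
	return dockerImage
}

func main() {
	fmt.Println(imageFor("centos", ""))             // fleet-server-centos
	fmt.Println(imageFor("centos", "fleet-server")) // centos-systemd
}
```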
diff --git a/internal/kibana/fleet.go b/internal/kibana/fleet.go
index 8ea654f6f2..01b8d7d13a 100644
--- a/internal/kibana/fleet.go
+++ b/internal/kibana/fleet.go
@@ -27,11 +27,13 @@ type FleetConfig struct {
 }
 
 // NewFleetConfig builds a new configuration for the fleet agent, defaulting ES credentials, URI and port.
-// If the 'bootstrappFleetServer' flag is true, the it will create the config for the initial fleet server
+// If the 'fleetServerHost' flag is empty, then it will create the config for the initial fleet server
 // used to bootstrap Fleet Server
-// If the 'fleetServerMode' flag is true, the it will create the config for an agent using an existing Fleet
+// If the 'fleetServerHost' flag is not empty, then it will create the config for an agent using an existing Fleet
 // Server to connect to Fleet. It will also retrieve the default policy ID for fleet server
-func NewFleetConfig(token string, bootstrapFleetServer bool, fleetServerMode bool) (*FleetConfig, error) {
+func NewFleetConfig(token string, fleetServerHost string) (*FleetConfig, error) {
+	bootstrapFleetServer := (fleetServerHost == "")
+
 	cfg := &FleetConfig{
 		BootstrapFleetServer: bootstrapFleetServer,
 		EnrollmentToken:      token,
@@ -41,7 +43,7 @@ func NewFleetConfig(token string, bootstrapFleetServer bool, fleetServerMode boo
 		KibanaPort:           5601,
 		KibanaURI:            "kibana",
 		FleetServerPort:      8220,
-		FleetServerURI:       "localhost",
+		FleetServerURI:       fleetServerHost,
 	}
 
 	client, err := NewClient()
@@ -49,7 +51,7 @@ func NewFleetConfig(token string, bootstrapFleetServer bool, fleetServerMode boo
 		return cfg, err
 	}
 
-	if fleetServerMode {
+	if !bootstrapFleetServer {
 		defaultFleetServerPolicy, err := client.GetDefaultPolicy(true)
 		if err != nil {
 			return nil, err
@@ -91,11 +93,11 @@ func (cfg FleetConfig) Flags() []string {
 	baseFlags := []string{"-e", "-v", "--force", "--insecure", "--enrollment-token=" + cfg.EnrollmentToken}
 
 	if common.AgentVersionBase == "8.0.0-SNAPSHOT" {
-		return append(baseFlags, "--url", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.FleetServerURI, cfg.FleetServerPort))
+		return append(baseFlags, "--url", fmt.Sprintf("https://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.FleetServerURI, cfg.FleetServerPort))
 	}
 
 	if cfg.ServerPolicyID != "" {
-		baseFlags = append(baseFlags, "--fleet-server-insecure-http", "--fleet-server", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.ElasticsearchURI, cfg.ElasticsearchPort), "--fleet-server-host=http://0.0.0.0", "--fleet-server-policy", cfg.ServerPolicyID)
+		baseFlags = append(baseFlags, "--fleet-server-insecure-http", "--fleet-server", fmt.Sprintf("https://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.ElasticsearchURI, cfg.ElasticsearchPort), "--fleet-server-host=http://0.0.0.0", "--fleet-server-policy", cfg.ServerPolicyID)
 	}
 
 	return append(baseFlags, "--kibana-url", fmt.Sprintf("http://%s@%s:%d", cfg.ElasticsearchCredentials, cfg.KibanaURI, cfg.KibanaPort))
diff --git a/internal/kibana/integrations.go b/internal/kibana/integrations.go
index 2c63dea9a4..8807cc287a 100644
--- a/internal/kibana/integrations.go
+++ b/internal/kibana/integrations.go
@@ -52,7 +52,7 @@ func (c *Client) DeleteIntegrationFromPolicy(packageDS PackageDataStream) error
 
 // GetIntegrations returns all available integrations
 func (c *Client) GetIntegrations() ([]IntegrationPackage, error) {
-	statusCode, respBody, err := c.get(fmt.Sprintf("%s/epm/packages", FleetAPI))
+	statusCode, respBody, err := c.get(fmt.Sprintf("%s/epm/packages?experimental=true", FleetAPI))
 
 	if err != nil {
 		log.WithFields(log.Fields{
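With these changes, on the `8.0.0-SNAPSHOT` branch `Flags()` enrolls against Fleet Server over HTTPS. A rough sketch of the flags it would produce, with placeholder credentials, token and host:

```go
package main

import "fmt"

func main() {
	// all values below are placeholders, not defaults from the change
	creds := "elastic:changeme"
	fleetServerHost := "fleet-server"
	port := 8220

	flags := []string{"-e", "-v", "--force", "--insecure", "--enrollment-token=abc123"}
	flags = append(flags, "--url",
		fmt.Sprintf("https://%s@%s:%d", creds, fleetServerHost, port))

	fmt.Println(flags)
	// [-e -v --force --insecure --enrollment-token=abc123
	//  --url https://elastic:changeme@fleet-server:8220]
}
```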
diff --git a/internal/kibana/policies.go b/internal/kibana/policies.go
index 7e7402b416..5667a499ae 100644
--- a/internal/kibana/policies.go
+++ b/internal/kibana/policies.go
@@ -7,6 +7,7 @@ package kibana
 import (
 	"encoding/json"
 	"fmt"
+	"strings"
 
 	"github.com/pkg/errors"
 	log "github.com/sirupsen/logrus"
@@ -75,6 +76,29 @@ func (c *Client) ListPolicies() ([]Policy, error) {
 	return resp.Items, nil
 }
 
+// DeleteAllPolicies deletes all package policies (integrations) attached to the given policy
+func (c *Client) DeleteAllPolicies(except Policy) {
+	// Cleanup all package policies
+	packagePolicies, err := c.ListPackagePolicies()
+	if err != nil {
+		log.WithFields(log.Fields{
+			"err": err,
+		}).Error("The package policies could not be found")
+	}
+	for _, pkgPolicy := range packagePolicies {
+		// Do not remove the fleet server package integration, otherwise fleet server fails to bootstrap
+		if !strings.Contains(pkgPolicy.Name, "fleet_server") && pkgPolicy.PolicyID == except.ID {
+			err = c.DeleteIntegrationFromPolicy(pkgPolicy)
+			if err != nil {
+				log.WithFields(log.Fields{
+					"err":           err,
+					"packagePolicy": pkgPolicy,
+				}).Error("The integration could not be deleted from the configuration")
+			}
+		}
+	}
+}
+
 // Var represents a single variable at the package or
 // data stream level, encapsulating the data type of the
 // variable and its value.
diff --git a/e2e/_suites/kubernetes-autodiscover/kubernetes.go b/internal/kubernetes/kubernetes.go
similarity index 61%
rename from e2e/_suites/kubernetes-autodiscover/kubernetes.go
rename to internal/kubernetes/kubernetes.go
index 6143392c86..a050d6af05 100644
--- a/e2e/_suites/kubernetes-autodiscover/kubernetes.go
+++ b/internal/kubernetes/kubernetes.go
@@ -1,4 +1,8 @@
-package main
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package kubernetes
 
 import (
 	"context"
@@ -17,19 +21,22 @@ import (
 	"github.com/elastic/e2e-testing/internal/shell"
 )
 
-type kubernetesControl struct {
+// Control encapsulates the execution of kubectl commands against a k8s cluster
+type Control struct {
 	config           string
 	Namespace        string
 	NamespaceUID     string
 	createdNamespace bool
 }
 
-func (c kubernetesControl) WithConfig(config string) kubernetesControl {
+// WithConfig sets the kubeconfig to be used by kubectl
+func (c Control) WithConfig(config string) Control {
 	c.config = config
 	return c
 }
 
-func (c kubernetesControl) WithNamespace(ctx context.Context, namespace string) kubernetesControl {
+// WithNamespace sets the namespace, creating a random test namespace when empty
+func (c Control) WithNamespace(ctx context.Context, namespace string) Control {
 	if namespace == "" {
 		namespace = "test-" + uuid.New().String()
 		err := c.createNamespace(ctx, namespace)
@@ -47,7 +54,7 @@ func (c kubernetesControl) WithNamespace(ctx context.Context, namespace string)
 	return c
 }
 
-func (c kubernetesControl) createNamespace(ctx context.Context, namespace string) error {
+func (c Control) createNamespace(ctx context.Context, namespace string) error {
 	if namespace == "" {
 		return nil
 	}
@@ -70,7 +77,8 @@ func (c kubernetesControl) createNamespace(ctx context.Context, namespace string
 	}, exp)
 }
 
-func (c kubernetesControl) Cleanup(ctx context.Context) error {
+// Cleanup deletes the namespace if it was created by this Control
+func (c Control) Cleanup(ctx context.Context) error {
 	if c.createdNamespace && c.Namespace != "" {
 		output, err := c.Run(ctx, "delete", "namespace", c.Namespace)
 		if err != nil {
@@ -80,11 +88,13 @@ func (c kubernetesControl) Cleanup(ctx context.Context) error {
 	return nil
 }
 
-func (c kubernetesControl) Run(ctx context.Context, runArgs ...string) (output string, err error) {
+// Run runs a kubectl command
+func (c Control) Run(ctx context.Context, runArgs ...string) (output string, err error) {
 	return c.RunWithStdin(ctx, nil, runArgs...)
 }
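A hypothetical use of the exported `Control` API after the move to `internal/kubernetes`; the kubeconfig path is illustrative:

```go
package main

import (
	"context"

	"github.com/elastic/e2e-testing/internal/kubernetes"
)

func main() {
	ctx := context.Background()

	kubectl := kubernetes.Control{}.WithConfig("/tmp/kubeconfig") // path is illustrative
	kubectl = kubectl.WithNamespace(ctx, "")                      // "" creates a random test-<uuid> namespace
	defer kubectl.Cleanup(ctx)                                    // deletes the namespace it created

	// any kubectl subcommand can be run through the helper
	_, _ = kubectl.Run(ctx, "get", "pods")
}
```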
 
-func (c kubernetesControl) RunWithStdin(ctx context.Context, stdin io.Reader, runArgs ...string) (output string, err error) {
+// RunWithStdin runs a kubectl command, reading its input from stdin
+func (c Control) RunWithStdin(ctx context.Context, stdin io.Reader, runArgs ...string) (output string, err error) {
 	shell.CheckInstalledSoftware("kubectl")
 	var args []string
 	if c.config != "" {
@@ -97,23 +107,26 @@ func (c kubernetesControl) RunWithStdin(ctx context.Context, stdin io.Reader, ru
 	return shell.ExecuteWithStdin(ctx, ".", stdin, "kubectl", args...)
 }
 
-type kubernetesCluster struct {
+// Cluster represents a kind cluster
+type Cluster struct {
 	kindName   string
 	kubeconfig string
 	tmpDir     string
 }
 
-func (c kubernetesCluster) Kubectl() kubernetesControl {
-	return kubernetesControl{}.WithConfig(c.kubeconfig)
+// Kubectl returns a Control configured with the cluster's kubeconfig
+func (c Cluster) Kubectl() Control {
+	return Control{}.WithConfig(c.kubeconfig)
 }
 
-func (c kubernetesCluster) isAvailable(ctx context.Context) error {
+func (c Cluster) isAvailable(ctx context.Context) error {
 	_, err := c.Kubectl().Run(ctx, "api-versions")
 	return err
 }
 
-func (c *kubernetesCluster) initialize(ctx context.Context) error {
+// Initialize detects an existing cluster context, otherwise it creates one via kind
+func (c *Cluster) Initialize(ctx context.Context, kindConfigPath string) error {
 	err := c.isAvailable(ctx)
 	if err == nil {
 		return nil
@@ -138,7 +151,7 @@ func (c *kubernetesCluster) initialize(ctx context.Context) error {
 	args := []string{
 		"create", "cluster",
 		"--name", name,
-		"--config", "testdata/kind.yml",
+		"--config", kindConfigPath,
 		"--kubeconfig", c.kubeconfig,
 	}
 	if version, ok := os.LookupEnv("KUBERNETES_VERSION"); ok && version != "" {
@@ -157,7 +170,8 @@ func (c *kubernetesCluster) initialize(ctx context.Context) error {
 	return nil
 }
 
-func (c *kubernetesCluster) cleanup(ctx context.Context) {
+// Cleanup deletes the kind cluster if available
+func (c *Cluster) Cleanup(ctx context.Context) {
 	if c.kindName != "" {
 		_, err := shell.Execute(ctx, ".", "kind", "delete", "cluster", "--name", c.kindName)
 		if err != nil {
@@ -173,3 +187,27 @@ func (c *kubernetesCluster) cleanup(ctx context.Context) {
 		}
 	}
 }
+
+// LoadImage loads a Docker image into the kind runtime, using its fully qualified name.
+// It does not check cluster availability first: the pod may already be failing to pull
+// the very image this load provides, which would cause a chicken-and-egg problem.
+func (c *Cluster) LoadImage(ctx context.Context, image string) error {
+	shell.CheckInstalledSoftware("kind")
+
+	loadArgs := []string{"load", "docker-image", image}
+	// the default cluster name is 'kind'
+	if c.kindName != "" {
+		loadArgs = append(loadArgs, "--name", c.kindName)
+	}
+
+	result, err := shell.Execute(ctx, ".", "kind", loadArgs...)
+	if err != nil {
+		log.WithError(err).Fatal("Failed to load the image into kind")
+	}
+	log.WithFields(log.Fields{
+		"image":  image,
+		"result": result,
+	}).Info("Image has been loaded into the kind runtime")
+
+	return nil
+}
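Putting the pieces together, a sketch (under the same assumptions) of how a suite could preload a PR image so that the templates' `imagePullPolicy: IfNotPresent` resolves it locally; the image tag follows the observability-ci scheme shown earlier:

```go
package main

import (
	"context"

	"github.com/elastic/e2e-testing/internal/kubernetes"
)

func main() {
	ctx := context.Background()

	var cluster kubernetes.Cluster
	if err := cluster.Initialize(ctx, "testdata/kind.yml"); err != nil {
		panic(err)
	}
	defer cluster.Cleanup(ctx)

	// the image tag is illustrative, matching the observability-ci scheme
	_ = cluster.LoadImage(ctx, "docker.elastic.co/observability-ci/filebeat:8.0.0-SNAPSHOT-amd64")
}
```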