Skip to content
This repository was archived by the owner on Sep 17, 2024. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
22 commits
Select commit Hold shift + click to select a range
6814e34
chore: add APM integration to CI
mdelapenya May 6, 2021
194accc
fix: update scenario step to latest version
mdelapenya Jun 30, 2021
ea12af8
chore: remove blank lines
mdelapenya Jun 30, 2021
f050d37
fix: remove fleet server from the stand-alone agent
mdelapenya Jun 30, 2021
5d3c543
feat: add a scenario for adding integrations
mdelapenya Jun 30, 2021
5039a13
chore: simplify scenarios avoiding testing twice
mdelapenya Jun 30, 2021
ec96be9
chore: rename scenario
mdelapenya Jul 18, 2021
7f57a22
chore: add integrations feature file to the CI
mdelapenya Jul 19, 2021
f7bcf04
chore: remove references to FleetServerPolicy, as it's not used anymore
mdelapenya Jul 19, 2021
bb518d7
fix: bring fleet-server bootstrap test back
mdelapenya Jul 20, 2021
60aebe0
fix: expose cloud agent on an unused port
mdelapenya Jul 27, 2021
0db8620
chore: extract a method to get Fleet Server URL
mdelapenya Jul 27, 2021
e02bd6f
fix: get stand-alone agent by hostname from agents list
mdelapenya Jul 27, 2021
ce45452
fix: automatically enroll the stand-alone agent in Fleet
mdelapenya Jul 27, 2021
7fa5f42
fix: reduce the number of occurrences
mdelapenya Jul 27, 2021
ff86a5e
fix: there are 2 filebeat instances
mdelapenya Jul 27, 2021
c48752c
chore: move cloud configs to a better place
mdelapenya Jul 27, 2021
69d866b
chore: run APM tests with ubi8 base image
mdelapenya Jul 28, 2021
679769c
fix: keep original structure
mdelapenya Jul 28, 2021
d42e255
fix: right volume path
mdelapenya Jul 28, 2021
7fd3bb4
Merge branch 'master' into apm-server-tests
mdelapenya Jul 28, 2021
82393c3
chore: remove cloud scenario for APM integration
mdelapenya Jul 28, 2021
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 6 additions & 0 deletions .ci/.e2e-tests.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,12 @@ SUITES:
- name: "Fleet"
pullRequestFilter: " && ~debian"
tags: "fleet_mode_agent"
- name: "Integrations"
pullRequestFilter: " && ~debian"
tags: "integrations"
- name: "APM Integration"
pullRequestFilter: " && ~debian"
tags: "apm_server"
- name: "Endpoint Integration"
platforms:
- "ubuntu-18.04"
Expand Down

This file was deleted.

This file was deleted.

This file was deleted.

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,6 @@ services:
volumes:
- "${apmVolume}:/apm-legacy"
ports:
- "127.0.0.1:8220:8220"
- "127.0.0.1:8221:8220"
- "127.0.0.1:8200:8200"
- "127.0.0.1:5066:5066"
4 changes: 4 additions & 0 deletions cli/config/compose/services/elastic-agent/docker-compose.yml
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,10 @@ services:
environment:
- "FLEET_SERVER_ENABLE=${fleetServerMode:-0}"
- "FLEET_SERVER_INSECURE_HTTP=${fleetServerMode:-0}"
- "FLEET_ENROLL=${fleetEnroll:-1}"
- "FLEET_ENROLLMENT_TOKEN=${fleetEnrollmentToken:-}"
- "FLEET_INSECURE=${fleetInsecure:-0}"
- "FLEET_URL=${fleetUrl:-}"
platform: ${stackPlatform:-linux/amd64}
ports:
- "127.0.0.1:${fleetServerPort:-8220}:8220"
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,7 @@ Scenario Outline: Adding the Endpoint Integration to an Agent makes the host to
Given a "<os>" agent is deployed to Fleet with "tar" installer
And the agent is listed in Fleet as "online"
When the "Endpoint Security" integration is "added" in the policy
Then the "Endpoint Security" datasource is shown in the policy as added
And the host name is shown in the Administration view in the Security App as "online"
Then the host name is shown in the Administration view in the Security App as "online"

@centos
Examples: Centos
Expand Down
27 changes: 9 additions & 18 deletions e2e/_suites/fleet/features/apm_integration.feature
Original file line number Diff line number Diff line change
Expand Up @@ -3,28 +3,19 @@ Feature: APM Integration
Scenarios for APM

@install
Scenario Outline: Deploying a <image> stand-alone agent with fleet server mode
Given a "<image>" stand-alone agent is deployed with fleet server mode
Scenario Outline: Deploying a <image> stand-alone agent with the Elastic APM integration
Given a "<image>" stand-alone agent is deployed
And the stand-alone agent is listed in Fleet as "online"
When the "Elastic APM" integration is added in the policy
Then the "Elastic APM" datasource is shown in the policy as added
And the "apm-server" process is in the "started" state on the host


@default
Examples: default
| image |
| default |



@cloud
Scenario Outline: Deploying a <image> stand-alone agent with fleet server mode on cloud
When a "<image>" stand-alone agent is deployed with fleet server mode on cloud
When the "Elastic APM" integration is "added" in the policy
Then the "apm-server" process is in the "started" state on the host


@default
Examples: default
| image |
| default |

@ubi8
@skip:arm64
Examples: Ubi8
| image |
| ubi8 |
13 changes: 13 additions & 0 deletions e2e/_suites/fleet/features/integrations.feature
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
@integrations
Feature: Integrations
Scenarios for operating integrations

@install
Scenario Outline: Adding an Integration to a Policy
When the "<integration>" integration is "added" in the policy
Then the "<integration>" datasource is shown in the policy as added
Examples:
| integration |
| Elastic APM |
| Endpoint |
| Linux |
3 changes: 1 addition & 2 deletions e2e/_suites/fleet/features/linux_integration.feature
Original file line number Diff line number Diff line change
Expand Up @@ -7,8 +7,7 @@ Scenario Outline: Adding the Linux Integration to an Agent ...
Given a "<os>" agent is deployed to Fleet with "tar" installer
And the agent is listed in Fleet as "online"
When the "Linux" integration is "added" in the policy
Then the "Linux" datasource is shown in the policy as added
And a Linux data stream exists with some data
Then a Linux data stream exists with some data

@centos
Examples: Centos
Expand Down
2 changes: 1 addition & 1 deletion e2e/_suites/fleet/features/stand_alone_agent.feature
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@ Feature: Stand-alone Agent
@start-agent
Scenario Outline: Starting the <image> agent starts backend processes
When a "<image>" stand-alone agent is deployed
Then there are "1" instances of the "filebeat" process in the "started" state
Then there are "2" instances of the "filebeat" process in the "started" state
Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This was an error hidden by the non-autoenrolling stand-alone agent.

And there are "2" instances of the "metricbeat" process in the "started" state

@default
Expand Down
104 changes: 62 additions & 42 deletions e2e/_suites/fleet/fleet.go
Original file line number Diff line number Diff line change
Expand Up @@ -49,7 +49,6 @@ type FleetTestSuite struct {
Integration kibana.IntegrationPackage // the installed integration
Policy kibana.Policy
PolicyUpdatedAt string // the moment the policy was updated
FleetServerPolicy kibana.Policy
Version string // current elastic-agent version
kibanaClient *kibana.Client
deployer deploy.Deployment
Expand All @@ -69,50 +68,52 @@ func (fts *FleetTestSuite) afterScenario() {
fts.currentContext = apm.ContextWithSpan(context.Background(), span)
defer span.End()

serviceName := common.ElasticAgentServiceName
agentService := deploy.NewServiceRequest(serviceName)
if fts.InstallerType != "" {
serviceName := common.ElasticAgentServiceName
agentService := deploy.NewServiceRequest(serviceName)

if !fts.StandAlone {
agentInstaller, _ := installer.Attach(fts.currentContext, fts.deployer, agentService, fts.InstallerType)
if !fts.StandAlone {
agentInstaller, _ := installer.Attach(fts.currentContext, fts.deployer, agentService, fts.InstallerType)

if log.IsLevelEnabled(log.DebugLevel) {
err := agentInstaller.Logs()
if err != nil {
log.WithField("error", err).Warn("Could not get agent logs in the container")
if log.IsLevelEnabled(log.DebugLevel) {
err := agentInstaller.Logs()
if err != nil {
log.WithField("error", err).Warn("Could not get agent logs in the container")
}
}
}
// only call it when the elastic-agent is present
if !fts.ElasticAgentStopped {
err := agentInstaller.Uninstall(fts.currentContext)
if err != nil {
log.Warnf("Could not uninstall the agent after the scenario: %v", err)
// only call it when the elastic-agent is present
if !fts.ElasticAgentStopped {
err := agentInstaller.Uninstall(fts.currentContext)
if err != nil {
log.Warnf("Could not uninstall the agent after the scenario: %v", err)
}
}
} else if log.IsLevelEnabled(log.DebugLevel) {
_ = fts.deployer.Logs(agentService)
}
} else if log.IsLevelEnabled(log.DebugLevel) {
_ = fts.deployer.Logs(agentService)
}

err := fts.unenrollHostname()
if err != nil {
manifest, _ := fts.deployer.Inspect(fts.currentContext, agentService)
log.WithFields(log.Fields{
"err": err,
"hostname": manifest.Hostname,
}).Warn("The agentIDs for the hostname could not be unenrolled")
}
err := fts.unenrollHostname()
if err != nil {
manifest, _ := fts.deployer.Inspect(fts.currentContext, agentService)
log.WithFields(log.Fields{
"err": err,
"hostname": manifest.Hostname,
}).Warn("The agentIDs for the hostname could not be unenrolled")
}

if !common.DeveloperMode {
_ = fts.deployer.Remove(
common.FleetProfileServiceRequest,
[]deploy.ServiceRequest{
deploy.NewServiceRequest(serviceName),
},
common.ProfileEnv)
} else {
log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped")
if !common.DeveloperMode {
_ = fts.deployer.Remove(
common.FleetProfileServiceRequest,
[]deploy.ServiceRequest{
deploy.NewServiceRequest(serviceName),
},
common.ProfileEnv)
} else {
log.WithField("service", serviceName).Info("Because we are running in development mode, the service won't be stopped")
}
}

err = fts.kibanaClient.DeleteEnrollmentAPIKey(fts.currentContext, fts.CurrentTokenID)
err := fts.kibanaClient.DeleteEnrollmentAPIKey(fts.currentContext, fts.CurrentTokenID)
if err != nil {
log.WithFields(log.Fields{
"err": err,
Expand All @@ -125,6 +126,7 @@ func (fts *FleetTestSuite) afterScenario() {
// clean up fields
fts.CurrentTokenID = ""
fts.CurrentToken = ""
fts.InstallerType = ""
fts.Image = ""
fts.StandAlone = false
fts.BeatsProcess = ""
Expand Down Expand Up @@ -183,15 +185,23 @@ func (fts *FleetTestSuite) contributeSteps(s *godog.ScenarioContext) {
// stand-alone only steps
s.Step(`^a "([^"]*)" stand-alone agent is deployed$`, fts.aStandaloneAgentIsDeployed)
s.Step(`^a "([^"]*)" stand-alone agent is deployed with fleet server mode$`, fts.bootstrapFleetServerFromAStandaloneAgent)
s.Step(`^a "([^"]*)" stand-alone agent is deployed with fleet server mode on cloud$`, fts.aStandaloneAgentIsDeployedWithFleetServerModeOnCloud)
s.Step(`^there is new data in the index from agent$`, fts.thereIsNewDataInTheIndexFromAgent)
s.Step(`^the "([^"]*)" docker container is stopped$`, fts.theDockerContainerIsStopped)
s.Step(`^there is no new data in the index after agent shuts down$`, fts.thereIsNoNewDataInTheIndexAfterAgentShutsDown)
s.Step(`^the stand-alone agent is listed in Fleet as "([^"]*)"$`, fts.theStandaloneAgentIsListedInFleetWithStatus)
}

func (fts *FleetTestSuite) theStandaloneAgentIsListedInFleetWithStatus(desiredStatus string) error {
maxTimeout := time.Duration(utils.TimeoutFactor) * time.Minute
exp := utils.GetExponentialBackOff(maxTimeout)
retryCount := 0

agentService := deploy.NewServiceRequest(common.ElasticAgentServiceName)
manifest, _ := fts.deployer.Inspect(fts.currentContext, agentService)

waitForAgents := func() error {
retryCount++

agents, err := fts.kibanaClient.ListAgents(fts.currentContext)
if err != nil {
return err
Expand All @@ -201,13 +211,23 @@ func (fts *FleetTestSuite) theStandaloneAgentIsListedInFleetWithStatus(desiredSt
return errors.New("No agents found")
}

agentZero := agents[0]
hostname := agentZero.LocalMetadata.Host.HostName
for _, agent := range agents {
hostname := agent.LocalMetadata.Host.HostName

if hostname == manifest.Hostname {
return theAgentIsListedInFleetWithStatus(fts.currentContext, desiredStatus, hostname)
}
}

err = errors.New("Agent not found in Fleet")
log.WithFields(log.Fields{
"elapsedTime": exp.GetElapsedTime(),
"hostname": manifest.Hostname,
"retries": retryCount,
}).Warn(err)

return theAgentIsListedInFleetWithStatus(fts.currentContext, desiredStatus, hostname)
return err
}
maxTimeout := time.Duration(utils.TimeoutFactor) * time.Minute * 2
exp := utils.GetExponentialBackOff(maxTimeout)

err := backoff.Retry(waitForAgents, exp)
if err != nil {
Expand Down
Loading