From 837db3283d732498719ed091cb269b8f920222c5 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Fri, 3 Jun 2022 09:24:26 -0400 Subject: [PATCH 01/49] [v2] Add v2 component specification and validation. (#502) * Add v2 component specification and validation. * Remove i386 and ppc64el. Update spec for osquerybeat. * Remove windows/arm64. --- ...crypted_disk_storage_windows_linux_test.go | 3 +- internal/pkg/core/plugin/process/stdlogger.go | 4 + .../pkg/core/plugin/process/stdlogger_test.go | 4 + pkg/component/load.go | 25 +++ pkg/component/load_test.go | 70 ++++++++ pkg/component/outputs.go | 21 +++ pkg/component/platforms.go | 66 +++++++ pkg/component/spec.go | 120 +++++++++++++ pkg/component/spec_test.go | 139 ++++++++++++++ pkg/core/logger/testing.go | 7 +- specs/apm-server.yml | 23 +++ specs/auditbeat.yml | 43 +++++ specs/cloudbeat.yml | 27 +++ specs/endpoint-security.yml | 39 ++++ specs/filebeat.yml | 170 ++++++++++++++++++ specs/fleet-server.yml | 17 ++ specs/heartbeat.yml | 47 +++++ specs/metricbeat.yml | 157 ++++++++++++++++ specs/osquerybeat.yml | 26 +++ specs/packetbeat.yml | 29 +++ 20 files changed, 1035 insertions(+), 2 deletions(-) create mode 100644 pkg/component/load.go create mode 100644 pkg/component/load_test.go create mode 100644 pkg/component/outputs.go create mode 100644 pkg/component/platforms.go create mode 100644 pkg/component/spec.go create mode 100644 pkg/component/spec_test.go create mode 100644 specs/apm-server.yml create mode 100644 specs/auditbeat.yml create mode 100644 specs/cloudbeat.yml create mode 100644 specs/endpoint-security.yml create mode 100644 specs/filebeat.yml create mode 100644 specs/fleet-server.yml create mode 100644 specs/heartbeat.yml create mode 100644 specs/metricbeat.yml create mode 100644 specs/osquerybeat.yml create mode 100644 specs/packetbeat.yml diff --git a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go index e6e6e5d6fd8..a7c41fbd727 100644 --- a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go +++ b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go @@ -16,9 +16,10 @@ import ( "path/filepath" "testing" + "github.com/google/go-cmp/cmp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/vault" - "github.com/google/go-cmp/cmp" ) const ( diff --git a/internal/pkg/core/plugin/process/stdlogger.go b/internal/pkg/core/plugin/process/stdlogger.go index 852205f8ded..4c7d8625216 100644 --- a/internal/pkg/core/plugin/process/stdlogger.go +++ b/internal/pkg/core/plugin/process/stdlogger.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + package process import ( diff --git a/internal/pkg/core/plugin/process/stdlogger_test.go b/internal/pkg/core/plugin/process/stdlogger_test.go index 142625c4662..959f387c32a 100644 --- a/internal/pkg/core/plugin/process/stdlogger_test.go +++ b/internal/pkg/core/plugin/process/stdlogger_test.go @@ -1,3 +1,7 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ package process import ( diff --git a/pkg/component/load.go b/pkg/component/load.go new file mode 100644 index 00000000000..7782dcaa2a6 --- /dev/null +++ b/pkg/component/load.go @@ -0,0 +1,25 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +import ( + "github.com/elastic/go-ucfg/yaml" +) + +// LoadSpec loads the component specification. +// +// Will error in the case that the specification is not valid. Only valid specifications are allowed. +func LoadSpec(data []byte) (Spec, error) { + var spec Spec + cfg, err := yaml.NewConfig(data) + if err != nil { + return spec, err + } + err = cfg.Unpack(&spec) + if err != nil { + return spec, err + } + return spec, nil +} diff --git a/pkg/component/load_test.go b/pkg/component/load_test.go new file mode 100644 index 00000000000..289ace2c72f --- /dev/null +++ b/pkg/component/load_test.go @@ -0,0 +1,70 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +import ( + "io/ioutil" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadSpec_Components(t *testing.T) { + scenarios := []struct { + Name string + Path string + }{ + { + Name: "APM Server", + Path: "apm-server.yml", + }, + { + Name: "Auditbeat", + Path: "auditbeat.yml", + }, + { + Name: "Cloudbeat", + Path: "cloudbeat.yml", + }, + { + Name: "Endpoint Security", + Path: "endpoint-security.yml", + }, + { + Name: "Filebeat", + Path: "filebeat.yml", + }, + { + Name: "Fleet Server", + Path: "fleet-server.yml", + }, + { + Name: "Heartbeat", + Path: "heartbeat.yml", + }, + { + Name: "Metricbeat", + Path: "metricbeat.yml", + }, + { + Name: "Osquerybeat", + Path: "osquerybeat.yml", + }, + { + Name: "Packetbeat", + Path: "packetbeat.yml", + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.Name, func(t *testing.T) { + data, err := ioutil.ReadFile(filepath.Join("..", "..", "specs", scenario.Path)) + require.NoError(t, err) + _, err = LoadSpec(data) + require.NoError(t, err) + }) + } +} diff --git a/pkg/component/outputs.go b/pkg/component/outputs.go new file mode 100644 index 00000000000..faaddbdfd8c --- /dev/null +++ b/pkg/component/outputs.go @@ -0,0 +1,21 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +const ( + // Elasticsearch represents the elasticsearch output + Elasticsearch = "elasticsearch" + // Kafka represents the kafka output + Kafka = "kafka" + // Logstash represents the logstash output + Logstash = "logstash" + // Redis represents the redis output + Redis = "redis" + // Shipper represents support for using the elastic-agent-shipper + Shipper = "shipper" +) + +// Outputs defines the outputs that a component can support +var Outputs = []string{Elasticsearch, Kafka, Logstash, Redis, Shipper} diff --git a/pkg/component/platforms.go b/pkg/component/platforms.go new file mode 100644 index 00000000000..b8ad7ec1e9d --- /dev/null +++ b/pkg/component/platforms.go @@ -0,0 +1,66 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +const ( + // Container represents running inside a container + Container = "container" + // Darwin represents running on Mac OSX + Darwin = "darwin" + // Linux represents running on Linux + Linux = "linux" + // Windows represents running on Windows + Windows = "windows" +) + +const ( + // AMD64 represents the amd64 architecture + AMD64 = "amd64" + // ARM64 represents the arm64 architecture + ARM64 = "arm64" +) + +// Platforms defines the platforms that a component can support +var Platforms = []struct { + OS string + Arch string + GOOS string +}{ + { + OS: Container, + Arch: AMD64, + GOOS: Linux, + }, + { + OS: Container, + Arch: ARM64, + GOOS: Linux, + }, + { + OS: Darwin, + Arch: AMD64, + GOOS: Darwin, + }, + { + OS: Darwin, + Arch: ARM64, + GOOS: Darwin, + }, + { + OS: Linux, + Arch: AMD64, + GOOS: Linux, + }, + { + OS: Linux, + Arch: ARM64, + GOOS: Linux, + }, + { + OS: Windows, + Arch: AMD64, + GOOS: Windows, + }, +} diff --git a/pkg/component/spec.go b/pkg/component/spec.go new file mode 100644 index 00000000000..e428fb71a5f --- /dev/null +++ b/pkg/component/spec.go @@ -0,0 +1,120 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +import ( + "errors" + "fmt" + "time" +) + +// Spec a components specification. +type Spec struct { + Version int `config:"version" yaml:"version" validate:"required"` + Inputs []InputSpec `config:"inputs,omitempty" yaml:"inputs,omitempty"` +} + +// Validate ensures correctness of component specification. +func (s *Spec) Validate() error { + if s.Version != 2 { + return errors.New("only version 2 is allowed") + } + inputsToPlatforms := make(map[string][]string) + for i, input := range s.Inputs { + a, ok := inputsToPlatforms[input.Name] + if !ok { + inputsToPlatforms[input.Name] = make([]string, len(input.Platforms)) + copy(inputsToPlatforms[input.Name], input.Platforms) + continue + } + for _, platform := range input.Platforms { + for _, existing := range a { + if existing == platform { + return fmt.Errorf("input %s at inputs.%d defines the same platform as a previous definition", input.Name, i) + } + } + a = append(a, platform) + inputsToPlatforms[input.Name] = a + } + } + return nil +} + +// InputSpec is the specification for an input type. +type InputSpec struct { + Name string `config:"name" yaml:"name" validate:"required"` + Aliases []string `config:"aliases,omitempty" yaml:"aliases,omitempty"` + Description string `config:"description" yaml:"description" validate:"required"` + Platforms []string `config:"platforms" yaml:"platforms" validate:"required,min=1"` + Outputs []string `config:"outputs" yaml:"outputs" validate:"required,min=1"` + Runtime RuntimeSpec `config:"runtime" yaml:"runtime"` + + Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"` + Service *ServiceSpec `config:"service,omitempty" yaml:"service,omitempty"` +} + +// Validate ensures correctness of input specification. 
+func (s *InputSpec) Validate() error { + if s.Command == nil && s.Service == nil { + return fmt.Errorf("input %s must define either command or service", s.Name) + } + for i, a := range s.Platforms { + for j, b := range s.Platforms { + if i != j && a == b { + return fmt.Errorf("input %s defines the platform %s more than once", s.Name, a) + } + } + } + for i, a := range s.Outputs { + for j, b := range s.Outputs { + if i != j && a == b { + return fmt.Errorf("input %s defines the output %s more than once", s.Name, a) + } + } + } + return nil +} + +// RuntimeSpec is the specification for runtime options. +type RuntimeSpec struct { + Preventions []RuntimePreventionSpec `config:"preventions" yaml:"preventions"` +} + +// RuntimePreventionSpec is the specification that prevents an input to run at execution time. +type RuntimePreventionSpec struct { + Condition string `config:"condition" yaml:"condition" validate:"required"` + Message string `config:"message" yaml:"message" validate:"required"` +} + +// CommandSpec is the specification for an input that executes as a subprocess. +type CommandSpec struct { + Args []string `config:"args,omitempty" yaml:"args,omitempty"` + Env []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"` +} + +// CommandEnvSpec is the specification that defines environment variables that will be set to execute the subprocess. +type CommandEnvSpec struct { + Name string `config:"name" yaml:"name" validate:"required"` + Value string `config:"value" yaml:"value" validate:"required"` +} + +// ServiceSpec is the specification for an input that executes as a service. +type ServiceSpec struct { + Operations ServiceOperationsSpec `config:"operations" yaml:"operations" validate:"required"` +} + +// ServiceOperationsSpec is the specification of the operations that need to be performed to get a service installed/uninstalled. +type ServiceOperationsSpec struct { + Check *ServiceOperationsCommandSpec `config:"check,omitempty" yaml:"check,omitempty"` + Install *ServiceOperationsCommandSpec `config:"install" yaml:"install" validate:"required"` + Uninstall *ServiceOperationsCommandSpec `config:"uninstall" yaml:"uninstall" validate:"required"` +} + +// ServiceOperationsCommandSpec is the specification for execution of binaries to perform the check, install, and uninstall. +type ServiceOperationsCommandSpec struct { + Args []string `config:"args,omitempty" yaml:"args,omitempty"` + Env []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"` + Timeout time.Duration `config:"timeout,omitempty" yaml:"timeout,omitempty"` +} diff --git a/pkg/component/spec_test.go b/pkg/component/spec_test.go new file mode 100644 index 00000000000..3b7f3bf2ae8 --- /dev/null +++ b/pkg/component/spec_test.go @@ -0,0 +1,139 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package component + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSpec_Validation(t *testing.T) { + scenarios := []struct { + Name string + Spec string + Err string + }{ + { + Name: "Empty", + Spec: "", + Err: "missing required field accessing 'version'", + }, + { + Name: "Bad Version", + Spec: "version: 1", + Err: "only version 2 is allowed accessing config", + }, + { + Name: "No Command or Service", + Spec: ` +version: 2 +inputs: + - name: testing + description: Testing Input + platforms: + - linux/amd64 + outputs: + - shipper +`, + Err: "input testing must define either command or service accessing 'inputs.0'", + }, + { + Name: "Duplicate Platform", + Spec: ` +version: 2 +inputs: + - name: testing + description: Testing Input + platforms: + - linux/amd64 + - linux/amd64 + outputs: + - shipper + command: {} +`, + Err: "input testing defines the platform linux/amd64 more than once accessing 'inputs.0'", + }, + { + Name: "Duplicate Output", + Spec: ` +version: 2 +inputs: + - name: testing + description: Testing Input + platforms: + - linux/amd64 + outputs: + - shipper + - shipper + command: {} +`, + Err: "input testing defines the output shipper more than once accessing 'inputs.0'", + }, + { + Name: "Duplicate Platform Same Input Name", + Spec: ` +version: 2 +inputs: + - name: testing + description: Testing Input + platforms: + - linux/amd64 + outputs: + - shipper + command: {} + - name: testing + description: Testing Input + platforms: + - linux/amd64 + outputs: + - shipper + command: {} +`, + Err: "input testing at inputs.1 defines the same platform as a previous definition accessing config", + }, + { + Name: "Valid", + Spec: ` +version: 2 +inputs: + - name: testing + description: Testing Input + platforms: + - linux/amd64 + - windows/amd64 + outputs: + - shipper + command: {} + - name: testing + description: Testing Input + platforms: + - darwin/amd64 + outputs: + - shipper + service: + operations: + install: + args: ["install"] + uninstall: + args: ["uninstall"] +`, + Err: "", + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.Name, func(t *testing.T) { + _, err := LoadSpec([]byte(scenario.Spec)) + if scenario.Err != "" { + require.Error(t, err) + assert.Equal(t, scenario.Err, err.Error()) + } else { + require.NoError(t, err) + } + }) + } +} diff --git a/pkg/core/logger/testing.go b/pkg/core/logger/testing.go index 0436bf2bc2f..a303584b067 100644 --- a/pkg/core/logger/testing.go +++ b/pkg/core/logger/testing.go @@ -1,10 +1,15 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ package logger import ( - "github.com/elastic/elastic-agent-libs/logp" "go.uber.org/zap" "go.uber.org/zap/zapcore" "go.uber.org/zap/zaptest/observer" + + "github.com/elastic/elastic-agent-libs/logp" ) // NewTesting creates a testing logger that buffers the logs in memory and diff --git a/specs/apm-server.yml b/specs/apm-server.yml new file mode 100644 index 00000000000..58cc26f47eb --- /dev/null +++ b/specs/apm-server.yml @@ -0,0 +1,23 @@ +version: 2 +inputs: + - name: apm + description: "APM Server" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "management.enabled=true" + - "-E" + - "gc_percent=${APMSERVER_GOGC:100}" diff --git a/specs/auditbeat.yml b/specs/auditbeat.yml new file mode 100644 index 00000000000..c17a1e24206 --- /dev/null +++ b/specs/auditbeat.yml @@ -0,0 +1,43 @@ +version: 2 +inputs: + - name: audit/auditd + description: "Auditd" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${AUDITBEAT_GOGC:100}" + - "-E" + - "auditbeat.config.modules.enabled=false" + - name: audit/file_integrity + description: "Audit File Integrity" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: audit/system + description: "Audit System" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/cloudbeat.yml b/specs/cloudbeat.yml new file mode 100644 index 00000000000..a45037319b7 --- /dev/null +++ b/specs/cloudbeat.yml @@ -0,0 +1,27 @@ +version: 2 +inputs: + - name: cloudbeat + description: "Cloudbeat" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "management.enabled=true" + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "gc_percent=${CLOUDBEAT_GOGC:100}" diff --git a/specs/endpoint-security.yml b/specs/endpoint-security.yml new file mode 100644 index 00000000000..d0f177b0701 --- /dev/null +++ b/specs/endpoint-security.yml @@ -0,0 +1,39 @@ +version: 2 +inputs: + - name: endpoint + description: "Endpoint Security" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + outputs: + - elasticsearch + runtime: + preventions: + - condition: ${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7' + message: "No support for RHEL7 on arm64" + service: + operations: + check: + args: + - "verify" + - "--log" + - "stderr" + timeout: 30 + install: + args: + - "install" + - "--log" + - "stderr" + - "--upgrade" + - "--resources" + - "endpoint-security-resources.zip" + timeout: 600 + uninstall: + args: + - "uninstall" + - "--log" + - "stderr" + timeout: 600 diff --git a/specs/filebeat.yml b/specs/filebeat.yml new file mode 100644 index 00000000000..7726cd5244a --- /dev/null +++ b/specs/filebeat.yml @@ -0,0 +1,170 @@ +version: 2 +inputs: + - name: aws-cloudwatch + description: "AWS Cloudwatch" + 
platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${FILEBEAT_GOGC:100}" + - "-E" + - "filebeat.config.modules.enabled=false" + - name: aws-s3 + description: "AWS S3" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: azure-eventhub + description: "Azure Eventhub" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: cloudfoundry + description: "PCF Cloudfoundry" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: container + description: "Container logs" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: docker + aliases: + - log/docker + description: "Docker logs" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: gcp-pubsub + description: "GCP Pub-Sub" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: http_endpoint + description: "HTTP Endpoint" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: httpjson + description: "HTTP JSON Endpoint" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: journald + description: "Journald" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kafka + description: "Kafka" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: log + aliases: + - logfile + - event/file + description: "Logfile" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mqtt + description: "MQTT" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: netflow + description: "Netflow" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: o365audit + description: "Office 365 Audit" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: redis + aliases: + - log/redis_slowlog + description: "Redis" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: syslog + aliases: + - log/syslog + description: "Syslog" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: tcp + aliases: + - event/tcp + description: "TCP" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: udp + aliases: + - event/udp + description: "UDP" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: unix + description: "Unix Socket" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: winlog + description: "Winlog" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: filestream + description: "Filestream" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/fleet-server.yml b/specs/fleet-server.yml new file mode 100644 index 00000000000..4884d69f8ad --- /dev/null +++ b/specs/fleet-server.yml @@ -0,0 +1,17 @@ +version: 2 +inputs: + - name: fleet-server + description: "Fleet Server" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + command: + args: + - "--agent-mode" diff --git 
a/specs/heartbeat.yml b/specs/heartbeat.yml new file mode 100644 index 00000000000..56b8146b5bf --- /dev/null +++ b/specs/heartbeat.yml @@ -0,0 +1,47 @@ +version: 2 +inputs: + - name: synthetics/synthetics + description: "Synthetics Browser Monitor" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${HEARTBEAT_GOGC:100}" + - name: synthetics/http + description: "Synthetics HTTP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/icmp + description: "Synthetics ICMP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/tcp + description: "Synthetics TCP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/metricbeat.yml b/specs/metricbeat.yml new file mode 100644 index 00000000000..9859a37582c --- /dev/null +++ b/specs/metricbeat.yml @@ -0,0 +1,157 @@ +version: 2 +inputs: + - name: beat/metrics + description: "Beat metrics" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${METRICBEAT_GOGC:100}" + - "-E" + - "metricbeat.config.modules.enabled=false" + - name: docker/metrics + description: "Docker metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: elasticsearch/metrics + description: "Elasticsearch metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kibana/metrics + description: "Kibana metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kubernetes/metrics + description: "Kubernetes metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: linux/metrics + description: "Linux metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: logstash/metrics + description: "Logstash metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mongodb/metrics + description: "Mongodb metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mysql/metrics + description: "MySQL metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: postgresql/metrics + description: "PostgreSQL metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: redis/metrics + description: "Redis metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: system/metrics + description: "System metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: uwsgi/metrics + description: "UWSGI metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: windows/metrics + description: "Windows metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: aws/metrics + 
description: "AWS metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: awsfargate/metrics + description: "AWS Fargate metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: azure/metrics + description: "Azure metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: cloudfoundry/metrics + description: "PCF Cloudfoundry metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: containerd/metrics + description: "Containerd metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mssql/metrics + description: "Microsoft SQL Server metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: oracle/metrics + description: "Oracle Database metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: syncgateway/metrics + description: "Couchbase Sync Gateway metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/osquerybeat.yml b/specs/osquerybeat.yml new file mode 100644 index 00000000000..40fa1dff731 --- /dev/null +++ b/specs/osquerybeat.yml @@ -0,0 +1,26 @@ +version: 2 +inputs: + - name: osquery + description: "Osquery" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${OSQUERYBEAT_GOGC:100}" diff --git a/specs/packetbeat.yml b/specs/packetbeat.yml new file mode 100644 index 00000000000..becad691f17 --- /dev/null +++ b/specs/packetbeat.yml @@ -0,0 +1,29 @@ +version: 2 +inputs: + - name: packet + description: "Packet Capture" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${PACKETBEAT_GOGC:100}" From 3b9a723a8a9bc8757fc7eda1d4dc62897c36efbb Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Fri, 3 Jun 2022 11:00:17 -0400 Subject: [PATCH 02/49] Add component spec command to validate component specifications. 
 (#510)
---
 internal/pkg/agent/cmd/common.go         |  1 +
 internal/pkg/agent/cmd/component.go      | 23 ++++++++++++++
 internal/pkg/agent/cmd/component_spec.go | 39 ++++++++++++++++++++++++
 3 files changed, 63 insertions(+)
 create mode 100644 internal/pkg/agent/cmd/component.go
 create mode 100644 internal/pkg/agent/cmd/component_spec.go

diff --git a/internal/pkg/agent/cmd/common.go b/internal/pkg/agent/cmd/common.go
index 35aef4d4339..7406419d405 100644
--- a/internal/pkg/agent/cmd/common.go
+++ b/internal/pkg/agent/cmd/common.go
@@ -67,6 +67,7 @@ func NewCommandWithArgs(args []string, streams *cli.IOStreams) *cobra.Command {
 	cmd.AddCommand(newContainerCommand(args, streams))
 	cmd.AddCommand(newStatusCommand(args, streams))
 	cmd.AddCommand(newDiagnosticsCommand(args, streams))
+	cmd.AddCommand(newComponentCommandWithArgs(args, streams))

 	// windows special hidden sub-command (only added on windows)
 	reexec := newReExecWindowsCommand(args, streams)
diff --git a/internal/pkg/agent/cmd/component.go b/internal/pkg/agent/cmd/component.go
new file mode 100644
index 00000000000..6144d78c333
--- /dev/null
+++ b/internal/pkg/agent/cmd/component.go
@@ -0,0 +1,23 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package cmd
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/elastic/elastic-agent/internal/pkg/cli"
+)
+
+func newComponentCommandWithArgs(args []string, streams *cli.IOStreams) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "component <subcommand>",
+		Short: "Tools to work on components",
+		Long:  "Tools for viewing current component information and developing new components for Elastic Agent",
+	}
+
+	cmd.AddCommand(newComponentSpecCommandWithArgs(args, streams))
+
+	return cmd
+}
diff --git a/internal/pkg/agent/cmd/component_spec.go b/internal/pkg/agent/cmd/component_spec.go
new file mode 100644
index 00000000000..1025e488b42
--- /dev/null
+++ b/internal/pkg/agent/cmd/component_spec.go
@@ -0,0 +1,39 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package cmd
+
+import (
+	"fmt"
+	"io/ioutil"
+
+	"github.com/spf13/cobra"
+
+	"github.com/elastic/elastic-agent/pkg/component"
+
+	"github.com/elastic/elastic-agent/internal/pkg/cli"
+)
+
+func newComponentSpecCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command {
+	cmd := &cobra.Command{
+		Use:   "spec [file]",
+		Short: "Validates a component specification",
+		Long:  "Validates a component specification that instructs the Elastic Agent how it should be run.",
+		Args:  cobra.ExactArgs(1),
+		RunE: func(c *cobra.Command, args []string) error {
+			data, err := ioutil.ReadFile(args[0])
+			if err != nil {
+				return err
+			}
+			_, err = component.LoadSpec(data)
+			if err != nil {
+				return err
+			}
+			fmt.Fprintln(streams.Out, "Component specification is valid")
+			return nil
+		},
+	}
+
+	return cmd
+}

From 7bb4acdb3992fb6d7b504f4ed2aeaf945511b810 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Wed, 15 Jun 2022 11:47:11 -0400
Subject: [PATCH 03/49] [v2] Calculate the expected runtime components from
 policy (#550)

* Upgrade elastic-agent-client.

* Calculate the expected running components and units from the v2 specification and the current policy.

* Update NOTICE.txt.
* Fix lint from servicable main.go. * Update GRPC for the agent CLI control protocol. Fix name collision issue. * Run go mod tidy. * Fix more lint issues. * Fix fmt. * Update logic to always compute model, with err set on each component. Check runtime preventions at model generation time. * Fix items from code review, and issue on windows test runner. * Try to cleanup duplication in tests. * Try 2 of fixing duplicate lint failure, that is not really a duplicate. * Re-run mage fmt. * Lint fixes for linux, why different? * Fix nolint comment. * Add comment. --- NOTICE.txt | 100 +- control.proto | 4 +- go.mod | 14 +- go.sum | 21 +- internal/pkg/agent/cmd/diagnostics.go | 50 +- internal/pkg/agent/cmd/enroll_cmd.go | 4 +- internal/pkg/agent/control/client/client.go | 48 +- .../control/{proto => cproto}/control.pb.go | 724 +++++---------- .../agent/control/cproto/control_grpc.pb.go | 340 +++++++ internal/pkg/agent/control/server/server.go | 127 +-- .../serviceable-1.0-darwin-x86_64/main.go | 27 +- magefile.go | 6 +- pkg/component/component.go | 308 +++++++ pkg/component/component_test.go | 851 ++++++++++++++++++ pkg/component/load.go | 158 ++++ pkg/component/load_test.go | 50 +- pkg/component/platforms.go | 45 +- pkg/component/spec.go | 19 +- pkg/component/spec_test.go | 23 +- pkg/core/server/server.go | 30 +- specs/{apm-server.yml => apm-server.spec.yml} | 0 specs/{auditbeat.yml => auditbeat.spec.yml} | 0 specs/{cloudbeat.yml => cloudbeat.spec.yml} | 0 ...ecurity.yml => endpoint-security.spec.yml} | 0 specs/{filebeat.yml => filebeat.spec.yml} | 0 ...fleet-server.yml => fleet-server.spec.yml} | 0 specs/{heartbeat.yml => heartbeat.spec.yml} | 0 specs/{metricbeat.yml => metricbeat.spec.yml} | 0 .../{osquerybeat.yml => osquerybeat.spec.yml} | 0 specs/{packetbeat.yml => packetbeat.spec.yml} | 0 30 files changed, 2215 insertions(+), 734 deletions(-) rename internal/pkg/agent/control/{proto => cproto}/control.pb.go (55%) create mode 100644 internal/pkg/agent/control/cproto/control_grpc.pb.go create mode 100644 pkg/component/component.go create mode 100644 pkg/component/component_test.go rename specs/{apm-server.yml => apm-server.spec.yml} (100%) rename specs/{auditbeat.yml => auditbeat.spec.yml} (100%) rename specs/{cloudbeat.yml => cloudbeat.spec.yml} (100%) rename specs/{endpoint-security.yml => endpoint-security.spec.yml} (100%) rename specs/{filebeat.yml => filebeat.spec.yml} (100%) rename specs/{fleet-server.yml => fleet-server.spec.yml} (100%) rename specs/{heartbeat.yml => heartbeat.spec.yml} (100%) rename specs/{metricbeat.yml => metricbeat.spec.yml} (100%) rename specs/{osquerybeat.yml => osquerybeat.spec.yml} (100%) rename specs/{packetbeat.yml => packetbeat.spec.yml} (100%) diff --git a/NOTICE.txt b/NOTICE.txt index 4b402c790f4..f7854b64dc5 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -828,11 +828,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-a -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.0.0-20210727140539-f0905d9377f6 +Version: v7.0.0-20220524131921-43bacbeec516 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20210727140539-f0905d9377f6/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20220524131921-43bacbeec516/LICENSE.txt: 
ELASTIC LICENSE AGREEMENT @@ -2183,44 +2183,6 @@ OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/golang/protobuf -Version: v1.5.2 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/golang/protobuf@v1.5.2/LICENSE: - -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - -------------------------------------------------------------------------------- Dependency : github.com/google/go-cmp Version: v0.5.6 @@ -5274,11 +5236,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.0.0-20220405052023-b1e9470b6e64 +Version: v0.0.0-20220422013727-9388b58f7150 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20220405052023-b1e9470b6e64/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20220422013727-9388b58f7150/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -5348,11 +5310,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : google.golang.org/grpc -Version: v1.42.0 +Version: v1.46.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.42.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.46.0/LICENSE: Apache License @@ -5560,11 +5522,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.42.0/LIC -------------------------------------------------------------------------------- Dependency : google.golang.org/protobuf -Version: v1.27.1 +Version: v1.28.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.27.1/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.28.0/LICENSE: Copyright (c) 2018 The Go Authors. All rights reserved. @@ -9588,6 +9550,44 @@ third-party archives. limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/golang/protobuf +Version: v1.5.2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/golang/protobuf@v1.5.2/LICENSE: + +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + -------------------------------------------------------------------------------- Dependency : github.com/google/gofuzz Version: v1.1.0 @@ -15217,11 +15217,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/net -Version: v0.0.0-20220225172249-27dd8689420f +Version: v0.0.0-20220425223048-2871e0cb64e4 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20220225172249-27dd8689420f/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20220425223048-2871e0cb64e4/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -15651,11 +15651,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/appengine@v1.6.7 -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20211208223120-3a66f561d7aa +Version: v0.0.0-20220426171045-31bebdecfb46 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20211208223120-3a66f561d7aa/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20220426171045-31bebdecfb46/LICENSE: Apache License diff --git a/control.proto b/control.proto index c49502db920..efd063822de 100644 --- a/control.proto +++ b/control.proto @@ -4,10 +4,10 @@ syntax = "proto3"; -package proto; +package cproto; option cc_enable_arenas = true; -option go_package = "pkg/agent/control/proto;proto"; +option go_package = "internal/pkg/agent/control/cproto"; // Status codes for the current state. enum Status { diff --git a/go.mod b/go.mod index 37c11d9bfb6..2cf5692b25a 100644 --- a/go.mod +++ b/go.mod @@ -12,7 +12,7 @@ require ( github.com/docker/go-units v0.4.0 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab - github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 + github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 github.com/elastic/elastic-agent-libs v0.2.3 github.com/elastic/elastic-agent-system-metrics v0.3.0 github.com/elastic/go-licenser v0.4.0 @@ -20,7 +20,6 @@ require ( github.com/elastic/go-ucfg v0.8.5 github.com/gofrs/flock v0.8.1 github.com/gofrs/uuid v4.2.0+incompatible - github.com/golang/protobuf v1.5.2 github.com/google/go-cmp v0.5.6 github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 @@ -47,10 +46,10 @@ require ( golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64 + golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 golang.org/x/tools v0.1.9 - google.golang.org/grpc v1.42.0 - google.golang.org/protobuf v1.27.1 + google.golang.org/grpc v1.46.0 + google.golang.org/protobuf v1.28.0 gopkg.in/yaml.v2 v2.4.0 gotest.tools v2.2.0+incompatible gotest.tools/gotestsum v1.7.0 @@ -83,6 +82,7 @@ require ( github.com/gobuffalo/here v0.6.0 // indirect github.com/godbus/dbus/v5 v5.0.5 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.2 // indirect github.com/google/gofuzz v1.1.0 // indirect github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect @@ -121,14 +121,14 @@ require ( go.uber.org/atomic 
v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/mod v0.5.1 // indirect - golang.org/x/net v0.0.0-20220225172249-27dd8689420f // indirect + golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect golang.org/x/text v0.3.7 // indirect golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect + google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 // indirect google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect diff --git a/go.sum b/go.sum index 91a2ec0da3a..d12450c533f 100644 --- a/go.sum +++ b/go.sum @@ -382,8 +382,8 @@ github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 h1:uYT+Krd8 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4/go.mod h1:UcNuf4pX/qDVNQr0zybm1NL2YoWik+jKBaINZqQCA40= github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab h1:Jk6Mfk5BF8gtfE7X0bNCiDGBtwJVxRI79b4wLCAsP+A= github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab/go.mod h1:Gg1fsQI+rVms9FJ2DefBSojfPIzgkV8xlyG8fPG0DE8= -github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 h1:nFvXHBjYK3e9+xF0WKDeAKK4aOO51uC28s+L9rBmilo= -github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 h1:8sGoTlgXRCesR1+FjBv8YY5CyVhNSDjXlo4uq5q1RGM= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= github.com/elastic/elastic-agent-libs v0.2.2/go.mod h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s= github.com/elastic/elastic-agent-libs v0.2.3 h1:GY8M0fxOs/GBY2nIB+JOB91aoD72S87iEcm2qVGFUqI= @@ -424,6 +424,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= github.com/envoyproxy/go-control-plane v0.10.1/go.mod h1:AY7fTTXNdv/aJ2O5jwpxAPOWUZ7hQAEvzN5Pf27BkQQ= +github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.2/go.mod h1:2t7qjJNvHPx8IjnBOzl9E9/baC+qXE/TeeyBRzgJDws= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= @@ -1405,8 +1406,9 @@ golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod 
h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f h1:oA4XRj0qtSt8Yo1Zms0CUlsT3KG69V2UGQWPBxujDmc= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1562,8 +1564,9 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64 h1:D1v9ucDTYBtbz5vNuBbAhIMAGhQhJ6Ym5ah3maMVNX4= golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= +golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1783,8 +1786,9 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 h1:G1IeWbjrqEq9ChWxEuRPJu6laA67+XgTFHVSAvepr38= +google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -1816,8 +1820,10 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= 
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.46.0 h1:oCjezcn6g6A75TGoKYBPgKmVBLexhYLM6MebdrPApP8= +google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767 h1:r16FSFCMhn7+LU8CzbtAIKppYeU6NUPJVdvXeIqVIq8= google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767/go.mod h1:wKDg0brwMZpaizQ1i7IzYcJjH1TmbJudYdnQC9+J+LE= @@ -1833,8 +1839,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index 9095e34bfc3..ad3a77e1a4f 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -27,17 +27,23 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/proto" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config/operations" ) +const ( + outputTypeHuman = "human" + outputTypeJSON = "json" + outputTypeYAML = "yaml" +) + var diagOutputs = map[string]outputter{ - "human": humanDiagnosticsOutput, - "json": jsonOutput, - "yaml": yamlOutput, + outputTypeHuman: humanDiagnosticsOutput, + outputTypeJSON: jsonOutput, + outputTypeYAML: yamlOutput, } // DiagnosticsInfo a struct to track all information related to diagnostics for the agent. 
@@ -98,8 +104,8 @@ func newDiagnosticsCollectCommandWithArgs(_ []string, streams *cli.IOStreams) *c output, _ := c.Flags().GetString("output") switch output { - case "yaml": - case "json": + case outputTypeYAML: + case outputTypeJSON: default: return fmt.Errorf("unsupported output: %s", output) } @@ -254,7 +260,7 @@ func diagnosticsCollectCmd(streams *cli.IOStreams, fileName, outputFormat string } func diagnosticsPprofCmd(streams *cli.IOStreams, dur, cmdTimeout time.Duration, outFile, pType, appName, rk string) error { - pt, ok := proto.PprofOption_value[strings.ToUpper(pType)] + pt, ok := cproto.PprofOption_value[strings.ToUpper(pType)] if !ok { return fmt.Errorf("unknown pprof-type %q, select one of [allocs, block, cmdline, goroutine, heap, mutex, profile, threadcreate, trace]", pType) } @@ -278,13 +284,13 @@ func diagnosticsPprofCmd(streams *cli.IOStreams, dur, cmdTimeout time.Duration, return err } - pprofData, err := daemon.Pprof(innerCtx, dur, []proto.PprofOption{proto.PprofOption(pt)}, appName, rk) + pprofData, err := daemon.Pprof(innerCtx, dur, []cproto.PprofOption{cproto.PprofOption(pt)}, appName, rk) if err != nil { return err } // validate response - pArr, ok := pprofData[proto.PprofOption_name[pt]] + pArr, ok := pprofData[cproto.PprofOption_name[pt]] if !ok { return fmt.Errorf("route key %q not found in response data (map length: %d)", rk, len(pprofData)) } @@ -350,7 +356,7 @@ func getDiagnostics(ctx context.Context) (DiagnosticsInfo, error) { return diag, nil } -func gatherMetrics(ctx context.Context) (*proto.ProcMetricsResponse, error) { +func gatherMetrics(ctx context.Context) (*cproto.ProcMetricsResponse, error) { daemon := client.New() err := daemon.Connect(ctx) if err != nil { @@ -453,7 +459,7 @@ func gatherConfig() (AgentConfig, error) { // // The passed DiagnosticsInfo and AgentConfig data is written in the specified output format. // Any local log files are collected and copied into the archive. 
-func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentConfig, pprof map[string][]client.ProcPProf, metrics *proto.ProcMetricsResponse, errs []error) error { +func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentConfig, pprof map[string][]client.ProcPProf, metrics *cproto.ProcMetricsResponse, errs []error) error { f, err := os.Create(fileName) if err != nil { return err @@ -667,16 +673,16 @@ func getAllPprof(ctx context.Context, d time.Duration) (map[string][]client.Proc if err != nil { return nil, err } - pprofTypes := []proto.PprofOption{ - proto.PprofOption_ALLOCS, - proto.PprofOption_BLOCK, - proto.PprofOption_CMDLINE, - proto.PprofOption_GOROUTINE, - proto.PprofOption_HEAP, - proto.PprofOption_MUTEX, - proto.PprofOption_PROFILE, - proto.PprofOption_THREADCREATE, - proto.PprofOption_TRACE, + pprofTypes := []cproto.PprofOption{ + cproto.PprofOption_ALLOCS, + cproto.PprofOption_BLOCK, + cproto.PprofOption_CMDLINE, + cproto.PprofOption_GOROUTINE, + cproto.PprofOption_HEAP, + cproto.PprofOption_MUTEX, + cproto.PprofOption_PROFILE, + cproto.PprofOption_THREADCREATE, + cproto.PprofOption_TRACE, } return daemon.Pprof(ctx, d, pprofTypes, "", "") } @@ -717,7 +723,7 @@ func zipProfs(zw *zip.Writer, pprof map[string][]client.ProcPProf) error { return nil } -func zipMetrics(zw *zip.Writer, metrics *proto.ProcMetricsResponse) error { +func zipMetrics(zw *zip.Writer, metrics *cproto.ProcMetricsResponse) error { //nolint:staticcheck,wastedassign // false positive zf, err := zw.Create("metrics/") if err != nil { diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index 6525a8a9fde..6d8858a99c4 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -28,7 +28,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/proto" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" @@ -767,7 +767,7 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat continue } log.Debugf("%s: %s - %s", waitingForFleetServer, app.Status, app.Message) - if app.Status == proto.Status_DEGRADED || app.Status == proto.Status_HEALTHY { + if app.Status == cproto.Status_DEGRADED || app.Status == cproto.Status_HEALTHY { // app has started and is running if app.Message != "" { log.Infof("Fleet Server - %s", app.Message) diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/client/client.go index 8a40143ff94..728e830b462 100644 --- a/internal/pkg/agent/control/client/client.go +++ b/internal/pkg/agent/control/client/client.go @@ -12,27 +12,27 @@ import ( "time" "github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/proto" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" ) // Status is the status of the Elastic Agent -type Status = proto.Status +type Status = cproto.Status const ( // Starting is when the it is still starting. 
- Starting Status = proto.Status_STARTING + Starting Status = cproto.Status_STARTING // Configuring is when it is configuring. - Configuring Status = proto.Status_CONFIGURING + Configuring Status = cproto.Status_CONFIGURING // Healthy is when it is healthy. - Healthy Status = proto.Status_HEALTHY + Healthy Status = cproto.Status_HEALTHY // Degraded is when it is degraded. - Degraded Status = proto.Status_DEGRADED + Degraded Status = cproto.Status_DEGRADED // Failed is when it is failed. - Failed Status = proto.Status_FAILED + Failed Status = cproto.Status_FAILED // Stopping is when it is stopping. - Stopping Status = proto.Status_STOPPING + Stopping Status = cproto.Status_STOPPING // Upgrading is when it is upgrading. - Upgrading Status = proto.Status_UPGRADING + Upgrading Status = cproto.Status_UPGRADING ) // Version is the current running version of the daemon. @@ -104,9 +104,9 @@ type Client interface { // ProcMeta gathers running process meta-data. ProcMeta(ctx context.Context) ([]ProcMeta, error) // Pprof gathers data from the /debug/pprof/ endpoints specified. - Pprof(ctx context.Context, d time.Duration, pprofTypes []proto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error) + Pprof(ctx context.Context, d time.Duration, pprofTypes []cproto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error) // ProcMetrics gathers /buffer data and from the agent and each running process and returns the result. - ProcMetrics(ctx context.Context) (*proto.ProcMetricsResponse, error) + ProcMetrics(ctx context.Context) (*cproto.ProcMetricsResponse, error) } // client manages the state and communication to the Elastic Agent. @@ -114,7 +114,7 @@ type client struct { ctx context.Context cancel context.CancelFunc wg sync.WaitGroup - client proto.ElasticAgentControlClient + client cproto.ElasticAgentControlClient } // New creates a client connection to Elastic Agent. @@ -129,7 +129,7 @@ func (c *client) Connect(ctx context.Context) error { if err != nil { return err } - c.client = proto.NewElasticAgentControlClient(conn) + c.client = cproto.NewElasticAgentControlClient(conn) return nil } @@ -145,7 +145,7 @@ func (c *client) Disconnect() { // Version returns the current version of the running agent. func (c *client) Version(ctx context.Context) (Version, error) { - res, err := c.client.Version(ctx, &proto.Empty{}) + res, err := c.client.Version(ctx, &cproto.Empty{}) if err != nil { return Version{}, err } @@ -163,7 +163,7 @@ func (c *client) Version(ctx context.Context) (Version, error) { // Status returns the current status of the running agent. func (c *client) Status(ctx context.Context) (*AgentStatus, error) { - res, err := c.client.Status(ctx, &proto.Empty{}) + res, err := c.client.Status(ctx, &cproto.Empty{}) if err != nil { return nil, err } @@ -193,11 +193,11 @@ func (c *client) Status(ctx context.Context) (*AgentStatus, error) { // Restart triggers restarting the current running daemon. func (c *client) Restart(ctx context.Context) error { - res, err := c.client.Restart(ctx, &proto.Empty{}) + res, err := c.client.Restart(ctx, &cproto.Empty{}) if err != nil { return err } - if res.Status == proto.ActionStatus_FAILURE { + if res.Status == cproto.ActionStatus_FAILURE { return fmt.Errorf(res.Error) } return nil @@ -205,14 +205,14 @@ func (c *client) Restart(ctx context.Context) error { // Upgrade triggers upgrade of the current running daemon. 
func (c *client) Upgrade(ctx context.Context, version string, sourceURI string) (string, error) { - res, err := c.client.Upgrade(ctx, &proto.UpgradeRequest{ + res, err := c.client.Upgrade(ctx, &cproto.UpgradeRequest{ Version: version, SourceURI: sourceURI, }) if err != nil { return "", err } - if res.Status == proto.ActionStatus_FAILURE { + if res.Status == cproto.ActionStatus_FAILURE { return "", fmt.Errorf(res.Error) } return res.Version, nil @@ -220,7 +220,7 @@ func (c *client) Upgrade(ctx context.Context, version string, sourceURI string) // ProcMeta gathers running beat metadata. func (c *client) ProcMeta(ctx context.Context) ([]ProcMeta, error) { - resp, err := c.client.ProcMeta(ctx, &proto.Empty{}) + resp, err := c.client.ProcMeta(ctx, &cproto.Empty{}) if err != nil { return nil, err } @@ -261,8 +261,8 @@ func (c *client) ProcMeta(ctx context.Context) ([]ProcMeta, error) { } // Pprof gathers /debug/pprof data and returns a map of pprof-type: ProcPProf data -func (c *client) Pprof(ctx context.Context, d time.Duration, pprofTypes []proto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error) { - resp, err := c.client.Pprof(ctx, &proto.PprofRequest{ +func (c *client) Pprof(ctx context.Context, d time.Duration, pprofTypes []cproto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error) { + resp, err := c.client.Pprof(ctx, &cproto.PprofRequest{ PprofType: pprofTypes, TraceDuration: d.String(), AppName: appName, @@ -287,6 +287,6 @@ func (c *client) Pprof(ctx context.Context, d time.Duration, pprofTypes []proto. } // ProcMetrics gathers /buffer data and from the agent and each running process and returns the result. -func (c *client) ProcMetrics(ctx context.Context) (*proto.ProcMetricsResponse, error) { - return c.client.ProcMetrics(ctx, &proto.Empty{}) +func (c *client) ProcMetrics(ctx context.Context) (*cproto.ProcMetricsResponse, error) { + return c.client.ProcMetrics(ctx, &cproto.Empty{}) } diff --git a/internal/pkg/agent/control/proto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go similarity index 55% rename from internal/pkg/agent/control/proto/control.pb.go rename to internal/pkg/agent/control/cproto/control.pb.go index ba5cad29109..43609b68f0a 100644 --- a/internal/pkg/agent/control/proto/control.pb.go +++ b/internal/pkg/agent/control/cproto/control.pb.go @@ -4,20 +4,16 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.27.1 -// protoc v3.18.0 +// protoc-gen-go v1.28.0 +// protoc v3.19.4 // source: control.proto -package proto +package cproto import ( - context "context" reflect "reflect" sync "sync" - grpc "google.golang.org/grpc" - codes "google.golang.org/grpc/codes" - status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" ) @@ -332,7 +328,7 @@ type RestartResponse struct { unknownFields protoimpl.UnknownFields // Response status. - Status ActionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=proto.ActionStatus" json:"status,omitempty"` + Status ActionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=cproto.ActionStatus" json:"status,omitempty"` // Error message when it fails to trigger restart. Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` } @@ -455,7 +451,7 @@ type UpgradeResponse struct { unknownFields protoimpl.UnknownFields // Response status. 
- Status ActionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=proto.ActionStatus" json:"status,omitempty"` + Status ActionStatus `protobuf:"varint,1,opt,name=status,proto3,enum=cproto.ActionStatus" json:"status,omitempty"` // Version that is being upgraded to. Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` // Error message when it fails to trigger upgrade. @@ -526,7 +522,7 @@ type ApplicationStatus struct { // Application name. Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` // Current status. - Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=proto.Status" json:"status,omitempty"` + Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"` // Current status message. Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"` // Current status payload. @@ -767,7 +763,7 @@ type StatusResponse struct { unknownFields protoimpl.UnknownFields // Overall status of Elastic Agent. - Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=proto.Status" json:"status,omitempty"` + Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"` // Overall status message of Elastic Agent. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // Status of each application in Elastic Agent. @@ -882,7 +878,7 @@ type PprofRequest struct { unknownFields protoimpl.UnknownFields // The profiles that are requested - PprofType []PprofOption `protobuf:"varint,1,rep,packed,name=pprofType,proto3,enum=proto.PprofOption" json:"pprofType,omitempty"` + PprofType []PprofOption `protobuf:"varint,1,rep,packed,name=pprofType,proto3,enum=cproto.PprofOption" json:"pprofType,omitempty"` // A string representing a time.Duration to apply to trace, and profile options. TraceDuration string `protobuf:"bytes,2,opt,name=traceDuration,proto3" json:"traceDuration,omitempty"` // The application that will be profiled, if empty all applications are profiled. 
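[Editor's note] Alongside the message structs above, protoc-gen-go emits a pair of lookup maps for each enum (PprofOption_name and PprofOption_value), and the diagnosticsPprofCmd hunk earlier relies on exactly those to turn the user-supplied --pprof-type string into an enum value. A minimal self-contained sketch of that lookup; the maps are hand-copied from the PprofOption enum in control.proto so the sketch compiles on its own.

package main

import (
	"fmt"
	"strings"
)

// PprofOption stands in for the generated cproto.PprofOption enum type.
type PprofOption int32

// protoc-gen-go emits one name map and one value map per enum; they are
// hand-written here, with values matching the PprofOption enum above,
// to keep the sketch self-contained.
var (
	PprofOption_name = map[int32]string{
		0: "ALLOCS", 1: "BLOCK", 2: "CMDLINE", 3: "GOROUTINE",
		4: "HEAP", 5: "MUTEX", 6: "PROFILE", 7: "THREADCREATE", 8: "TRACE",
	}
	PprofOption_value = map[string]int32{
		"ALLOCS": 0, "BLOCK": 1, "CMDLINE": 2, "GOROUTINE": 3,
		"HEAP": 4, "MUTEX": 5, "PROFILE": 6, "THREADCREATE": 7, "TRACE": 8,
	}
)

func main() {
	// Same resolution step diagnosticsPprofCmd performs on --pprof-type.
	in := "heap"
	pt, ok := PprofOption_value[strings.ToUpper(in)]
	if !ok {
		fmt.Printf("unknown pprof-type %q\n", in)
		return
	}
	fmt.Printf("resolved %q to %d (%s)\n", in, pt, PprofOption_name[pt])
}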
@@ -959,7 +955,7 @@ type PprofResult struct { AppName string `protobuf:"bytes,1,opt,name=appName,proto3" json:"appName,omitempty"` RouteKey string `protobuf:"bytes,2,opt,name=routeKey,proto3" json:"routeKey,omitempty"` - PprofType PprofOption `protobuf:"varint,3,opt,name=pprofType,proto3,enum=proto.PprofOption" json:"pprofType,omitempty"` + PprofType PprofOption `protobuf:"varint,3,opt,name=pprofType,proto3,enum=cproto.PprofOption" json:"pprofType,omitempty"` Result []byte `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` } @@ -1203,163 +1199,165 @@ var File_control_proto protoreflect.FileDescriptor var file_control_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x05, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, - 0x7d, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, - 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, - 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, - 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0x54, - 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x14, - 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x22, 0x48, 0x0a, 0x0e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, 0x22, 0x6e, - 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x2b, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x0e, 0x32, 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, - 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x92, - 0x01, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 
0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x25, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, - 0x6f, 0x61, 0x64, 0x22, 0xb5, 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, - 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, - 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x70, - 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0b, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x18, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x75, - 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, - 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, - 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, - 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, - 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x19, - 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x75, 0x73, 0x65, 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, - 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x0a, - 0x09, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6c, - 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x18, 0x0e, - 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x63, - 0x65, 0x6e, 0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0f, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x8f, 0x01, 0x0a, 0x0e, - 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 
0x25, - 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, - 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x3c, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, - 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x70, - 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x39, 0x0a, - 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x25, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x0f, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, - 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, 0x9c, 0x01, 0x0a, 0x0c, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x30, 0x0a, 0x09, 0x70, 0x70, 0x72, - 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, 0x12, 0x2e, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, - 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, - 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, - 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa3, 0x01, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, - 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, - 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, - 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x30, 0x0a, - 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, - 0x32, 0x12, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3d, 0x0a, - 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, - 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x12, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, - 0x4d, 0x65, 0x74, 
0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, + 0x06, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x22, 0x7d, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, + 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, + 0x55, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, 0x69, + 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x48, 0x0a, 0x0e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, + 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, + 0x22, 0x6f, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, + 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, + 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x22, 0x93, 0x01, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, + 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 
0x22, 0xb5, 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, + 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, + 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, + 0x0a, 0x0c, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, + 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, + 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, + 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, + 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, + 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, + 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, + 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, + 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, + 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, + 0x91, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, + 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, + 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x6c, 
0x69, 0x63, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x73, 0x22, 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, + 0x9d, 0x01, 0x0a, 0x0c, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, + 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, + 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, + 0xa4, 0x01, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, - 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, - 0x72, 0x6f, 0x72, 0x22, 0x45, 0x0a, 0x13, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2e, 0x0a, 0x06, 0x72, 0x65, - 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2a, 0x79, 0x0a, 0x06, 0x53, 0x74, - 0x61, 0x74, 0x75, 0x73, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, - 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, - 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, - 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, - 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, - 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, - 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, - 0x41, 0x43, 0x4b, 0x10, 0x07, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, - 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, - 
0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, - 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, - 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, - 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, - 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, - 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, - 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, - 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, - 0x32, 0x80, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, - 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x2f, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x06, 0x53, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, - 0x61, 0x72, 0x74, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x16, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, - 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x38, 0x0a, 0x07, 0x55, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x12, 0x15, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, - 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, - 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x05, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, - 0x13, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x14, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x37, 0x0a, 0x0b, 0x50, 0x72, - 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x0c, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1a, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x42, 0x22, 0x5a, 0x1d, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, - 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x3b, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, + 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, + 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, + 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, + 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, + 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, + 0x13, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, + 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2a, 0x79, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, + 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, + 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, + 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, + 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, + 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, + 0x47, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, + 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x07, + 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, + 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, + 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, + 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 
0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, + 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, + 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, + 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8e, 0x03, 0x0a, 0x13, + 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, + 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, + 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, + 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, + 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, + 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, + 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, + 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, + 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1377,49 +1375,49 @@ func file_control_proto_rawDescGZIP() []byte { var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 3) var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 14) var file_control_proto_goTypes = []interface{}{ - (Status)(0), // 0: proto.Status - (ActionStatus)(0), // 1: proto.ActionStatus - (PprofOption)(0), // 2: proto.PprofOption - (*Empty)(nil), // 3: proto.Empty - (*VersionResponse)(nil), // 4: proto.VersionResponse - 
(*RestartResponse)(nil), // 5: proto.RestartResponse - (*UpgradeRequest)(nil), // 6: proto.UpgradeRequest - (*UpgradeResponse)(nil), // 7: proto.UpgradeResponse - (*ApplicationStatus)(nil), // 8: proto.ApplicationStatus - (*ProcMeta)(nil), // 9: proto.ProcMeta - (*StatusResponse)(nil), // 10: proto.StatusResponse - (*ProcMetaResponse)(nil), // 11: proto.ProcMetaResponse - (*PprofRequest)(nil), // 12: proto.PprofRequest - (*PprofResult)(nil), // 13: proto.PprofResult - (*PprofResponse)(nil), // 14: proto.PprofResponse - (*MetricsResponse)(nil), // 15: proto.MetricsResponse - (*ProcMetricsResponse)(nil), // 16: proto.ProcMetricsResponse + (Status)(0), // 0: cproto.Status + (ActionStatus)(0), // 1: cproto.ActionStatus + (PprofOption)(0), // 2: cproto.PprofOption + (*Empty)(nil), // 3: cproto.Empty + (*VersionResponse)(nil), // 4: cproto.VersionResponse + (*RestartResponse)(nil), // 5: cproto.RestartResponse + (*UpgradeRequest)(nil), // 6: cproto.UpgradeRequest + (*UpgradeResponse)(nil), // 7: cproto.UpgradeResponse + (*ApplicationStatus)(nil), // 8: cproto.ApplicationStatus + (*ProcMeta)(nil), // 9: cproto.ProcMeta + (*StatusResponse)(nil), // 10: cproto.StatusResponse + (*ProcMetaResponse)(nil), // 11: cproto.ProcMetaResponse + (*PprofRequest)(nil), // 12: cproto.PprofRequest + (*PprofResult)(nil), // 13: cproto.PprofResult + (*PprofResponse)(nil), // 14: cproto.PprofResponse + (*MetricsResponse)(nil), // 15: cproto.MetricsResponse + (*ProcMetricsResponse)(nil), // 16: cproto.ProcMetricsResponse } var file_control_proto_depIdxs = []int32{ - 1, // 0: proto.RestartResponse.status:type_name -> proto.ActionStatus - 1, // 1: proto.UpgradeResponse.status:type_name -> proto.ActionStatus - 0, // 2: proto.ApplicationStatus.status:type_name -> proto.Status - 0, // 3: proto.StatusResponse.status:type_name -> proto.Status - 8, // 4: proto.StatusResponse.applications:type_name -> proto.ApplicationStatus - 9, // 5: proto.ProcMetaResponse.procs:type_name -> proto.ProcMeta - 2, // 6: proto.PprofRequest.pprofType:type_name -> proto.PprofOption - 2, // 7: proto.PprofResult.pprofType:type_name -> proto.PprofOption - 13, // 8: proto.PprofResponse.results:type_name -> proto.PprofResult - 15, // 9: proto.ProcMetricsResponse.result:type_name -> proto.MetricsResponse - 3, // 10: proto.ElasticAgentControl.Version:input_type -> proto.Empty - 3, // 11: proto.ElasticAgentControl.Status:input_type -> proto.Empty - 3, // 12: proto.ElasticAgentControl.Restart:input_type -> proto.Empty - 6, // 13: proto.ElasticAgentControl.Upgrade:input_type -> proto.UpgradeRequest - 3, // 14: proto.ElasticAgentControl.ProcMeta:input_type -> proto.Empty - 12, // 15: proto.ElasticAgentControl.Pprof:input_type -> proto.PprofRequest - 3, // 16: proto.ElasticAgentControl.ProcMetrics:input_type -> proto.Empty - 4, // 17: proto.ElasticAgentControl.Version:output_type -> proto.VersionResponse - 10, // 18: proto.ElasticAgentControl.Status:output_type -> proto.StatusResponse - 5, // 19: proto.ElasticAgentControl.Restart:output_type -> proto.RestartResponse - 7, // 20: proto.ElasticAgentControl.Upgrade:output_type -> proto.UpgradeResponse - 11, // 21: proto.ElasticAgentControl.ProcMeta:output_type -> proto.ProcMetaResponse - 14, // 22: proto.ElasticAgentControl.Pprof:output_type -> proto.PprofResponse - 16, // 23: proto.ElasticAgentControl.ProcMetrics:output_type -> proto.ProcMetricsResponse + 1, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus + 1, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus + 0, // 2: 
cproto.ApplicationStatus.status:type_name -> cproto.Status + 0, // 3: cproto.StatusResponse.status:type_name -> cproto.Status + 8, // 4: cproto.StatusResponse.applications:type_name -> cproto.ApplicationStatus + 9, // 5: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta + 2, // 6: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption + 2, // 7: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption + 13, // 8: cproto.PprofResponse.results:type_name -> cproto.PprofResult + 15, // 9: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse + 3, // 10: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty + 3, // 11: cproto.ElasticAgentControl.Status:input_type -> cproto.Empty + 3, // 12: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty + 6, // 13: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest + 3, // 14: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty + 12, // 15: cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest + 3, // 16: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty + 4, // 17: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse + 10, // 18: cproto.ElasticAgentControl.Status:output_type -> cproto.StatusResponse + 5, // 19: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse + 7, // 20: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse + 11, // 21: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse + 14, // 22: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse + 16, // 23: cproto.ElasticAgentControl.ProcMetrics:output_type -> cproto.ProcMetricsResponse 17, // [17:24] is the sub-list for method output_type 10, // [10:17] is the sub-list for method input_type 10, // [10:10] is the sub-list for extension type_name @@ -1622,313 +1620,3 @@ func file_control_proto_init() { file_control_proto_goTypes = nil file_control_proto_depIdxs = nil } - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConnInterface - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion6 - -// ElasticAgentControlClient is the client API for ElasticAgentControl service. -// -// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. -type ElasticAgentControlClient interface { - // Fetches the currently running version of the Elastic Agent. - Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) - // Fetches the currently status of the Elastic Agent. - Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) - // Restart restarts the current running Elastic Agent. - Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) - // Upgrade starts the upgrade process of Elastic Agent. - Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) - // Gather all running process metadata. - ProcMeta(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetaResponse, error) - // Gather requested pprof data from specified applications. - Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) - // Gather all running process metrics. 
- ProcMetrics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetricsResponse, error) -} - -type elasticAgentControlClient struct { - cc grpc.ClientConnInterface -} - -func NewElasticAgentControlClient(cc grpc.ClientConnInterface) ElasticAgentControlClient { - return &elasticAgentControlClient{cc} -} - -func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) { - out := new(VersionResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Version", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Status", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *elasticAgentControlClient) Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) { - out := new(RestartResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Restart", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *elasticAgentControlClient) Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) { - out := new(UpgradeResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Upgrade", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *elasticAgentControlClient) ProcMeta(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetaResponse, error) { - out := new(ProcMetaResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/ProcMeta", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *elasticAgentControlClient) Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) { - out := new(PprofResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/Pprof", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *elasticAgentControlClient) ProcMetrics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetricsResponse, error) { - out := new(ProcMetricsResponse) - err := c.cc.Invoke(ctx, "/proto.ElasticAgentControl/ProcMetrics", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// ElasticAgentControlServer is the server API for ElasticAgentControl service. -type ElasticAgentControlServer interface { - // Fetches the currently running version of the Elastic Agent. - Version(context.Context, *Empty) (*VersionResponse, error) - // Fetches the currently status of the Elastic Agent. - Status(context.Context, *Empty) (*StatusResponse, error) - // Restart restarts the current running Elastic Agent. - Restart(context.Context, *Empty) (*RestartResponse, error) - // Upgrade starts the upgrade process of Elastic Agent. - Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) - // Gather all running process metadata. - ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) - // Gather requested pprof data from specified applications. - Pprof(context.Context, *PprofRequest) (*PprofResponse, error) - // Gather all running process metrics. - ProcMetrics(context.Context, *Empty) (*ProcMetricsResponse, error) -} - -// UnimplementedElasticAgentControlServer can be embedded to have forward compatible implementations. 
-type UnimplementedElasticAgentControlServer struct { -} - -func (*UnimplementedElasticAgentControlServer) Version(context.Context, *Empty) (*VersionResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") -} -func (*UnimplementedElasticAgentControlServer) Status(context.Context, *Empty) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") -} -func (*UnimplementedElasticAgentControlServer) Restart(context.Context, *Empty) (*RestartResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Restart not implemented") -} -func (*UnimplementedElasticAgentControlServer) Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Upgrade not implemented") -} -func (*UnimplementedElasticAgentControlServer) ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProcMeta not implemented") -} -func (*UnimplementedElasticAgentControlServer) Pprof(context.Context, *PprofRequest) (*PprofResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Pprof not implemented") -} -func (*UnimplementedElasticAgentControlServer) ProcMetrics(context.Context, *Empty) (*ProcMetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProcMetrics not implemented") -} - -func RegisterElasticAgentControlServer(s *grpc.Server, srv ElasticAgentControlServer) { - s.RegisterService(&_ElasticAgentControl_serviceDesc, srv) -} - -func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).Version(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.ElasticAgentControl/Version", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Version(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _ElasticAgentControl_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).Status(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.ElasticAgentControl/Status", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Status(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _ElasticAgentControl_Restart_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).Restart(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.ElasticAgentControl/Restart", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Restart(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func 
_ElasticAgentControl_Upgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpgradeRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).Upgrade(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.ElasticAgentControl/Upgrade", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Upgrade(ctx, req.(*UpgradeRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ElasticAgentControl_ProcMeta_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).ProcMeta(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.ElasticAgentControl/ProcMeta", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).ProcMeta(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _ElasticAgentControl_Pprof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PprofRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).Pprof(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.ElasticAgentControl/Pprof", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Pprof(ctx, req.(*PprofRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _ElasticAgentControl_ProcMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).ProcMetrics(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/proto.ElasticAgentControl/ProcMetrics", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).ProcMetrics(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -var _ElasticAgentControl_serviceDesc = grpc.ServiceDesc{ - ServiceName: "proto.ElasticAgentControl", - HandlerType: (*ElasticAgentControlServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Version", - Handler: _ElasticAgentControl_Version_Handler, - }, - { - MethodName: "Status", - Handler: _ElasticAgentControl_Status_Handler, - }, - { - MethodName: "Restart", - Handler: _ElasticAgentControl_Restart_Handler, - }, - { - MethodName: "Upgrade", - Handler: _ElasticAgentControl_Upgrade_Handler, - }, - { - MethodName: "ProcMeta", - Handler: _ElasticAgentControl_ProcMeta_Handler, - }, - { - MethodName: "Pprof", - Handler: _ElasticAgentControl_Pprof_Handler, - }, - { - MethodName: "ProcMetrics", - Handler: _ElasticAgentControl_ProcMetrics_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "control.proto", -} diff --git a/internal/pkg/agent/control/cproto/control_grpc.pb.go 
b/internal/pkg/agent/control/cproto/control_grpc.pb.go new file mode 100644 index 00000000000..3365f1a6496 --- /dev/null +++ b/internal/pkg/agent/control/cproto/control_grpc.pb.go @@ -0,0 +1,340 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. +// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.19.4 +// source: control.proto + +package cproto + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// ElasticAgentControlClient is the client API for ElasticAgentControl service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type ElasticAgentControlClient interface { + // Fetches the currently running version of the Elastic Agent. + Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) + // Fetches the currently status of the Elastic Agent. + Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) + // Restart restarts the current running Elastic Agent. + Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) + // Upgrade starts the upgrade process of Elastic Agent. + Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) + // Gather all running process metadata. + ProcMeta(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetaResponse, error) + // Gather requested pprof data from specified applications. + Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) + // Gather all running process metrics. + ProcMetrics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetricsResponse, error) +} + +type elasticAgentControlClient struct { + cc grpc.ClientConnInterface +} + +func NewElasticAgentControlClient(cc grpc.ClientConnInterface) ElasticAgentControlClient { + return &elasticAgentControlClient{cc} +} + +func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) { + out := new(VersionResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Version", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) { + out := new(StatusResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Status", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) { + out := new(RestartResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Restart", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) { + out := new(UpgradeResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Upgrade", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) ProcMeta(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetaResponse, error) { + out := new(ProcMetaResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/ProcMeta", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) { + out := new(PprofResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Pprof", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *elasticAgentControlClient) ProcMetrics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetricsResponse, error) { + out := new(ProcMetricsResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/ProcMetrics", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ElasticAgentControlServer is the server API for ElasticAgentControl service. +// All implementations must embed UnimplementedElasticAgentControlServer +// for forward compatibility +type ElasticAgentControlServer interface { + // Fetches the currently running version of the Elastic Agent. + Version(context.Context, *Empty) (*VersionResponse, error) + // Fetches the currently status of the Elastic Agent. + Status(context.Context, *Empty) (*StatusResponse, error) + // Restart restarts the current running Elastic Agent. + Restart(context.Context, *Empty) (*RestartResponse, error) + // Upgrade starts the upgrade process of Elastic Agent. + Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) + // Gather all running process metadata. + ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) + // Gather requested pprof data from specified applications. + Pprof(context.Context, *PprofRequest) (*PprofResponse, error) + // Gather all running process metrics. + ProcMetrics(context.Context, *Empty) (*ProcMetricsResponse, error) + mustEmbedUnimplementedElasticAgentControlServer() +} + +// UnimplementedElasticAgentControlServer must be embedded to have forward compatible implementations. 
+type UnimplementedElasticAgentControlServer struct { +} + +func (UnimplementedElasticAgentControlServer) Version(context.Context, *Empty) (*VersionResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") +} +func (UnimplementedElasticAgentControlServer) Status(context.Context, *Empty) (*StatusResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +} +func (UnimplementedElasticAgentControlServer) Restart(context.Context, *Empty) (*RestartResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Restart not implemented") +} +func (UnimplementedElasticAgentControlServer) Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Upgrade not implemented") +} +func (UnimplementedElasticAgentControlServer) ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcMeta not implemented") +} +func (UnimplementedElasticAgentControlServer) Pprof(context.Context, *PprofRequest) (*PprofResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Pprof not implemented") +} +func (UnimplementedElasticAgentControlServer) ProcMetrics(context.Context, *Empty) (*ProcMetricsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProcMetrics not implemented") +} +func (UnimplementedElasticAgentControlServer) mustEmbedUnimplementedElasticAgentControlServer() {} + +// UnsafeElasticAgentControlServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to ElasticAgentControlServer will +// result in compilation errors. +type UnsafeElasticAgentControlServer interface { + mustEmbedUnimplementedElasticAgentControlServer() +} + +func RegisterElasticAgentControlServer(s grpc.ServiceRegistrar, srv ElasticAgentControlServer) { + s.RegisterService(&ElasticAgentControl_ServiceDesc, srv) +} + +func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Version(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cproto.ElasticAgentControl/Version", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Version(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Status(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cproto.ElasticAgentControl/Status", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Status(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_Restart_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil 
{ + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Restart(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cproto.ElasticAgentControl/Restart", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Restart(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_Upgrade_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpgradeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Upgrade(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cproto.ElasticAgentControl/Upgrade", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Upgrade(ctx, req.(*UpgradeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_ProcMeta_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).ProcMeta(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cproto.ElasticAgentControl/ProcMeta", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).ProcMeta(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_Pprof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PprofRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).Pprof(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cproto.ElasticAgentControl/Pprof", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).Pprof(ctx, req.(*PprofRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ElasticAgentControl_ProcMetrics_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Empty) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ElasticAgentControlServer).ProcMetrics(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/cproto.ElasticAgentControl/ProcMetrics", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ElasticAgentControlServer).ProcMetrics(ctx, req.(*Empty)) + } + return interceptor(ctx, in, info, handler) +} + +// ElasticAgentControl_ServiceDesc is the grpc.ServiceDesc for ElasticAgentControl service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var ElasticAgentControl_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "cproto.ElasticAgentControl", + HandlerType: (*ElasticAgentControlServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Version", + Handler: _ElasticAgentControl_Version_Handler, + }, + { + MethodName: "Status", + Handler: _ElasticAgentControl_Status_Handler, + }, + { + MethodName: "Restart", + Handler: _ElasticAgentControl_Restart_Handler, + }, + { + MethodName: "Upgrade", + Handler: _ElasticAgentControl_Upgrade_Handler, + }, + { + MethodName: "ProcMeta", + Handler: _ElasticAgentControl_ProcMeta_Handler, + }, + { + MethodName: "Pprof", + Handler: _ElasticAgentControl_Pprof_Handler, + }, + { + MethodName: "ProcMetrics", + Handler: _ElasticAgentControl_ProcMetrics_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "control.proto", +} diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 366676540d9..6d3e5181729 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -23,10 +23,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/control" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/proto" + "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoring "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/core/socket" @@ -37,8 +36,12 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +const agentName = "elastic-agent" + // Server is the daemon side of the control protocol. type Server struct { + cproto.UnimplementedElasticAgentControlServer + logger *logger.Logger rex reexec.ExecManager statusCtrl status.Controller @@ -113,7 +116,7 @@ func (s *Server) Start() error { } else { s.server = grpc.NewServer() } - proto.RegisterElasticAgentControlServer(s.server, s) + cproto.RegisterElasticAgentControlServer(s.server, s) // start serving GRPC connections go func() { @@ -137,8 +140,8 @@ func (s *Server) Stop() { } // Version returns the currently running version. -func (s *Server) Version(_ context.Context, _ *proto.Empty) (*proto.VersionResponse, error) { - return &proto.VersionResponse{ +func (s *Server) Version(_ context.Context, _ *cproto.Empty) (*cproto.VersionResponse, error) { + return &cproto.VersionResponse{ Version: release.Version(), Commit: release.Commit(), BuildTime: release.BuildTime().Format(control.TimeFormat()), @@ -147,9 +150,9 @@ func (s *Server) Version(_ context.Context, _ *proto.Empty) (*proto.VersionRespo } // Status returns the overall status of the agent. 
-func (s *Server) Status(_ context.Context, _ *proto.Empty) (*proto.StatusResponse, error) { +func (s *Server) Status(_ context.Context, _ *cproto.Empty) (*cproto.StatusResponse, error) { status := s.statusCtrl.Status() - return &proto.StatusResponse{ + return &cproto.StatusResponse{ Status: agentStatusToProto(status.Status), Message: status.Message, Applications: agentAppStatusToProto(status.Applications), @@ -157,29 +160,29 @@ func (s *Server) Status(_ context.Context, _ *proto.Empty) (*proto.StatusRespons } // Restart performs re-exec. -func (s *Server) Restart(_ context.Context, _ *proto.Empty) (*proto.RestartResponse, error) { +func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartResponse, error) { s.rex.ReExec(nil) - return &proto.RestartResponse{ - Status: proto.ActionStatus_SUCCESS, + return &cproto.RestartResponse{ + Status: cproto.ActionStatus_SUCCESS, }, nil } // Upgrade performs the upgrade operation. -func (s *Server) Upgrade(ctx context.Context, request *proto.UpgradeRequest) (*proto.UpgradeResponse, error) { +func (s *Server) Upgrade(ctx context.Context, request *cproto.UpgradeRequest) (*cproto.UpgradeResponse, error) { s.lock.RLock() u := s.up s.lock.RUnlock() if u == nil { // not running with upgrader (must be controlled by Fleet) - return &proto.UpgradeResponse{ - Status: proto.ActionStatus_FAILURE, + return &cproto.UpgradeResponse{ + Status: cproto.ActionStatus_FAILURE, Error: "cannot be upgraded; perform upgrading using Fleet", }, nil } cb, err := u.Upgrade(ctx, &upgradeRequest{request}, false) if err != nil { - return &proto.UpgradeResponse{ - Status: proto.ActionStatus_FAILURE, + return &cproto.UpgradeResponse{ //nolint:nilerr // returns err as response + Status: cproto.ActionStatus_FAILURE, Error: err.Error(), }, nil } @@ -189,8 +192,8 @@ func (s *Server) Upgrade(ctx context.Context, request *proto.UpgradeRequest) (*p <-time.After(time.Second) s.rex.ReExec(cb) }() - return &proto.UpgradeResponse{ - Status: proto.ActionStatus_SUCCESS, + return &cproto.UpgradeResponse{ + Status: cproto.ActionStatus_SUCCESS, Version: request.Version, }, nil } @@ -213,13 +216,13 @@ type BeatInfo struct { } // ProcMeta returns version and beat inforation for all running processes. -func (s *Server) ProcMeta(ctx context.Context, _ *proto.Empty) (*proto.ProcMetaResponse, error) { +func (s *Server) ProcMeta(ctx context.Context, _ *cproto.Empty) (*cproto.ProcMetaResponse, error) { if s.routeFn == nil { return nil, errors.New("route function is nil") } - resp := &proto.ProcMetaResponse{ - Procs: []*proto.ProcMeta{}, + resp := &cproto.ProcMetaResponse{ + Procs: []*cproto.ProcMeta{}, } // gather spec data for all rk/apps running @@ -236,7 +239,7 @@ func (s *Server) ProcMeta(ctx context.Context, _ *proto.Empty) (*proto.ProcMetaR } // Pprof returns /debug/pprof data for the requested applicaiont-route_key or all running applications. 
-func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.PprofResponse, error) { +func (s *Server) Pprof(ctx context.Context, req *cproto.PprofRequest) (*cproto.PprofResponse, error) { if s.monitoringCfg == nil || s.monitoringCfg.Pprof == nil || !s.monitoringCfg.Pprof.Enabled { return nil, fmt.Errorf("agent.monitoring.pprof disabled") } @@ -250,20 +253,20 @@ func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.Ppr return nil, fmt.Errorf("unable to parse trace duration: %w", err) } - resp := &proto.PprofResponse{ - Results: []*proto.PprofResult{}, + resp := &cproto.PprofResponse{ + Results: []*cproto.PprofResult{}, } var wg sync.WaitGroup - ch := make(chan *proto.PprofResult, 1) + ch := make(chan *cproto.PprofResult, 1) // retrieve elastic-agent pprof data if requested or application is unspecified. - if req.AppName == "" || req.AppName == "elastic-agent" { - endpoint := beats.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester("elastic-agent", "", endpoint) + if req.AppName == "" || req.AppName == agentName { + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) for _, opt := range req.PprofType { wg.Add(1) - go func(opt proto.PprofOption) { + go func(opt cproto.PprofOption) { res := c.getPprof(ctx, opt, dur) ch <- res wg.Done() @@ -273,7 +276,7 @@ func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.Ppr // get requested rk/appname spec or all specs var specs []specInfo - if req.AppName != "elastic-agent" { + if req.AppName != agentName { specs = s.getSpecInfo(req.RouteKey, req.AppName) } for _, si := range specs { @@ -282,7 +285,7 @@ func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.Ppr // Launch a concurrent goroutine to gather all pprof endpoints from a socket. for _, opt := range req.PprofType { wg.Add(1) - go func(opt proto.PprofOption) { + go func(opt cproto.PprofOption) { res := c.getPprof(ctx, opt, dur) ch <- res wg.Done() @@ -305,9 +308,9 @@ func (s *Server) Pprof(ctx context.Context, req *proto.PprofRequest) (*proto.Ppr // ProcMetrics returns all buffered metrics data for the agent and running processes. 
// If the agent.monitoring.http.buffer variable is not set, or set to false, a nil result attribute is returned -func (s *Server) ProcMetrics(ctx context.Context, _ *proto.Empty) (*proto.ProcMetricsResponse, error) { +func (s *Server) ProcMetrics(ctx context.Context, _ *cproto.Empty) (*cproto.ProcMetricsResponse, error) { if s.monitoringCfg == nil || s.monitoringCfg.HTTP == nil || s.monitoringCfg.HTTP.Buffer == nil || !s.monitoringCfg.HTTP.Buffer.Enabled { - return &proto.ProcMetricsResponse{}, nil + return &cproto.ProcMetricsResponse{}, nil } if s.routeFn == nil { @@ -315,12 +318,12 @@ func (s *Server) ProcMetrics(ctx context.Context, _ *proto.Empty) (*proto.ProcMe } // gather metrics buffer data from the elastic-agent - endpoint := beats.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester("elastic-agent", "", endpoint) + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) metrics := c.procMetrics(ctx) - resp := &proto.ProcMetricsResponse{ - Result: []*proto.MetricsResponse{metrics}, + resp := &cproto.ProcMetricsResponse{ + Result: []*cproto.MetricsResponse{metrics}, } // gather metrics buffer data from all other processes @@ -442,8 +445,8 @@ func (r *socketRequester) getPath(ctx context.Context, path string) (*http.Respo } // procMeta will return process metadata by querying the "/" path. -func (r *socketRequester) procMeta(ctx context.Context) *proto.ProcMeta { - pm := &proto.ProcMeta{ +func (r *socketRequester) procMeta(ctx context.Context) *cproto.ProcMeta { + pm := &cproto.ProcMeta{ Name: r.appName, RouteKey: r.routeKey, } @@ -478,21 +481,21 @@ func (r *socketRequester) procMeta(ctx context.Context) *proto.ProcMeta { return pm } -var pprofEndpoints = map[proto.PprofOption]string{ - proto.PprofOption_ALLOCS: "/debug/pprof/allocs", - proto.PprofOption_BLOCK: "/debug/pprof/block", - proto.PprofOption_CMDLINE: "/debug/pprof/cmdline", - proto.PprofOption_GOROUTINE: "/debug/pprof/goroutine", - proto.PprofOption_HEAP: "/debug/pprof/heap", - proto.PprofOption_MUTEX: "/debug/pprof/mutex", - proto.PprofOption_PROFILE: "/debug/pprof/profile", - proto.PprofOption_THREADCREATE: "/debug/pprof/threadcreate", - proto.PprofOption_TRACE: "/debug/pprof/trace", +var pprofEndpoints = map[cproto.PprofOption]string{ + cproto.PprofOption_ALLOCS: "/debug/pprof/allocs", + cproto.PprofOption_BLOCK: "/debug/pprof/block", + cproto.PprofOption_CMDLINE: "/debug/pprof/cmdline", + cproto.PprofOption_GOROUTINE: "/debug/pprof/goroutine", + cproto.PprofOption_HEAP: "/debug/pprof/heap", + cproto.PprofOption_MUTEX: "/debug/pprof/mutex", + cproto.PprofOption_PROFILE: "/debug/pprof/profile", + cproto.PprofOption_THREADCREATE: "/debug/pprof/threadcreate", + cproto.PprofOption_TRACE: "/debug/pprof/trace", } // getProf will gather pprof data specified by the option. 
-func (r *socketRequester) getPprof(ctx context.Context, opt proto.PprofOption, dur time.Duration) *proto.PprofResult { - res := &proto.PprofResult{ +func (r *socketRequester) getPprof(ctx context.Context, opt cproto.PprofOption, dur time.Duration) *cproto.PprofResult { + res := &cproto.PprofResult{ AppName: r.appName, RouteKey: r.routeKey, PprofType: opt, @@ -504,7 +507,7 @@ func (r *socketRequester) getPprof(ctx context.Context, opt proto.PprofOption, d return res } - if opt == proto.PprofOption_PROFILE || opt == proto.PprofOption_TRACE { + if opt == cproto.PprofOption_PROFILE || opt == cproto.PprofOption_TRACE { path += fmt.Sprintf("?seconds=%0.f", dur.Seconds()) } @@ -525,8 +528,8 @@ func (r *socketRequester) getPprof(ctx context.Context, opt proto.PprofOption, d } // procMetrics will gather metrics buffer data -func (r *socketRequester) procMetrics(ctx context.Context) *proto.MetricsResponse { - res := &proto.MetricsResponse{ +func (r *socketRequester) procMetrics(ctx context.Context) *cproto.MetricsResponse { + res := &cproto.MetricsResponse{ AppName: r.appName, RouteKey: r.routeKey, } @@ -553,7 +556,7 @@ func (r *socketRequester) procMetrics(ctx context.Context) *proto.MetricsRespons } type upgradeRequest struct { - *proto.UpgradeRequest + *cproto.UpgradeRequest } func (r *upgradeRequest) Version() string { @@ -569,27 +572,27 @@ func (r *upgradeRequest) FleetAction() *fleetapi.ActionUpgrade { return nil } -func agentStatusToProto(code status.AgentStatusCode) proto.Status { +func agentStatusToProto(code status.AgentStatusCode) cproto.Status { if code == status.Degraded { - return proto.Status_DEGRADED + return cproto.Status_DEGRADED } if code == status.Failed { - return proto.Status_FAILED + return cproto.Status_FAILED } - return proto.Status_HEALTHY + return cproto.Status_HEALTHY } -func agentAppStatusToProto(apps []status.AgentApplicationStatus) []*proto.ApplicationStatus { - s := make([]*proto.ApplicationStatus, len(apps)) +func agentAppStatusToProto(apps []status.AgentApplicationStatus) []*cproto.ApplicationStatus { + s := make([]*cproto.ApplicationStatus, len(apps)) for i, a := range apps { var payload []byte if a.Payload != nil { payload, _ = json.Marshal(a.Payload) } - s[i] = &proto.ApplicationStatus{ + s[i] = &cproto.ApplicationStatus{ Id: a.ID, Name: a.Name, - Status: proto.Status(a.Status.ToProto()), + Status: cproto.Status(a.Status.ToProto()), Message: a.Message, Payload: string(payload), } diff --git a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go b/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go index da9123f4587..99ceab143f1 100644 --- a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go +++ b/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go @@ -14,9 +14,9 @@ import ( "path/filepath" "strconv" - protobuf "github.com/golang/protobuf/proto" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + protobuf "google.golang.org/protobuf/proto" "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent-client/v7/pkg/client" @@ -29,27 +29,27 @@ func main() { panic(err) } f, _ := os.OpenFile(filepath.Join(os.TempDir(), "testing.out"), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666) - f.WriteString("starting \n") + _, _ = f.WriteString("starting \n") ctx, cancel := context.WithCancel(context.Background()) s := &configServer{ f: f, ctx: ctx, cancel: cancel, } - f.WriteString(fmt.Sprintf("reading creds from port: %d\n", srvPort)) + _, _ = 
f.WriteString(fmt.Sprintf("reading creds from port: %d\n", srvPort)) client, err := clientFromNet(srvPort, s) if err != nil { - f.WriteString(err.Error()) + _, _ = f.WriteString(err.Error()) panic(err) } s.client = client err = client.Start(ctx) if err != nil { - f.WriteString(err.Error()) + _, _ = f.WriteString(err.Error()) panic(err) } <-ctx.Done() - f.WriteString("finished \n") + _, _ = f.WriteString("finished \n") } type configServer struct { @@ -60,41 +60,41 @@ type configServer struct { } func (s *configServer) OnConfig(cfgString string) { - s.client.Status(proto.StateObserved_CONFIGURING, "Writing config file", nil) + _ = s.client.Status(proto.StateObserved_CONFIGURING, "Writing config file", nil) testCfg := &TestConfig{} if err := yaml.Unmarshal([]byte(cfgString), &testCfg); err != nil { - s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to unmarshall config: %s", err), nil) + _ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to unmarshall config: %s", err), nil) return } if testCfg.TestFile != "" { tf, err := os.Create(testCfg.TestFile) if err != nil { - s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to create file %s: %s", testCfg.TestFile, err), nil) + _ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to create file %s: %s", testCfg.TestFile, err), nil) return } err = tf.Close() if err != nil { - s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to close file %s: %s", testCfg.TestFile, err), nil) + _ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to close file %s: %s", testCfg.TestFile, err), nil) return } } - s.client.Status(proto.StateObserved_HEALTHY, "Running", map[string]interface{}{ + _ = s.client.Status(proto.StateObserved_HEALTHY, "Running", map[string]interface{}{ "status": proto.StateObserved_HEALTHY, "message": "Running", }) } func (s *configServer) OnStop() { - s.client.Status(proto.StateObserved_STOPPING, "Stopping", nil) + _ = s.client.Status(proto.StateObserved_STOPPING, "Stopping", nil) s.cancel() } func (s *configServer) OnError(err error) { - s.f.WriteString(err.Error()) + _, _ = s.f.WriteString(err.Error()) } // TestConfig is a configuration for testing Config calls @@ -136,6 +136,7 @@ func clientFromNet(port int, impl client.StateInterface, actions ...client.Actio ServerName: connInfo.ServerName, Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, + MinVersion: tls.VersionTLS12, }) return client.New(connInfo.Addr, connInfo.Token, impl, actions, grpc.WithTransportCredentials(trans)), nil } diff --git a/magefile.go b/magefile.go index aba39385159..b640f46fd08 100644 --- a/magefile.go +++ b/magefile.go @@ -496,7 +496,11 @@ func Config() { // ControlProto generates pkg/agent/control/proto module. func ControlProto() error { - return sh.RunV("protoc", "--go_out=plugins=grpc:.", "control.proto") + return sh.RunV( + "protoc", + "--go_out=internal/pkg/agent/control/cproto", "--go_opt=paths=source_relative", + "--go-grpc_out=internal/pkg/agent/control/cproto", "--go-grpc_opt=paths=source_relative", + "control.proto") } // BuildSpec make sure that all the suppported program spec are built into the binary. diff --git a/pkg/component/component.go b/pkg/component/component.go new file mode 100644 index 00000000000..db38bb38471 --- /dev/null +++ b/pkg/component/component.go @@ -0,0 +1,308 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +import ( + "errors" + "fmt" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" + "github.com/elastic/elastic-agent/internal/pkg/eql" +) + +var ( + // ErrOutputNotSupported is returned when an input does not support an output type + ErrOutputNotSupported = errors.New("input doesn't support output type") +) + +// ErrInputRuntimeCheckFail is returned when a runtime prevention check defined in an input specification triggers. +type ErrInputRuntimeCheckFail struct { + // message is the reason defined in the check + message string +} + +// NewErrInputRuntimeCheckFail creates an ErrInputRuntimeCheckFail with the message. +func NewErrInputRuntimeCheckFail(message string) *ErrInputRuntimeCheckFail { + return &ErrInputRuntimeCheckFail{message} +} + +// Error returns the message set on the check. +func (e *ErrInputRuntimeCheckFail) Error() string { + return e.message +} + +// Unit is a single input or output that a component must run. +type Unit struct { + ID string + Type client.UnitType + Config map[string]interface{} +} + +// Component is a set of units that needs to run. +type Component struct { + // ID is the unique ID of the component. + ID string + + // Err is set when there is an error running this input. The runtime uses it to report + // the reason that all of these units failed. + Err error + + // Spec describes how the input should run. + Spec InputRuntimeSpec + + // Units that should be running inside this component. + Units []Unit +} + +// ToComponents returns the components that should be running based on the policy and the current runtime specification. +func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}) ([]Component, error) { + outputsMap, err := toIntermediate(policy) + if err != nil { + return nil, err + } + if outputsMap == nil { + return nil, nil + } + + // set the runtime variables that are available in the input specification runtime checks + vars, err := transpiler.NewVars(map[string]interface{}{ + "runtime": map[string]interface{}{ + "platform": r.platform.String(), + "os": r.platform.OS, + "arch": r.platform.Arch, + "family": r.platform.Family, + "major": r.platform.Major, + "minor": r.platform.Minor, + }, + }, nil) + if err != nil { + return nil, err + } + + var components []Component + for outputName, output := range outputsMap { + if !output.enabled { + // skip; not enabled + continue + } + + // merge aliases into the same input type + inputsMap := make(map[string][]inputI) + for inputType, inputs := range output.inputs { + realInputType, ok := r.aliasMapping[inputType] + if ok { + inputsMap[realInputType] = append(inputsMap[realInputType], inputs...) + } else { + inputsMap[inputType] = append(inputsMap[inputType], inputs...)
+ } + } + + for inputType, inputs := range inputsMap { + inputSpec, err := r.GetInput(inputType) + if err == nil { + // update the inputType to match the spec, as it could have been an alias + inputType = inputSpec.InputType + if !containsStr(inputSpec.Spec.Outputs, output.outputType) { + inputSpec = InputRuntimeSpec{} // empty the spec + err = ErrOutputNotSupported + } else { + err = validateRuntimeChecks(&inputSpec.Spec, vars) + if err != nil { + inputSpec = InputRuntimeSpec{} // empty the spec + } + } + } + units := make([]Unit, 0, len(inputs)+1) + for _, input := range inputs { + if !input.enabled { + // skip; not enabled + continue + } + units = append(units, Unit{ + ID: fmt.Sprintf("%s-%s-%s", inputType, outputName, input.id), + Type: client.UnitTypeInput, + Config: input.input, + }) + } + if len(units) > 0 { + componentID := fmt.Sprintf("%s-%s", inputType, outputName) + units = append(units, Unit{ + ID: componentID, + Type: client.UnitTypeOutput, + Config: output.output, + }) + components = append(components, Component{ + ID: componentID, + Err: err, + Spec: inputSpec, + Units: units, + }) + } + } + } + return components, nil +} + +// toIntermediate converts the policy into an intermediate representation that is easier to map into a set +// of components. +func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { + const ( + outputsKey = "outputs" + enabledKey = "enabled" + inputsKey = "inputs" + typeKey = "type" + idKey = "id" + useKey = "use_output" + ) + + // intermediate structure for output to input mapping (this structure allows different input types per output) + outputsMap := make(map[string]outputI) + + // map the outputs first + outputsRaw, ok := policy[outputsKey] + if !ok { + // no outputs defined; no components then + return nil, nil + } + outputs, ok := outputsRaw.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid 'outputs', expected a map not a %T", outputsRaw) + } + for name, outputRaw := range outputs { + output, ok := outputRaw.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid 'outputs.%s', expected a map not a %T", name, outputRaw) + } + typeRaw, ok := output[typeKey] + if !ok { + return nil, fmt.Errorf("invalid 'outputs.%s', 'type' missing", name) + } + t, ok := typeRaw.(string) + if !ok { + return nil, fmt.Errorf("invalid 'outputs.%s.type', expected a string not a %T", name, typeRaw) + } + enabled := true + if enabledRaw, ok := output[enabledKey]; ok { + enabledVal, ok := enabledRaw.(bool) + if !ok { + return nil, fmt.Errorf("invalid 'outputs.%s.enabled', expected a bool not a %T", name, enabledRaw) + } + enabled = enabledVal + delete(output, enabledKey) + } + outputsMap[name] = outputI{ + name: name, + enabled: enabled, + outputType: t, + output: output, + inputs: make(map[string][]inputI), + } + } + + // map the inputs to the outputs + inputsRaw, ok := policy[inputsKey] + if !ok { + // no inputs; no components then + return nil, nil + } + inputs, ok := inputsRaw.([]interface{}) + if !ok { + return nil, fmt.Errorf("invalid 'inputs', expected an array not a %T", inputsRaw) + } + for idx, inputRaw := range inputs { + input, ok := inputRaw.(map[string]interface{}) + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d', expected a map not a %T", idx, inputRaw) + } + typeRaw, ok := input[typeKey] + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d', 'type' missing", idx) + } + t, ok := typeRaw.(string) + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d.type', expected a string not a 
%T", idx, typeRaw) + } + idRaw, ok := input[idKey] + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d', 'id' missing", idx) + } + id, ok := idRaw.(string) + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d.id', expected a string not a %T", idx, idRaw) + } + outputName := "default" + if outputRaw, ok := input[useKey]; ok { + outputNameVal, ok := outputRaw.(string) + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d.use_output', expected a string not a %T", idx, outputRaw) + } + outputName = outputNameVal + delete(input, useKey) + } + output, ok := outputsMap[outputName] + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d.use_output', references an unknown output '%s'", idx, outputName) + } + enabled := true + if enabledRaw, ok := input[enabledKey]; ok { + enabledVal, ok := enabledRaw.(bool) + if !ok { + return nil, fmt.Errorf("invalid 'inputs.%d.enabled', expected a bool not a %T", idx, enabledRaw) + } + enabled = enabledVal + delete(input, enabledKey) + } + output.inputs[t] = append(output.inputs[t], inputI{ + idx: idx, + id: id, + enabled: enabled, + inputType: t, + input: input, + }) + } + if len(outputsMap) == 0 { + return nil, nil + } + return outputsMap, nil +} + +type inputI struct { + idx int + id string + enabled bool + inputType string + input map[string]interface{} +} + +type outputI struct { + name string + enabled bool + outputType string + output map[string]interface{} + inputs map[string][]inputI +} + +func validateRuntimeChecks(spec *InputSpec, store eql.VarStore) error { + for _, prevention := range spec.Runtime.Preventions { + expression, err := eql.New(prevention.Condition) + if err != nil { + // this should not happen because the specification already validates that this + // should never error; but just in-case we consider this a reason to prevent the running of the input + return NewErrInputRuntimeCheckFail(err.Error()) + } + ok, err := expression.Eval(store) + if err != nil { + // error is considered a failure and reported as a reason + return NewErrInputRuntimeCheckFail(err.Error()) + } + if ok { + // true means the prevention valid (so input should not run) + return NewErrInputRuntimeCheckFail(prevention.Message) + } + } + return nil +} diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go new file mode 100644 index 00000000000..a929fbde7b3 --- /dev/null +++ b/pkg/component/component_test.go @@ -0,0 +1,851 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package component + +import ( + "path/filepath" + "sort" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" +) + +func TestToComponents(t *testing.T) { + var linuxAMD64Platform = PlatformDetail{ + Platform: Platform{ + OS: Linux, + Arch: AMD64, + GOOS: Linux, + }, + } + + scenarios := []struct { + Name string + Platform PlatformDetail + Policy map[string]interface{} + Err string + Result []Component + }{ + { + Name: "Empty policy", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{}, + }, + { + Name: "Invalid: outputs as an array", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": []string{"should be a map"}, + }, + Err: "invalid 'outputs', expected a map not a []string", + }, + { + Name: "Invalid: outputs entry as an array", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": []string{"should be a map"}, + }, + }, + Err: "invalid 'outputs.default', expected a map not a []string", + }, + { + Name: "Invalid: outputs entry missing type", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{}, + }, + }, + Err: "invalid 'outputs.default', 'type' missing", + }, + { + Name: "Invalid: outputs entry type not a string", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": 0, + }, + }, + }, + Err: "invalid 'outputs.default.type', expected a string not a int", + }, + { + Name: "Invalid: outputs entry enabled not a bool", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": "false", + }, + }, + }, + Err: "invalid 'outputs.default.enabled', expected a bool not a string", + }, + { + Name: "No inputs", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + }, + }, + { + Name: "Invalid: inputs as a map", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": map[string]interface{}{}, + }, + Err: "invalid 'inputs', expected an array not a map[string]interface {}", + }, + { + Name: "Invalid: inputs entry as an array", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + []string{"should be a map"}, + }, + }, + Err: "invalid 'inputs.0', expected a map not a []string", + }, + { + Name: "Invalid: inputs entry missing type", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{}, + }, + }, + Err: "invalid 'inputs.0', 'type' missing", + }, + { + Name: "Invalid: inputs entry type not a string", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": 
"elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": 0, + }, + }, + }, + Err: "invalid 'inputs.0.type', expected a string not a int", + }, + { + Name: "Invalid: inputs entry missing id", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + }, + }, + }, + Err: "invalid 'inputs.0', 'id' missing", + }, + { + Name: "Invalid: inputs entry id not a string", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": 0, + }, + }, + }, + Err: "invalid 'inputs.0.id', expected a string not a int", + }, + { + Name: "Invalid: inputs entry use_output not a string", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "use_output": 0, + }, + }, + }, + Err: "invalid 'inputs.0.use_output', expected a string not a int", + }, + { + Name: "Invalid: inputs entry use_output references unknown output", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "use_output": "other", + }, + }, + }, + Err: "invalid 'inputs.0.use_output', references an unknown output 'other'", + }, + { + Name: "Invalid: inputs entry enabled not a bool", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "use_output": "default", + "enabled": "false", + }, + }, + }, + Err: "invalid 'inputs.0.enabled', expected a bool not a string", + }, + { + Name: "Invalid: inputs unknown type", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "unknown", + "id": "unknown-0", + "use_output": "default", + "enabled": true, + }, + }, + }, + Result: []Component{ + { + ID: "unknown-default", + Spec: InputRuntimeSpec{}, + Err: ErrInputNotSupported, + Units: []Unit{ + { + ID: "unknown-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "unknown-default-unknown-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "unknown", + "id": "unknown-0", + }, + }, + }, + }, + }, + }, + { + Name: "Invalid: inputs endpoint not support on container platform", + Platform: PlatformDetail{ + Platform: Platform{ + OS: Container, + Arch: AMD64, + GOOS: Linux, + }, + }, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": 
map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "endpoint", + "id": "endpoint-0", + "use_output": "default", + "enabled": true, + }, + }, + }, + Result: []Component{ + { + ID: "endpoint-default", + Spec: InputRuntimeSpec{}, + Err: ErrInputNotSupportedOnPlatform, + Units: []Unit{ + { + ID: "endpoint-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "endpoint-default-endpoint-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "endpoint", + "id": "endpoint-0", + }, + }, + }, + }, + }, + }, + { + Name: "Invalid: inputs endpoint doesn't support logstash", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "logstash", + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "endpoint", + "id": "endpoint-0", + }, + }, + }, + Result: []Component{ + { + ID: "endpoint-default", + Spec: InputRuntimeSpec{}, + Err: ErrOutputNotSupported, + Units: []Unit{ + { + ID: "endpoint-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "logstash", + }, + }, + { + ID: "endpoint-default-endpoint-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "endpoint", + "id": "endpoint-0", + }, + }, + }, + }, + }, + }, + { + Name: "Invalid: inputs endpoint doesnt support arm64 redhat major 7", + Platform: PlatformDetail{ + Platform: Platform{ + OS: Linux, + Arch: ARM64, + GOOS: Linux, + }, + Family: "redhat", + Major: "7", + Minor: "2", + }, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "endpoint", + "id": "endpoint-0", + "use_output": "default", + "enabled": true, + }, + }, + }, + Result: []Component{ + { + ID: "endpoint-default", + Spec: InputRuntimeSpec{}, + Err: NewErrInputRuntimeCheckFail("No support for RHEL7 on arm64"), + Units: []Unit{ + { + ID: "endpoint-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "endpoint-default-endpoint-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "endpoint", + "id": "endpoint-0", + }, + }, + }, + }, + }, + }, + { + Name: "Output disabled", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": false, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "enabled": true, + }, + }, + }, + }, + { + Name: "Input disabled", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "enabled": false, + }, + }, + }, + }, + { + Name: "Simple representation", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": 
"filestream-0", + "enabled": true, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + "enabled": false, + }, + }, + }, + Result: []Component{ + { + Spec: InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + }, + }, + }, + }, + }, + { + Name: "Complex representation", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + }, + "other": map[string]interface{}{ + "type": "elasticsearch", + }, + "stashit": map[string]interface{}{ + "type": "logstash", + }, + "redis": map[string]interface{}{ + "type": "redis", + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-2", + "enabled": false, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-3", + "use_output": "other", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-4", + "use_output": "other", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + "use_output": "default", + }, + map[string]interface{}{ + "type": "log", + "id": "logfile-1", + "use_output": "default", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-2", + "use_output": "other", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + "use_output": "stashit", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + "use_output": "redis", + }, + map[string]interface{}{ + "type": "apm", + "id": "apm-server-0", + }, + }, + }, + Result: []Component{ + { + Spec: InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + }, + { + ID: "filestream-default-filestream-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + }, + }, + }, + }, + { + Spec: InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-other", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "filestream-other-filestream-3", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "filestream", + "id": "filestream-3", + }, + }, + { + ID: "filestream-other-filestream-4", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "filestream", + "id": "filestream-4", + }, + }, + }, + }, + { + Spec: InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", 
"specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "log-default-logfile-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + }, + }, + { + ID: "log-default-logfile-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "log", + "id": "logfile-1", + }, + }, + }, + }, + { + Spec: InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-other", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "log-other-logfile-2", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "logfile", + "id": "logfile-2", + }, + }, + }, + }, + { + Spec: InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-stashit", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "logstash", + }, + }, + { + ID: "log-stashit-logfile-3", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + }, + }, + }, + }, + { + Spec: InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-redis", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "redis", + }, + }, + { + ID: "log-redis-logfile-4", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + }, + }, + }, + }, + { + Spec: InputRuntimeSpec{ + InputType: "apm", + BinaryName: "apm-server", + BinaryPath: filepath.Join("..", "..", "specs", "apm-server"), + }, + Units: []Unit{ + { + ID: "apm-default", + Type: client.UnitTypeOutput, + Config: map[string]interface{}{ + "type": "elasticsearch", + }, + }, + { + ID: "apm-default-apm-server-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "apm", + "id": "apm-server-0", + }, + }, + }, + }, + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.Name, func(t *testing.T) { + runtime, err := LoadRuntimeSpecs(filepath.Join("..", "..", "specs"), scenario.Platform, SkipBinaryCheck()) + require.NoError(t, err) + + result, err := runtime.ToComponents(scenario.Policy) + if scenario.Err != "" { + assert.Equal(t, scenario.Err, err.Error()) + } else { + require.NoError(t, err) + require.Len(t, result, len(scenario.Result)) + sortComponents(scenario.Result) + sortComponents(result) + for i, expected := range scenario.Result { + actual := result[i] + if expected.Err != nil { + assert.Equal(t, expected.Err, actual.Err) + assert.EqualValues(t, expected.Units, actual.Units) + } else { + assert.Equal(t, expected.Spec.InputType, actual.Spec.InputType) + assert.Equal(t, expected.Spec.BinaryName, actual.Spec.BinaryName) + assert.Equal(t, expected.Spec.BinaryPath, actual.Spec.BinaryPath) + assert.EqualValues(t, expected.Units, actual.Units) + } + } + } + }) + } +} + +func sortComponents(components []Component) { + for _, comp := range components { + sort.Slice(comp.Units, func(i, j int) bool { + return comp.Units[i].ID < comp.Units[j].ID + }) + } + sort.Slice(components[:], func(i, j int) bool { + return components[i].Units[0].ID < components[j].Units[0].ID + }) +} 
diff --git a/pkg/component/load.go b/pkg/component/load.go
index 7782dcaa2a6..feb16d0b97c 100644
--- a/pkg/component/load.go
+++ b/pkg/component/load.go
@@ -5,9 +5,158 @@
 package component
 
 import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+
 	"github.com/elastic/go-ucfg/yaml"
 )
 
+const specGlobPattern = "*.spec.yml"
+
+var (
+	// ErrInputNotSupported is returned when the input is not supported on any platform
+	ErrInputNotSupported = errors.New("input not supported")
+	// ErrInputNotSupportedOnPlatform is returned when the input is supported but not on this platform
+	ErrInputNotSupportedOnPlatform = errors.New("input not supported on this platform")
+)
+
+// InputRuntimeSpec describes the specification for running this input on the current platform.
+type InputRuntimeSpec struct {
+	InputType  string
+	BinaryName string
+	BinaryPath string
+	Spec       InputSpec
+}
+
+// RuntimeSpecs holds all the specifications for inputs that are supported on the current platform.
+type RuntimeSpecs struct {
+	// platform that was loaded
+	platform PlatformDetail
+
+	// inputTypes all input types even if that input is not supported on the current platform
+	inputTypes []string
+
+	// inputSpecs only the input specs for the current platform
+	inputSpecs map[string]InputRuntimeSpec
+
+	// aliasMapping maps aliases to real input name
+	aliasMapping map[string]string
+}
+
+type loadRuntimeOpts struct {
+	skipBinaryCheck bool
+}
+
+// LoadRuntimeOption is an option for loading the runtime specs.
+type LoadRuntimeOption func(o *loadRuntimeOpts)
+
+// SkipBinaryCheck skips checking that a binary is next to the runtime.
+func SkipBinaryCheck() LoadRuntimeOption {
+	return func(o *loadRuntimeOpts) {
+		o.skipBinaryCheck = true
+	}
+}
+
+// LoadRuntimeSpecs loads all the component input specifications from the provided directory.
+//
+// Returns a mapping from input type to the binary name and specification for that input. Each
+// {binary-name} in the directory is required to have a matching {binary-name}.spec.yml next to it.
+// If a {binary-name}.spec.yml exists but no matching {binary-name} is found, that results in an
+// error. If a {binary-name} exists without a {binary-name}.spec.yml, it is ignored.
+func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOption) (RuntimeSpecs, error) {
+	var opt loadRuntimeOpts
+	for _, o := range opts {
+		o(&opt)
+	}
+	matches, err := filepath.Glob(filepath.Join(dir, specGlobPattern))
+	if err != nil {
+		return RuntimeSpecs{}, err
+	}
+	var types []string
+	mapping := make(map[string]InputRuntimeSpec)
+	aliases := make(map[string]string)
+	for _, match := range matches {
+		binaryName := filepath.Base(match[:len(match)-len(specGlobPattern)+1])
+		binaryPath := match[:len(match)-len(specGlobPattern)+1]
+		if platform.OS == Windows {
+			binaryPath += ".exe"
+		}
+		if !opt.skipBinaryCheck {
+			info, err := os.Stat(binaryPath)
+			if errors.Is(err, os.ErrNotExist) {
+				return RuntimeSpecs{}, fmt.Errorf("missing matching binary for %s", match)
+			} else if err != nil {
+				return RuntimeSpecs{}, fmt.Errorf("failed to stat %s: %w", binaryPath, err)
+			} else if info.IsDir() {
+				return RuntimeSpecs{}, fmt.Errorf("missing matching binary for %s", match)
+			}
+		}
+		data, err := ioutil.ReadFile(match)
+		if err != nil {
+			return RuntimeSpecs{}, fmt.Errorf("failed reading spec %s: %w", match, err)
+		}
+		spec, err := LoadSpec(data)
+		if err != nil {
+			return RuntimeSpecs{}, fmt.Errorf("failed reading spec %s: %w", match, err)
+		}
+		for _, input := range spec.Inputs {
+			if !containsStr(types, input.Name) {
+				types = append(types, input.Name)
+			}
+			if !containsStr(input.Platforms, platform.String()) {
+				// input spec doesn't support this platform
+				continue
+			}
+			if existing, exists := mapping[input.Name]; exists {
+				return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input '%s' already exists in spec '%s'", match, input.Name, existing.BinaryName)
+			}
+			if existing, exists := aliases[input.Name]; exists {
+				return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input '%s' collides with an alias from another input '%s'", match, input.Name, existing)
+			}
+			for _, alias := range input.Aliases {
+				if existing, exists := mapping[alias]; exists {
+					return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input alias '%s' collides with an already defined input in spec '%s'", match, alias, existing.BinaryName)
+				}
+				if existing, exists := aliases[alias]; exists {
+					return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input alias '%s' collides with an already defined input alias for input '%s'", match, alias, existing)
+				}
+			}
+			mapping[input.Name] = InputRuntimeSpec{
+				InputType:  input.Name,
+				BinaryName: binaryName,
+				BinaryPath: binaryPath,
+				Spec:       input,
+			}
+			for _, alias := range input.Aliases {
+				aliases[alias] = input.Name
+			}
+		}
+	}
+	return RuntimeSpecs{
+		platform:     platform,
+		inputTypes:   types,
+		inputSpecs:   mapping,
+		aliasMapping: aliases,
+	}, nil
+}
+
+// GetInput returns the input runtime specification for this input on this platform.
+func (r *RuntimeSpecs) GetInput(inputType string) (InputRuntimeSpec, error) {
+	runtime, ok := r.inputSpecs[inputType]
+	if ok {
+		return runtime, nil
+	}
+	if containsStr(r.inputTypes, inputType) {
+		// supported but not on this platform
+		return InputRuntimeSpec{}, ErrInputNotSupportedOnPlatform
+	}
+	// not supported at all
+	return InputRuntimeSpec{}, ErrInputNotSupported
+}
+
 // LoadSpec loads the component specification.
 //
 // Will error in the case that the specification is not valid. Only valid specifications are allowed.
@@ -23,3 +172,12 @@ func LoadSpec(data []byte) (Spec, error) {
 	}
 	return spec, nil
 }
+
+func containsStr(s []string, v string) bool {
+	for _, i := range s {
+		if i == v {
+			return true
+		}
+	}
+	return false
+}
diff --git a/pkg/component/load_test.go b/pkg/component/load_test.go
index 289ace2c72f..f9d88ebba26 100644
--- a/pkg/component/load_test.go
+++ b/pkg/component/load_test.go
@@ -9,9 +9,39 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
 
+func TestLoadRuntimeSpecs(t *testing.T) {
+	for _, platform := range GlobalPlatforms {
+		t.Run(platform.String(), func(t *testing.T) {
+			detail := PlatformDetail{
+				Platform: platform,
+			}
+			runtime, err := LoadRuntimeSpecs(filepath.Join("..", "..", "specs"), detail, SkipBinaryCheck())
+			require.NoError(t, err)
+			assert.Greater(t, len(runtime.inputTypes), 0)
+			assert.Greater(t, len(runtime.inputSpecs), 0)
+
+			// filestream is supported by all platforms
+			input, err := runtime.GetInput("filestream")
+			require.NoError(t, err)
+			assert.NotNil(t, input)
+
+			// unknown input
+			_, err = runtime.GetInput("unknown")
+			require.ErrorIs(t, err, ErrInputNotSupported)
+
+			// endpoint not supported on container platforms
+			if platform.OS == "container" {
+				_, err = runtime.GetInput("endpoint")
+				assert.ErrorIs(t, err, ErrInputNotSupportedOnPlatform)
+			}
+		})
+	}
+}
+
 func TestLoadSpec_Components(t *testing.T) {
 	scenarios := []struct {
 		Name string
@@ -19,43 +49,43 @@ func TestLoadSpec_Components(t *testing.T) {
 	}{
 		{
 			Name: "APM Server",
-			Path: "apm-server.yml",
+			Path: "apm-server.spec.yml",
 		},
 		{
 			Name: "Auditbeat",
-			Path: "auditbeat.yml",
+			Path: "auditbeat.spec.yml",
 		},
 		{
 			Name: "Cloudbeat",
-			Path: "cloudbeat.yml",
+			Path: "cloudbeat.spec.yml",
 		},
 		{
 			Name: "Endpoint Security",
-			Path: "endpoint-security.yml",
+			Path: "endpoint-security.spec.yml",
 		},
 		{
 			Name: "Filebeat",
-			Path: "filebeat.yml",
+			Path: "filebeat.spec.yml",
 		},
 		{
 			Name: "Fleet Server",
-			Path: "fleet-server.yml",
+			Path: "fleet-server.spec.yml",
 		},
 		{
 			Name: "Heartbeat",
-			Path: "heartbeat.yml",
+			Path: "heartbeat.spec.yml",
 		},
 		{
 			Name: "Metricbeat",
-			Path: "metricbeat.yml",
+			Path: "metricbeat.spec.yml",
 		},
 		{
 			Name: "Osquerybeat",
-			Path: "osquerybeat.yml",
+			Path: "osquerybeat.spec.yml",
 		},
 		{
 			Name: "Packetbeat",
-			Path: "packetbeat.yml",
+			Path: "packetbeat.spec.yml",
 		},
 	}
diff --git a/pkg/component/platforms.go b/pkg/component/platforms.go
index b8ad7ec1e9d..98e5bf21cd6 100644
--- a/pkg/component/platforms.go
+++ b/pkg/component/platforms.go
@@ -4,6 +4,11 @@
 
 package component
 
+import (
+	"fmt"
+	"strings"
+)
+
 const (
 	// Container represents running inside a container
 	Container = "container"
@@ -22,12 +27,18 @@ const (
 	ARM64 = "arm64"
 )
 
-// Platforms defines the platforms that a component can support
-var Platforms = []struct {
+// Platform defines the platform that a component can support
+type Platform struct {
 	OS   string
 	Arch string
 	GOOS string
-}{
+}
+
+// Platforms is an array of platforms.
+type Platforms []Platform
+
+// GlobalPlatforms defines the platforms that a component can support
+var GlobalPlatforms = Platforms{
 	{
 		OS:   Container,
 		Arch: AMD64,
@@ -64,3 +75,31 @@ var Platforms = []struct {
 		GOOS: Windows,
 	},
 }
+
+// String returns the platform string identifier.
+func (p *Platform) String() string {
+	return fmt.Sprintf("%s/%s", p.OS, p.Arch)
+}
+
+// Exists returns true if the provided os/arch string matches a known platform.
+func (p Platforms) Exists(platform string) bool {
+	pieces := strings.SplitN(platform, "/", 2)
+	if len(pieces) != 2 {
+		return false
+	}
+	for _, platform := range p {
+		if platform.OS == pieces[0] && platform.Arch == pieces[1] {
+			return true
+		}
+	}
+	return false
+}
+
+// PlatformDetail is a platform that carries more detailed information about the running platform.
+type PlatformDetail struct {
+	Platform
+
+	Family string
+	Major  string
+	Minor  string
+}
diff --git a/pkg/component/spec.go b/pkg/component/spec.go
index e428fb71a5f..2800f7275a2 100644
--- a/pkg/component/spec.go
+++ b/pkg/component/spec.go
@@ -8,6 +8,8 @@ import (
 	"errors"
 	"fmt"
 	"time"
+
+	"github.com/elastic/elastic-agent/internal/pkg/eql"
 )
 
 // Spec a components specification.
@@ -32,7 +34,7 @@ func (s *Spec) Validate() error {
 		for _, platform := range input.Platforms {
 			for _, existing := range a {
 				if existing == platform {
-					return fmt.Errorf("input %s at inputs.%d defines the same platform as a previous definition", input.Name, i)
+					return fmt.Errorf("input '%s' at inputs.%d defines the same platform as a previous definition", input.Name, i)
 				}
 			}
 			a = append(a, platform)
@@ -58,22 +60,31 @@ type InputSpec struct {
 
 // Validate ensures correctness of input specification.
 func (s *InputSpec) Validate() error {
 	if s.Command == nil && s.Service == nil {
-		return fmt.Errorf("input %s must define either command or service", s.Name)
+		return fmt.Errorf("input '%s' must define either command or service", s.Name)
 	}
 	for i, a := range s.Platforms {
+		if !GlobalPlatforms.Exists(a) {
+			return fmt.Errorf("input '%s' defines an unknown platform '%s'", s.Name, a)
+		}
 		for j, b := range s.Platforms {
 			if i != j && a == b {
-				return fmt.Errorf("input %s defines the platform %s more than once", s.Name, a)
+				return fmt.Errorf("input '%s' defines the platform '%s' more than once", s.Name, a)
 			}
 		}
 	}
 	for i, a := range s.Outputs {
 		for j, b := range s.Outputs {
 			if i != j && a == b {
-				return fmt.Errorf("input %s defines the output %s more than once", s.Name, a)
+				return fmt.Errorf("input '%s' defines the output '%s' more than once", s.Name, a)
 			}
 		}
 	}
+	for idx, prevention := range s.Runtime.Preventions {
+		_, err := eql.New(prevention.Condition)
+		if err != nil {
+			return fmt.Errorf("input '%s' defines 'runtime.preventions.%d.condition' that failed to compile: %w", s.Name, idx, err)
+		}
+	}
 	return nil
 }
diff --git a/pkg/component/spec_test.go b/pkg/component/spec_test.go
index 3b7f3bf2ae8..33866df5b0f 100644
--- a/pkg/component/spec_test.go
+++ b/pkg/component/spec_test.go
@@ -39,7 +39,7 @@ inputs:
     outputs:
       - shipper
 `,
-			Err: "input testing must define either command or service accessing 'inputs.0'",
+			Err: "input 'testing' must define either command or service accessing 'inputs.0'",
 		},
 		{
 			Name: "Duplicate Platform",
@@ -55,7 +55,22 @@ inputs:
       - shipper
     command: {}
 `,
-			Err: "input testing defines the platform linux/amd64 more than once accessing 'inputs.0'",
+			Err: "input 'testing' defines the platform 'linux/amd64' more than once accessing 'inputs.0'",
+		},
+		{
+			Name: "Unknown Platform",
+			Spec: `
+version: 2
+inputs:
+  - name: testing
+    description: Testing Input
+    platforms:
+      - unknown/amd64
+    outputs:
+      - shipper
+    command: {}
+`,
+			Err: "input 'testing' defines an unknown platform 'unknown/amd64' accessing 'inputs.0'",
 		},
 		{
 			Name: "Duplicate Output",
@@ -71,7 +86,7 @@ inputs:
       - shipper
     command: {}
 `,
-			Err: "input testing defines the output shipper more than once accessing 'inputs.0'",
+			Err: "input 'testing' defines the output 'shipper' more than once accessing 'inputs.0'",
 		},
 		{
 			Name: "Duplicate Platform Same Input Name",
@@ -93,7 +108,7 @@ inputs:
       - shipper
     command: {}
 `,
-			Err: "input testing at inputs.1 defines the same platform as a previous definition accessing config",
+			Err: "input 'testing' at inputs.1 defines the same platform as a previous definition accessing config",
 		},
 		{
 			Name: "Valid",
diff --git a/pkg/core/server/server.go b/pkg/core/server/server.go
index f7535ecb7e9..6d3a284cd79 100644
--- a/pkg/core/server/server.go
+++ b/pkg/core/server/server.go
@@ -21,9 +21,9 @@ import (
 	"google.golang.org/grpc/status"
 
 	"github.com/gofrs/uuid"
-	protobuf "github.com/golang/protobuf/proto"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/credentials"
+	protobuf "google.golang.org/protobuf/proto"
 
 	"github.com/elastic/elastic-agent-client/v7/pkg/client"
 	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
@@ -99,6 +99,8 @@ type Handler interface {
 
 // Server is the GRPC server that the launched applications connect back to.
 type Server struct {
+	proto.UnimplementedElasticAgentServer
+
 	logger     *logger.Logger
 	ca         *authority.CertificateAuthority
 	listenAddr string
@@ -154,6 +156,7 @@ func (s *Server) Start() error {
 		ClientAuth:     tls.RequireAndVerifyClientCert,
 		ClientCAs:      certPool,
 		GetCertificate: s.getCertificate,
+		MinVersion:     tls.VersionTLS12,
 	})
 	if s.tracer != nil {
 		apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(s.tracer))
@@ -197,7 +200,10 @@ func (s *Server) Stop() {
 func (s *Server) Get(app interface{}) (*ApplicationState, bool) {
 	var foundState *ApplicationState
 	s.apps.Range(func(_ interface{}, val interface{}) bool {
-		as := val.(*ApplicationState)
+		as, ok := val.(*ApplicationState)
+		if !ok {
+			return true
+		}
 		if as.app == app {
 			foundState = as
 			return false
@@ -211,7 +217,10 @@ func (s *Server) FindByInputType(inputType string) (*ApplicationState, bool) {
 	var foundState *ApplicationState
 	s.apps.Range(func(_ interface{}, val interface{}) bool {
-		as := val.(*ApplicationState)
+		as, ok := val.(*ApplicationState)
+		if !ok {
+			return true
+		}
 		if as.inputTypes == nil {
 			return true
 		}
@@ -385,6 +394,11 @@ func (s *Server) Checkin(server proto.ElasticAgent_CheckinServer) error {
 	return nil
 }
 
+// CheckinV2 implements the GRPC bi-directional stream connection for v2 check-ins.
+func (s *Server) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error {
+	return errors.New("not implemented")
+}
+
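Every `val.(*ApplicationState)` change in this file applies the same defensive pattern: a comma-ok type assertion inside `sync.Map.Range` that returns `true` (keep iterating) when a stored value has an unexpected type, instead of panicking on a bare assertion. A self-contained sketch of the pattern, where `appState` is an illustrative stand-in for `*ApplicationState`:

```go
package main

import (
	"fmt"
	"sync"
)

// appState is an illustrative stand-in for the server's *ApplicationState.
type appState struct{ name string }

func main() {
	var apps sync.Map
	apps.Store("a", &appState{name: "filebeat"})
	apps.Store("b", "not-an-appState") // a value of the wrong type

	apps.Range(func(_, val interface{}) bool {
		as, ok := val.(*appState)
		if !ok {
			// Wrong type: skip this entry instead of panicking,
			// which is exactly what the comma-ok form buys over a bare assertion.
			return true
		}
		fmt.Println("found:", as.name)
		return true
	})
}
```

The trade-off is that an entry of the wrong type is silently skipped rather than crashing the agent's GRPC server mid-iteration.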
 // Actions implements the GRPC bi-direction stream connection for actions.
 func (s *Server) Actions(server proto.ElasticAgent_ActionsServer) error {
 	firstRespChan := make(chan *proto.ActionResponse)
@@ -883,7 +897,10 @@ func (s *Server) watchdog() {
 	now := time.Now().UTC()
 	s.apps.Range(func(_ interface{}, val interface{}) bool {
-		serverApp := val.(*ApplicationState)
+		serverApp, ok := val.(*ApplicationState)
+		if !ok {
+			return true
+		}
 		serverApp.checkinLock.RLock()
 		statusTime := serverApp.statusTime
 		serverApp.checkinLock.RUnlock()
@@ -934,7 +951,10 @@ func (s *Server) getByToken(token string) (*ApplicationState, bool) {
 func (s *Server) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {
 	var cert *tls.Certificate
 	s.apps.Range(func(_ interface{}, val interface{}) bool {
-		sa := val.(*ApplicationState)
+		sa, ok := val.(*ApplicationState)
+		if !ok {
+			return true
+		}
 		if sa.srvName == chi.ServerName {
 			cert = sa.cert.Certificate
 			return false
diff --git a/specs/apm-server.yml b/specs/apm-server.spec.yml
similarity index 100%
rename from specs/apm-server.yml
rename to specs/apm-server.spec.yml
diff --git a/specs/auditbeat.yml b/specs/auditbeat.spec.yml
similarity index 100%
rename from specs/auditbeat.yml
rename to specs/auditbeat.spec.yml
diff --git a/specs/cloudbeat.yml b/specs/cloudbeat.spec.yml
similarity index 100%
rename from specs/cloudbeat.yml
rename to specs/cloudbeat.spec.yml
diff --git a/specs/endpoint-security.yml b/specs/endpoint-security.spec.yml
similarity index 100%
rename from specs/endpoint-security.yml
rename to specs/endpoint-security.spec.yml
diff --git a/specs/filebeat.yml b/specs/filebeat.spec.yml
similarity index 100%
rename from specs/filebeat.yml
rename to specs/filebeat.spec.yml
diff --git a/specs/fleet-server.yml b/specs/fleet-server.spec.yml
similarity index 100%
rename from specs/fleet-server.yml
rename to specs/fleet-server.spec.yml
diff --git a/specs/heartbeat.yml b/specs/heartbeat.spec.yml
similarity index 100%
rename from specs/heartbeat.yml
rename to specs/heartbeat.spec.yml
diff --git a/specs/metricbeat.yml b/specs/metricbeat.spec.yml
similarity index 100%
rename from specs/metricbeat.yml
rename to specs/metricbeat.spec.yml
diff --git a/specs/osquerybeat.yml b/specs/osquerybeat.spec.yml
similarity index 100%
rename from specs/osquerybeat.yml
rename to specs/osquerybeat.spec.yml
diff --git a/specs/packetbeat.yml b/specs/packetbeat.spec.yml
similarity index 100%
rename from specs/packetbeat.yml
rename to specs/packetbeat.spec.yml

From 2679c82399afec02146a7cac3aa5a68617a9be76 Mon Sep 17 00:00:00 2001
From: Michal Pristas
Date: Tue, 21 Jun 2022 16:34:52 +0200
Subject: [PATCH 04/49] Initial Flat Structure (#544)

Flattening the structure and removing download/install steps for programs.
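One behavioral change in this patch that is easy to miss: the packaging helpers below start forcing component spec and config files to a restrictive 0600 mode inside tar and zip archives, and that rule takes precedence over a file's `config_mode`, which in turn takes precedence over its plain `mode`. A condensed, self-contained sketch of the precedence added to `addFileToTar`/`addFileToZip`; the regexes mirror the ones in the pkgtypes.go hunks, while the final 0644 fallback is illustrative only (the real code leaves the header untouched in that case):

```go
package main

import (
	"fmt"
	"os"
	"regexp"
)

var (
	configFile          = regexp.MustCompile(`.*\.yml$|.*\.yml\.disabled$`)
	componentConfigFile = regexp.MustCompile(`.*beat\.spec\.yml$|.*beat\.yml$|apm-server\.yml$|apm-server\.spec\.yml$|elastic-agent\.yml$`)
)

// headerMode picks the archive header mode for a packaged file:
// component spec/config files are always locked down to 0600, then an
// explicit config_mode applies to other config files, then the file mode.
func headerMode(name string, fileMode, configMode os.FileMode, isDir bool) os.FileMode {
	switch {
	case componentConfigFile.MatchString(name):
		return 0600 & os.ModePerm
	case configMode > 0 && configFile.MatchString(name):
		return configMode & os.ModePerm
	case fileMode > 0:
		return fileMode & os.ModePerm
	case isDir:
		return 0755
	}
	return 0644 // illustrative fallback; not part of the patch
}

func main() {
	fmt.Printf("%o\n", headerMode("filebeat.spec.yml", 0644, 0, false)) // 600
	fmt.Printf("%o\n", headerMode("kibana.yml", 0644, 0640, false))     // 640
}
```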
Co-authored-by: Aleksandr Maus --- dev-tools/mage/common.go | 49 +- dev-tools/mage/pkgtypes.go | 83 +- dev-tools/packaging/package_test.go | 25 +- dev-tools/packaging/packages.yml | 2146 +++++++---------- .../docker/Dockerfile.elastic-agent.tmpl | 492 ++-- .../application/fleet_server_bootstrap.go | 2 +- .../pkg/agent/application/info/agent_id.go | 14 +- .../agent/application/managed_mode_test.go | 3 +- .../pkg/agent/application/paths/common.go | 4 + .../application/pipeline/emitter/emitter.go | 4 - .../emitter/modifiers/monitoring_decorator.go | 2 +- .../pipeline/router/router_test.go | 6 +- .../pkg/agent/application/upgrade/upgrade.go | 2 +- internal/pkg/agent/cmd/inspect.go | 4 +- internal/pkg/agent/configrequest/step.go | 2 +- internal/pkg/agent/install/uninstall.go | 4 +- internal/pkg/agent/operation/common_test.go | 6 +- internal/pkg/agent/operation/monitoring.go | 22 +- .../pkg/agent/operation/monitoring_test.go | 14 +- internal/pkg/agent/operation/operation.go | 1 - .../pkg/agent/operation/operation_fetch.go | 79 - .../pkg/agent/operation/operation_install.go | 65 - .../agent/operation/operation_uninstall.go | 55 - .../pkg/agent/operation/operation_verify.go | 76 - internal/pkg/agent/operation/operator.go | 21 +- .../pkg/agent/operation/operator_handlers.go | 11 +- internal/pkg/agent/operation/operator_test.go | 25 +- internal/pkg/agent/program/program.go | 20 +- internal/pkg/agent/program/program_test.go | 4 +- internal/pkg/agent/program/spec.go | 17 +- .../pkg/agent/stateresolver/resolve_test.go | 50 +- ...crypted_disk_storage_windows_linux_test.go | 3 +- .../pkg/agent/storage/encrypted_disk_store.go | 6 +- internal/pkg/artifact/artifact.go | 2 +- .../download/composed/downloader_test.go | 2 +- .../pkg/artifact/download/http/downloader.go | 13 +- .../artifact/download/http/elastic_test.go | 22 +- .../install/atomic/atomic_installer.go | 3 +- .../artifact/install/hooks/hooks_installer.go | 1 + internal/pkg/core/app/descriptor.go | 45 +- .../core/monitoring/beats/beats_monitor.go | 2 +- .../pkg/core/monitoring/beats/monitoring.go | 8 +- .../pkg/core/monitoring/server/process.go | 6 +- internal/pkg/core/plugin/service/app.go | 4 +- magefile.go | 162 +- pkg/component/input_spec.go | 55 + pkg/component/load.go | 7 +- pkg/component/output_spec.go | 31 + pkg/component/spec.go | 47 +- specs/apm-server.spec.yml | 46 +- specs/auditbeat.spec.yml | 86 +- specs/cloudbeat.spec.yml | 54 +- specs/endpoint-security.spec.yml | 78 +- specs/filebeat.spec.yml | 340 +-- specs/fleet-server.spec.yml | 34 +- specs/heartbeat.spec.yml | 94 +- specs/metricbeat.spec.yml | 314 +-- specs/osquerybeat.spec.yml | 52 +- specs/packetbeat.spec.yml | 58 +- 59 files changed, 2260 insertions(+), 2623 deletions(-) delete mode 100644 internal/pkg/agent/operation/operation_fetch.go delete mode 100644 internal/pkg/agent/operation/operation_install.go delete mode 100644 internal/pkg/agent/operation/operation_uninstall.go delete mode 100644 internal/pkg/agent/operation/operation_verify.go create mode 100644 pkg/component/input_spec.go create mode 100644 pkg/component/output_spec.go diff --git a/dev-tools/mage/common.go b/dev-tools/mage/common.go index 07268162dbe..e14f3038587 100644 --- a/dev-tools/mage/common.go +++ b/dev-tools/mage/common.go @@ -40,9 +40,15 @@ import ( "github.com/pkg/errors" ) +const ( + inlineTemplate = "inline" + xpackDirName = "x-pack" + windowsBinarySuffix = ".exe" +) + // Expand expands the given Go text/template string. 
func Expand(in string, args ...map[string]interface{}) (string, error) { - return expandTemplate("inline", in, FuncMap, EnvMap(args...)) + return expandTemplate(inlineTemplate, in, FuncMap, EnvMap(args...)) } // MustExpand expands the given Go text/template string. It panics if there is @@ -77,7 +83,7 @@ func expandTemplate(name, tmpl string, funcs template.FuncMap, args ...map[strin t, err := t.Parse(tmpl) if err != nil { - if name == "inline" { + if name == inlineTemplate { return "", errors.Wrapf(err, "failed to parse template '%v'", tmpl) } return "", errors.Wrap(err, "failed to parse template") @@ -85,7 +91,7 @@ func expandTemplate(name, tmpl string, funcs template.FuncMap, args ...map[strin buf := new(bytes.Buffer) if err := t.Execute(buf, joinMaps(args...)); err != nil { - if name == "inline" { + if name == inlineTemplate { return "", errors.Wrapf(err, "failed to expand template '%v'", tmpl) } return "", errors.Wrap(err, "failed to expand template") @@ -122,11 +128,12 @@ func expandFile(src, dst string, args ...map[string]interface{}) error { return err } - dst, err = expandTemplate("inline", dst, FuncMap, args...) + dst, err = expandTemplate(inlineTemplate, dst, FuncMap, args...) if err != nil { return err } + //nolint:gosec // 0644 is required if err = ioutil.WriteFile(createDir(dst), []byte(output), 0644); err != nil { return errors.Wrap(err, "failed to write rendered template") } @@ -272,6 +279,7 @@ func MustFindReplace(file string, re *regexp.Regexp, repl string) { func DownloadFile(url, destinationDir string) (string, error) { log.Println("Downloading", url) + //nolint:gosec,noctx // url is not user input resp, err := http.Get(url) if err != nil { return "", errors.Wrap(err, "http get failed") @@ -327,6 +335,7 @@ func unzip(sourceFile, destinationDir string) error { } defer innerFile.Close() + //nolint:gosec // G305 zip traversal, no user input path := filepath.Join(destinationDir, f.Name) if !strings.HasPrefix(path, destinationDir) { return errors.Errorf("illegal file path in zip: %v", f.Name) @@ -346,6 +355,7 @@ func unzip(sourceFile, destinationDir string) error { } defer out.Close() + //nolint:gosec // DoS vulnerability, no user input if _, err = io.Copy(out, innerFile); err != nil { return err } @@ -365,6 +375,7 @@ func unzip(sourceFile, destinationDir string) error { // Tar compress a directory using tar + gzip algorithms func Tar(src string, targetFile string) error { + //nolint:forbidigo // pattern forbidden but we want it here fmt.Printf(">> creating TAR file from directory: %s, target: %s\n", src, targetFile) f, err := os.Create(targetFile) @@ -378,13 +389,14 @@ func Tar(src string, targetFile string) error { tw := tar.NewWriter(zr) // walk through every file in the folder - filepath.Walk(src, func(file string, fi os.FileInfo, errFn error) error { + err = filepath.Walk(src, func(file string, fi os.FileInfo, errFn error) error { if errFn != nil { return fmt.Errorf("error traversing the file system: %w", errFn) } // if a symlink, skip file if fi.Mode().Type() == os.ModeSymlink { + //nolint:forbidigo // pattern forbidden but we want it here fmt.Printf(">> skipping symlink: %s\n", file) return nil } @@ -417,6 +429,9 @@ func Tar(src string, targetFile string) error { } return nil }) + if err != nil { + return fmt.Errorf("error walking path '%s': %w", src, err) + } // produce tar if err := tw.Close(); err != nil { @@ -457,6 +472,7 @@ func untar(sourceFile, destinationDir string) error { return err } + //nolint:gosec // G305: file traversal, no user input path := 
filepath.Join(destinationDir, header.Name) if !strings.HasPrefix(path, destinationDir) { return errors.Errorf("illegal file path in tar: %v", header.Name) @@ -468,11 +484,16 @@ func untar(sourceFile, destinationDir string) error { return err } case tar.TypeReg: + if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { + return err + } + writer, err := os.Create(path) if err != nil { return err } + //nolint:gosec // decompression bomb, no user input if _, err = io.Copy(writer, tarReader); err != nil { return err } @@ -549,7 +570,7 @@ func numParallel() int { // based on GOMAXPROCS. The provided ctx is passed to the functions (if they // accept it as a param). func ParallelCtx(ctx context.Context, fns ...interface{}) { - var fnWrappers []func(context.Context) error + fnWrappers := make([]func(context.Context) error, 0, len(fns)) for _, f := range fns { fnWrapper := funcTypeWrap(f) if fnWrapper == nil { @@ -747,6 +768,7 @@ func CreateSHA512File(file string) error { computedHash := hex.EncodeToString(sum.Sum(nil)) out := fmt.Sprintf("%v %v", computedHash, filepath.Base(file)) + //nolint:gosec // permissions are correct return ioutil.WriteFile(file+".sha512", []byte(out), 0644) } @@ -774,7 +796,7 @@ func IsUpToDate(dst string, sources ...string) bool { var files []string for _, s := range sources { - filepath.Walk(s, func(path string, info os.FileInfo, err error) error { + err := filepath.Walk(s, func(path string, info os.FileInfo, err error) error { if err != nil { if os.IsNotExist(err) { return nil @@ -788,6 +810,9 @@ func IsUpToDate(dst string, sources ...string) bool { return nil }) + if err != nil { + panic(err) + } } execute, err := target.Path(dst, files...) @@ -800,7 +825,7 @@ func OSSBeatDir(path ...string) string { ossDir := CWD() // Check if we need to correct ossDir because it's in x-pack. - if parentDir := filepath.Base(filepath.Dir(ossDir)); parentDir == "x-pack" { + if parentDir := filepath.Base(filepath.Dir(ossDir)); parentDir == xpackDirName { // If the OSS version of the beat exists. tmp := filepath.Join(ossDir, "../..", BeatName) if _, err := os.Stat(tmp); !os.IsNotExist(err) { @@ -817,7 +842,7 @@ func XPackBeatDir(path ...string) string { // Check if we have an X-Pack only beats cur := CWD() - if parentDir := filepath.Base(filepath.Dir(cur)); parentDir == "x-pack" { + if parentDir := filepath.Base(filepath.Dir(cur)); parentDir == xpackDirName { tmp := filepath.Join(filepath.Dir(cur), BeatName) return filepath.Join(append([]string{tmp}, path...)...) } @@ -845,7 +870,7 @@ func CreateDir(file string) string { // binaryExtension returns the appropriate file extension based on GOOS. 
func binaryExtension(goos string) string { if goos == "windows" { - return ".exe" + return windowsBinarySuffix } return "" } @@ -869,7 +894,7 @@ func ParseVersion(version string) (major, minor, patch int, err error) { major, _ = strconv.Atoi(data["major"]) minor, _ = strconv.Atoi(data["minor"]) patch, _ = strconv.Atoi(data["patch"]) - return + return major, minor, patch, nil } // ListMatchingEnvVars returns all of the environment variables names that begin @@ -946,7 +971,7 @@ func ReadGLIBCRequirement(elfFile string) (*SemanticVersion, error) { return nil, errors.New("no GLIBC symbols found in binary (is this a static binary?)") } - var versions []SemanticVersion + versions := make([]SemanticVersion, 0, len(versionSet)) for ver := range versionSet { versions = append(versions, ver) } diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go index d976114ac64..68458f65952 100644 --- a/dev-tools/mage/pkgtypes.go +++ b/dev-tools/mage/pkgtypes.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//nolint:goconst // avoiding const check for Deb/Zip package mage import ( @@ -16,6 +17,7 @@ import ( "os" "path/filepath" "reflect" + "regexp" "runtime" "strconv" "strings" @@ -36,6 +38,13 @@ const ( // defaultBinaryName specifies the output file for zip and tar.gz. defaultBinaryName = "{{.Name}}-{{.Version}}{{if .Snapshot}}-SNAPSHOT{{end}}{{if .OS}}-{{.OS}}{{end}}{{if .Arch}}-{{.Arch}}{{end}}" + + componentConfigMode os.FileMode = 0600 +) + +var ( + configFilePattern = regexp.MustCompile(`.*\.yml$|.*\.yml\.disabled$`) + componentConfigFilePattern = regexp.MustCompile(`.*beat\.spec\.yml$|.*beat\.yml$|apm-server\.yml$|apm-server\.spec\.yml$|elastic-agent\.yml$`) ) // PackageType defines the file format of the package (e.g. zip, rpm, etc). @@ -85,11 +94,12 @@ type PackageSpec struct { // PackageFile represents a file or directory within a package. type PackageFile struct { - Source string `yaml:"source,omitempty"` // Regular source file or directory. - Content string `yaml:"content,omitempty"` // Inline template string. - Template string `yaml:"template,omitempty"` // Input template file. - Target string `yaml:"target,omitempty"` // Target location in package. Relative paths are added to a package specific directory (e.g. metricbeat-7.0.0-linux-x86_64). - Mode os.FileMode `yaml:"mode,omitempty"` // Target mode for file. Does not apply when source is a directory. + Source string `yaml:"source,omitempty"` // Regular source file or directory. + Content string `yaml:"content,omitempty"` // Inline template string. + Template string `yaml:"template,omitempty"` // Input template file. + Target string `yaml:"target,omitempty"` // Target location in package. Relative paths are added to a package specific directory (e.g. metricbeat-7.0.0-linux-x86_64). + Mode os.FileMode `yaml:"mode,omitempty"` // Target mode for file. Does not apply when source is a directory. + ConfigMode os.FileMode `yaml:"config_mode,omitempty"` Config bool `yaml:"config"` // Mark file as config in the package (deb and rpm only). Modules bool `yaml:"modules"` // Mark directory as directory with modules. Dep func(PackageSpec) error `yaml:"-" hash:"-" json:"-"` // Dependency to invoke during Evaluate. @@ -100,22 +110,22 @@ type PackageFile struct { // OSArchNames defines the names of architectures for use in packages. 
var OSArchNames = map[string]map[PackageType]map[string]string{ - "windows": map[PackageType]map[string]string{ - Zip: map[string]string{ + "windows": { + Zip: { "386": "x86", "amd64": "x86_64", }, }, - "darwin": map[PackageType]map[string]string{ - TarGz: map[string]string{ + "darwin": { + TarGz: { "386": "x86", "amd64": "x86_64", "arm64": "aarch64", // "universal": "universal", }, }, - "linux": map[PackageType]map[string]string{ - RPM: map[string]string{ + "linux": { + RPM: { "386": "i686", "amd64": "x86_64", "armv7": "armhfp", @@ -127,7 +137,7 @@ var OSArchNames = map[string]map[PackageType]map[string]string{ "s390x": "s390x", }, // https://www.debian.org/ports/ - Deb: map[string]string{ + Deb: { "386": "i386", "amd64": "amd64", "armv5": "armel", @@ -140,7 +150,7 @@ var OSArchNames = map[string]map[PackageType]map[string]string{ "ppc64le": "ppc64el", "s390x": "s390x", }, - TarGz: map[string]string{ + TarGz: { "386": "x86", "amd64": "x86_64", "armv5": "armv5", @@ -155,13 +165,13 @@ var OSArchNames = map[string]map[PackageType]map[string]string{ "ppc64le": "ppc64le", "s390x": "s390x", }, - Docker: map[string]string{ + Docker: { "amd64": "amd64", "arm64": "arm64", }, }, - "aix": map[PackageType]map[string]string{ - TarGz: map[string]string{ + "aix": { + TarGz: { "ppc64": "ppc64", }, }, @@ -443,7 +453,7 @@ func (s PackageSpec) Evaluate(args ...map[string]interface{}) PackageSpec { // ImageName computes the image name from the spec. A template for the image // name can be configured by adding image_name to extra_vars. func (s PackageSpec) ImageName() (string, error) { - if name, _ := s.ExtraVars["image_name"]; name != "" { + if name := s.ExtraVars["image_name"]; name != "" { imageName, err := s.Expand(name) if err != nil { return "", errors.Wrapf(err, "failed to expand image_name") @@ -670,14 +680,6 @@ func PackageTarGz(spec PackageSpec) error { return errors.Wrap(CreateSHA512File(spec.OutputFile), "failed to create .sha512 file") } -func replaceFileArch(filename string, pkgFile PackageFile, arch string) (string, PackageFile) { - filename = strings.ReplaceAll(filename, "universal", arch) - pkgFile.Source = strings.ReplaceAll(pkgFile.Source, "universal", arch) - pkgFile.Target = strings.ReplaceAll(pkgFile.Target, "universal", arch) - - return filename, pkgFile -} - // PackageDeb packages a deb file. This requires Docker to execute FPM. 
func PackageDeb(spec PackageSpec) error { return runFPM(spec, Deb) @@ -821,9 +823,14 @@ func addFileToZip(ar *zip.Writer, baseDir string, pkgFile PackageFile) error { return err } - if info.Mode().IsRegular() && pkgFile.Mode > 0 { + switch { + case componentConfigFilePattern.MatchString(info.Name()): + header.SetMode(componentConfigMode & os.ModePerm) + case pkgFile.ConfigMode > 0 && configFilePattern.MatchString(info.Name()): + header.SetMode(pkgFile.ConfigMode & os.ModePerm) + case info.Mode().IsRegular() && pkgFile.Mode > 0: header.SetMode(pkgFile.Mode & os.ModePerm) - } else if info.IsDir() { + case info.IsDir(): header.SetMode(0755) } @@ -888,12 +895,21 @@ func addFileToTar(ar *tar.Writer, baseDir string, pkgFile PackageFile) error { header.Uname, header.Gname = "root", "root" header.Uid, header.Gid = 0, 0 - if info.Mode().IsRegular() && pkgFile.Mode > 0 { + switch { + case componentConfigFilePattern.MatchString(info.Name()): + header.Mode = int64(componentConfigMode & os.ModePerm) + case pkgFile.ConfigMode > 0 && configFilePattern.MatchString(info.Name()): + header.Mode = int64(pkgFile.ConfigMode & os.ModePerm) + case info.Mode().IsRegular() && pkgFile.Mode > 0: header.Mode = int64(pkgFile.Mode & os.ModePerm) - } else if info.IsDir() { + case info.IsDir(): header.Mode = int64(0755) } + if strings.Contains(info.Name(), "disabled") { + log.Println(">>>>>", info.Name(), pkgFile.ConfigMode, "matches", configFilePattern.MatchString(info.Name()), "or", componentConfigFilePattern.MatchString(info.Name())) + } + if filepath.IsAbs(pkgFile.Target) { baseDir = "" } @@ -957,9 +973,14 @@ func addSymlinkToTar(tmpdir string, ar *tar.Writer, baseDir string, pkgFile Pack header.Uname, header.Gname = "root", "root" header.Uid, header.Gid = 0, 0 - if info.Mode().IsRegular() && pkgFile.Mode > 0 { + switch { + case componentConfigFilePattern.MatchString(info.Name()): + header.Mode = int64(componentConfigMode & os.ModePerm) + case pkgFile.ConfigMode > 0 && configFilePattern.MatchString(info.Name()): + header.Mode = int64(pkgFile.ConfigMode & os.ModePerm) + case info.Mode().IsRegular() && pkgFile.Mode > 0: header.Mode = int64(pkgFile.Mode & os.ModePerm) - } else if info.IsDir() { + case info.IsDir(): header.Mode = int64(0755) } diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index 74135533aaa..2eb60829637 100644 --- a/dev-tools/packaging/package_test.go +++ b/dev-tools/packaging/package_test.go @@ -34,10 +34,12 @@ const ( expectedManifestMode = os.FileMode(0644) expectedModuleFileMode = expectedManifestMode expectedModuleDirMode = os.FileMode(0755) + + rootUser = "root" ) var ( - configFilePattern = regexp.MustCompile(`.*beat\.yml$|apm-server\.yml|elastic-agent\.yml$`) + configFilePattern = regexp.MustCompile(`.*beat\.spec.yml$|.*beat\.yml$|apm-server\.yml|elastic-agent\.yml$$`) manifestFilePattern = regexp.MustCompile(`manifest.yml`) modulesDirPattern = regexp.MustCompile(`module/.+`) modulesDDirPattern = regexp.MustCompile(`modules.d/$`) @@ -173,8 +175,6 @@ func checkZip(t *testing.T, file string) { } const ( - npcapSettings = "Windows Npcap installation settings" - npcapGrant = `Insecure.Com LLC \(“The Nmap Project”\) has granted Elasticsearch` npcapLicense = `Dependency : Npcap \(https://nmap.org/npcap/\)` libpcapLicense = `Dependency : Libpcap \(http://www.tcpdump.org/\)` winpcapLicense = `Dependency : Winpcap \(https://www.winpcap.org/\)` @@ -225,7 +225,7 @@ func checkDocker(t *testing.T, file string) { checkDockerEntryPoint(t, p, info) checkDockerLabels(t, p, 
info, file) checkDockerUser(t, p, info, *rootUserContainer) - checkConfigPermissionsWithMode(t, p, os.FileMode(0644)) + checkConfigPermissionsWithMode(t, p, configFilePattern, os.FileMode(0644)) checkManifestPermissionsWithMode(t, p, os.FileMode(0644)) checkModulesPresent(t, "", p) checkModulesDPresent(t, "", p) @@ -234,13 +234,13 @@ func checkDocker(t *testing.T, file string) { // Verify that the main configuration file is installed with a 0600 file mode. func checkConfigPermissions(t *testing.T, p *packageFile) { - checkConfigPermissionsWithMode(t, p, expectedConfigMode) + checkConfigPermissionsWithMode(t, p, configFilePattern, expectedConfigMode) } -func checkConfigPermissionsWithMode(t *testing.T, p *packageFile, expectedMode os.FileMode) { +func checkConfigPermissionsWithMode(t *testing.T, p *packageFile, configPattern *regexp.Regexp, expectedMode os.FileMode) { t.Run(p.Name+" config file permissions", func(t *testing.T) { for _, entry := range p.Contents { - if configFilePattern.MatchString(entry.File) { + if configPattern.MatchString(entry.File) { mode := entry.Mode.Perm() if expectedMode != mode { t.Errorf("file %v has wrong permissions: expected=%v actual=%v", @@ -249,7 +249,7 @@ func checkConfigPermissionsWithMode(t *testing.T, p *packageFile, expectedMode o return } } - t.Errorf("no config file found matching %v", configFilePattern) + t.Errorf("no config file found matching %v", configPattern) }) } @@ -493,7 +493,7 @@ func checkDockerLabels(t *testing.T, p *packageFile, info *dockerInfo, file stri func checkDockerUser(t *testing.T, p *packageFile, info *dockerInfo, expectRoot bool) { t.Run(fmt.Sprintf("%s user", p.Name), func(t *testing.T) { - if expectRoot != (info.Config.User == "root") { + if expectRoot != (info.Config.User == rootUser) { t.Errorf("unexpected docker user: %s", info.Config.User) } }) @@ -564,7 +564,7 @@ func readRPM(rpmFile string) (*packageFile, *rpm.PackageFile, error) { File: file.Name(), Mode: file.Mode(), } - if file.Owner() != "root" { + if file.Owner() != rootUser { // not 0 pe.UID = 123 pe.GID = 123 @@ -707,6 +707,7 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { defer gzipReader.Close() tarReader := tar.NewReader(gzipReader) + manifestFileName := "manifest.json" for { header, err := tarReader.Next() if err != nil { @@ -717,12 +718,12 @@ func readDocker(dockerFile string) (*packageFile, *dockerInfo, error) { } switch { - case header.Name == "manifest.json": + case header.Name == manifestFileName: manifest, err = readDockerManifest(tarReader) if err != nil { return nil, nil, err } - case strings.HasSuffix(header.Name, ".json") && header.Name != "manifest.json": + case strings.HasSuffix(header.Name, ".json") && header.Name != manifestFileName: info, err = readDockerInfo(tarReader) if err != nil { return nil, nil, err diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index acc89420081..0c2e0da906e 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -1,1236 +1,910 @@ ---- - -# This file contains the package specifications for both Community Beats and -# Official Beats. The shared section contains YAML anchors that are used to -# define common parts of the package in order to not repeat ourselves. 
- -shared: - - &common - name: '{{.BeatName}}' - service_name: '{{.BeatServiceName}}' - os: '{{.GOOS}}' - arch: '{{.PackageArch}}' - vendor: '{{.BeatVendor}}' - version: '{{ beat_version }}' - license: '{{.BeatLicense}}' - url: '{{.BeatURL}}' - description: '{{.BeatDescription}}' - - # agent specific - # Deb/RPM spec for community beats. - - &deb_rpm_agent_spec - <<: *common - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /usr/share/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.reference.yml: - source: 'elastic-agent.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.yml: - source: 'elastic-agent.yml' - mode: 0600 - config: true - /etc/{{.BeatName}}/.elastic-agent.active.commit: - content: > - {{ commit }} - mode: 0644 - /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: - source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} - mode: 0755 - /usr/bin/{{.BeatName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl' - mode: 0755 - /lib/systemd/system/{{.BeatServiceName}}.service: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl' - mode: 0644 - /etc/init.d/{{.BeatServiceName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/elastic-agent.init.sh.tmpl' - mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if 
.Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/endpoint-security-{{ 
beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - - - - # MacOS pkg spec for community beats. - - &macos_agent_pkg_spec - <<: *common - extra_vars: - # OS X 10.11 El Capitan is the oldest supported by Go 1.14. 
- # https://golang.org/doc/go1.14#ports - min_supported_osx_version: 10.11 - identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' - install_path: /Library/Application Support - pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.elastic-agent.tmpl' - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.reference.yml: - source: 'elastic-agent.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.yml: - source: 'elastic-agent.yml' - mode: 0600 - config: true - /etc/{{.BeatName}}/.elastic-agent.active.commit: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ 
commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: 
'{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz: - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512: - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc: - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - - - &agent_binary_files - '{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - .build_hash.txt: - content: > - {{ commit }} - mode: 0644 - 'elastic-agent.reference.yml': - source: 'elastic-agent.reference.yml' - mode: 0644 - 'elastic-agent.yml': - source: 'elastic-agent.yml' - mode: 0600 - config: true - '.elastic-agent.active.commit': - content: > - {{ commit }} - mode: 0644 - - # Binary package spec (tar.gz for linux/darwin) for community beats. 
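Throughout these download lists, each artifact is paired with a `.sha512` checksum and a `.asc` PGP signature, and entries that may legitimately be absent from the drop path are marked `skip_on_missing: true` so packaging continues instead of failing; the agent binary spec below repeats that pattern for every bundled Beat. A minimal sketch of one such optional entry, with a hypothetical artifact name:

  'data/downloads/example.tar.gz.asc':
    source: '{{.AgentDropPath}}/example.tar.gz.asc'
    mode: 0644
    # tolerate builds where this signature was never produced
    skip_on_missing: true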
- - &agent_binary_spec - <<: *common - files: - <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ 
beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0644 - skip_on_missing: true - 
'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz.asc' - mode: 0644 - skip_on_missing: true - - - # Binary package spec (zip for windows) for community beats. - - &agent_windows_binary_spec - <<: *common - files: - <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/heartbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - 'data/{{.BeatName}}-{{ commit_short }}/downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short 
}}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/osquerybeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/endpoint-security-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/apm-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ 
commit_short }}/downloads/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/fleet-server-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.sha512' - mode: 0644 - skip_on_missing: true - 'data/{{.BeatName}}-{{ commit_short }}/downloads/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc': - source: '{{.AgentDropPath}}/cloudbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.zip.asc' - mode: 0644 - skip_on_missing: true - - - &agent_docker_spec - <<: *agent_binary_spec - extra_vars: - from: 'ubuntu:20.04' - buildFrom: 'ubuntu:20.04' - dockerfile: 'Dockerfile.elastic-agent.tmpl' - docker_entrypoint: 'docker-entrypoint.elastic-agent.tmpl' - user: '{{ .BeatName }}' - linux_capabilities: '' - image_name: '' - beats_install_path: "install" - files: - 'elastic-agent.yml': - source: 'elastic-agent.docker.yml' - mode: 0600 - config: true - '.elastic-agent.active.commit': - content: > - {{ commit }} - mode: 0644 - - - &agent_docker_arm_spec - <<: *agent_docker_spec - extra_vars: - from: 'arm64v8/ubuntu:20.04' - buildFrom: 'arm64v8/ubuntu:20.04' - - - &agent_docker_cloud_spec - <<: *agent_docker_spec - extra_vars: - image_name: '{{.BeatName}}-cloud' - repository: 'docker.elastic.co/beats-ci' - - - &agent_docker_complete_spec - <<: *agent_docker_spec - extra_vars: - image_name: '{{.BeatName}}-complete' - - # Deb/RPM spec for community beats. 
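The deb/rpm spec that follows encodes the usual Linux filesystem conventions: the binary and repo-level docs under /usr/share/<beat>, configuration under /etc/<beat>, a systemd unit under /lib/systemd/system, and a SysV script under /etc/init.d, rendered from the shared Beats templates. Condensed to its skeleton, with hypothetical example paths:

  - &deb_rpm_sketch                          # hypothetical condensed anchor
    <<: *common
    files:
      /usr/share/example/bin/example:        # the binary itself
        source: build/golang-crossbuild/example
        mode: 0755
      /etc/example/example.yml:              # config, not world-readable
        source: example.yml
        mode: 0600
        config: true
      /lib/systemd/system/example.service:   # service definition
        template: templates/linux/systemd.unit.tmpl
        mode: 0644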
- - &deb_rpm_spec - <<: *common - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /etc/{{.BeatName}}/fields.yml: - source: fields.yml - mode: 0644 - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /usr/share/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - /usr/share/{{.BeatName}}/kibana: - source: _meta/kibana.generated - mode: 0644 - /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: - source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} - mode: 0755 - /usr/bin/{{.BeatName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl' - mode: 0755 - /lib/systemd/system/{{.BeatServiceName}}.service: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl' - mode: 0644 - /etc/init.d/{{.BeatServiceName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl' - mode: 0755 - - # MacOS pkg spec for community beats. - - &macos_beat_pkg_spec - <<: *common - extra_vars: - # OS X 10.8 Mountain Lion is the oldest supported by Go 1.10. 
- # https://golang.org/doc/go1.10#ports - min_supported_osx_version: 10.8 - identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' - install_path: /Library/Application Support - pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl' - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: - source: _meta/kibana.generated - mode: 0644 - /etc/{{.BeatName}}/fields.yml: - source: fields.yml - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - - - &binary_files - '{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - fields.yml: - source: fields.yml - mode: 0644 - LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - .build_hash.txt: - content: > - {{ commit }} - mode: 0644 - '{{.BeatName}}.reference.yml': - source: '{{.BeatName}}.reference.yml' - mode: 0644 - '{{.BeatName}}.yml': - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - kibana: - source: _meta/kibana.generated - mode: 0644 - - # Binary package spec (tar.gz for linux/darwin) for community beats. - - &binary_spec - <<: *common - files: - <<: *binary_files - - # Binary package spec (zip for windows) for community beats. 
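As `*binary_files` above illustrates, every entry under `files:` uses exactly one content provider: `source` copies a file out of the build tree, `template` renders a Go template shipped with the Beats dev-tools, and `content` embeds a literal value (the folded `>` scalar that writes the commit hash), with `mode` giving the installed permissions; the Windows spec that follows simply layers two more templated entries on top. The three forms side by side, with hypothetical paths:

  files:
    example:                       # copied verbatim from the build tree
      source: build/example
      mode: 0755
    README.md:                     # rendered from a Go template
      template: templates/common/README.md.tmpl
      mode: 0644
    .build_hash.txt:               # literal content via folded scalar
      content: >
        {{ commit }}
      mode: 0644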
- - &windows_binary_spec - <<: *common - files: - <<: *binary_files - install-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl' - mode: 0755 - uninstall-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl' - mode: 0755 - - - &docker_spec - <<: *binary_spec - extra_vars: - from: 'ubuntu:20.04' - buildFrom: 'ubuntu:20.04' - user: '{{ .BeatName }}' - linux_capabilities: '' - files: - '{{.BeatName}}.yml': - source: '{{.BeatName}}.docker.yml' - mode: 0600 - config: true - - - &docker_arm_spec - <<: *docker_spec - extra_vars: - from: 'arm64v8/ubuntu:20.04' - buildFrom: 'arm64v8/ubuntu:20.04' - - - &docker_ubi_spec - extra_vars: - image_name: '{{.BeatName}}-ubi8' - from: 'docker.elastic.co/ubi8/ubi-minimal' - - - &docker_arm_ubi_spec - extra_vars: - image_name: '{{.BeatName}}-ubi8' - from: 'registry.access.redhat.com/ubi8/ubi-minimal:8.2' - - - &elastic_docker_spec - extra_vars: - repository: 'docker.elastic.co/beats' - - # - # License modifiers for Apache 2.0 - # - - &apache_license_for_binaries - license: "ASL 2.0" - files: - LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - - &apache_license_for_deb_rpm - license: "ASL 2.0" - files: - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - - &apache_license_for_macos_pkg - license: "ASL 2.0" - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - # - # License modifiers for the Elastic License - # - - &elastic_license_for_binaries - license: "Elastic License" - files: - LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - - - &elastic_license_for_deb_rpm - license: "Elastic License" - files: - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - - - &elastic_license_for_macos_pkg - license: "Elastic License" - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - -# specs is a list of named packaging "flavors". -specs: - # Community Beats - community_beat: - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - - - os: linux - types: [docker] - spec: - <<: *docker_spec - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - - # Elastic Beat with Apache License (OSS) and binary taken the current - # directory. 
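Each flavor under `specs:` is a list of selector blocks: `os`, an optional `arch`, and `types` decide when a block applies, while `spec` assembles the package by merging shared anchors and overriding individual keys; the `-oss` flavor that follows, for instance, overrides `name` so the artifact is renamed. One selector block in isolation, with hypothetical values:

  example_flavor:
    - os: linux
      arch: amd64             # optional; blocks without arch match all
      types: [tgz]
      spec:
        <<: *binary_spec      # start from a shared spec...
        name: 'example-oss'   # ...and override only what differs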
- elastic_beat_oss: - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *apache_license_for_deb_rpm - name: '{{.BeatName}}-oss' - - - os: linux - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - # Elastic Beat with Elastic License and binary taken the current directory. - elastic_beat_xpack: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *elastic_license_for_deb_rpm - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - # Elastic Beat with Elastic License and binary taken the current directory. - elastic_beat_xpack_reduced: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
- elastic_beat_xpack_separate_binaries: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *elastic_license_for_deb_rpm - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
- elastic_beat_agent_binaries: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *agent_windows_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: darwin - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_agent_spec - <<: *elastic_license_for_deb_rpm - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *agent_docker_complete_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Cloud specific docker image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *agent_docker_spec - <<: *agent_docker_cloud_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *agent_docker_complete_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Cloud specific docker image - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *agent_docker_arm_spec - <<: *agent_docker_cloud_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux 
- arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. - elastic_beat_agent_demo_binaries: - ### - # Elastic Licensed Packages - ### - - - os: linux - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [docker] - spec: - <<: *agent_docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} +--- + +# This file contains the package specifications for both Community Beats and +# Official Beats. The shared section contains YAML anchors that are used to +# define common parts of the package in order to not repeat ourselves. + +shared: + - &common + name: '{{.BeatName}}' + service_name: '{{.BeatServiceName}}' + os: '{{.GOOS}}' + arch: '{{.PackageArch}}' + vendor: '{{.BeatVendor}}' + version: '{{ beat_version }}' + license: '{{.BeatLicense}}' + url: '{{.BeatURL}}' + description: '{{.BeatDescription}}' + + # agent specific + # Deb/RPM spec for community beats. 
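Everything under `shared:` exists only to define anchors (`&name`) that the concrete specs pull back in through YAML merge keys (`<<: *name`), overriding just the keys that differ; `&deb_rpm_agent_spec` below is the first example, layered on top of `*common`. The mechanism in miniature, with hypothetical names:

  shared:
    - &base              # a reusable mapping
      vendor: Example
      mode: 0644

  spec:
    <<: *base            # merge every key from &base
    mode: 0755           # keys set locally win over merged ones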
+ - &deb_rpm_agent_spec + <<: *common + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' + files: + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /usr/share/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.reference.yml: + source: 'elastic-agent.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.yml: + source: 'elastic-agent.yml' + mode: 0600 + config: true + /etc/{{.BeatName}}/.elastic-agent.active.commit: + content: > + {{ commit }} + mode: 0644 + /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: + source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} + mode: 0755 + /usr/bin/{{.BeatName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl' + mode: 0755 + /lib/systemd/system/{{.BeatServiceName}}.service: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl' + mode: 0644 + /etc/init.d/{{.BeatServiceName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/elastic-agent.init.sh.tmpl' + mode: 0755 + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + # MacOS pkg spec for community beats. + - &macos_agent_pkg_spec + <<: *common + extra_vars: + # OS X 10.11 El Capitan is the oldest supported by Go 1.14. 
+ # https://golang.org/doc/go1.14#ports + min_supported_osx_version: 10.11 + identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' + install_path: /Library/Application Support + pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.elastic-agent.tmpl' + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.reference.yml: + source: 'elastic-agent.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.yml: + source: 'elastic-agent.yml' + mode: 0600 + config: true + /etc/{{.BeatName}}/.elastic-agent.active.commit: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + - &agent_binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + .build_hash.txt: + content: > + {{ commit }} + mode: 0644 + 'elastic-agent.reference.yml': + source: 'elastic-agent.reference.yml' + mode: 0644 + 'elastic-agent.yml': + source: 'elastic-agent.yml' + mode: 0600 + config: true + '.elastic-agent.active.commit': + content: > + {{ commit }} + mode: 0644 + + # Binary package spec (tar.gz for linux/darwin) for community beats. + - &agent_binary_spec + <<: *common + files: + <<: *agent_binary_files + 'data/{{.BeatName}}-{{ commit_short }}/components': + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + + # Binary package spec (zip for windows) for community beats. 
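Where the old spec enumerated a download entry per Beat, the v2 specs mount a single `components` directory: a `source` ending in a trailing slash copies the whole per-platform drop directory into the package, with `mode: 0755` and `config_mode: 0644` presumably covering executables and plain files respectively, and `skip_on_missing: true` keeping builds working when nothing was dropped. The tar.gz spec above and the zip spec below differ only in the archive suffix of that directory. The pattern in isolation, with a hypothetical drop directory:

  'data/{{.BeatName}}-{{ commit_short }}/components':
    # trailing slash: copy the directory contents, not an archive file
    source: '{{.AgentDropPath}}/example-drop/'
    mode: 0755
    config_mode: 0644
    skip_on_missing: true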
+ - &agent_windows_binary_spec + <<: *common + files: + <<: *agent_binary_files + 'data/{{.BeatName}}-{{ commit_short }}/components': + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.zip/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + - &agent_docker_spec + <<: *agent_binary_spec + extra_vars: + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' + dockerfile: 'Dockerfile.elastic-agent.tmpl' + docker_entrypoint: 'docker-entrypoint.elastic-agent.tmpl' + user: '{{ .BeatName }}' + linux_capabilities: '' + image_name: '' + beats_install_path: "install" + files: + 'elastic-agent.yml': + source: 'elastic-agent.docker.yml' + mode: 0600 + config: true + '.elastic-agent.active.commit': + content: > + {{ commit }} + mode: 0644 + 'data/cloud_downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0755 + 'data/cloud_downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0755 + + - &agent_docker_arm_spec + <<: *agent_docker_spec + extra_vars: + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' + + - &agent_docker_cloud_spec + <<: *agent_docker_spec + extra_vars: + image_name: '{{.BeatName}}-cloud' + repository: 'docker.elastic.co/beats-ci' + + - &agent_docker_complete_spec + <<: *agent_docker_spec + extra_vars: + image_name: '{{.BeatName}}-complete' + + # Deb/RPM spec for community beats. + - &deb_rpm_spec + <<: *common + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' + files: + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /etc/{{.BeatName}}/fields.yml: + source: fields.yml + mode: 0644 + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /usr/share/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: + source: '{{.BeatName}}.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.yml: + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + /usr/share/{{.BeatName}}/kibana: + source: _meta/kibana.generated + mode: 0644 + /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: + source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} + mode: 0755 + /usr/bin/{{.BeatName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl' + mode: 0755 + /lib/systemd/system/{{.BeatServiceName}}.service: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl' + mode: 0644 + /etc/init.d/{{.BeatServiceName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl' + mode: 0755 + + # MacOS pkg spec for community beats. 
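For the macOS pkg spec that follows, the interesting work happens in `extra_vars`, which feed the macOS package templates: `identifier` names the installed package, `install_path` roots the payload under /Library/Application Support, `min_supported_osx_version` tracks the oldest macOS the Go toolchain in use still supports, and the pre/post install scripts wire up the launchd daemon. Condensed, with hypothetical values:

  - &macos_pkg_sketch                    # hypothetical condensed anchor
    <<: *common
    extra_vars:
      min_supported_osx_version: 10.11   # oldest macOS the Go release supports
      identifier: 'co.example.beats.example'
      install_path: /Library/Application Support
    pre_install_script: templates/darwin/scripts/preinstall.tmpl
    post_install_script: templates/darwin/scripts/postinstall.tmpl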
+ - &macos_beat_pkg_spec + <<: *common + extra_vars: + # OS X 10.8 Mountain Lion is the oldest supported by Go 1.10. + # https://golang.org/doc/go1.10#ports + min_supported_osx_version: 10.8 + identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' + install_path: /Library/Application Support + pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl' + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: + source: _meta/kibana.generated + mode: 0644 + /etc/{{.BeatName}}/fields.yml: + source: fields.yml + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: + source: '{{.BeatName}}.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.yml: + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + + - &binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + fields.yml: + source: fields.yml + mode: 0644 + LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + .build_hash.txt: + content: > + {{ commit }} + mode: 0644 + '{{.BeatName}}.reference.yml': + source: '{{.BeatName}}.reference.yml' + mode: 0644 + '{{.BeatName}}.yml': + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + kibana: + source: _meta/kibana.generated + mode: 0644 + + # Binary package spec (tar.gz for linux/darwin) for community beats. + - &binary_spec + <<: *common + files: + <<: *binary_files + + # Binary package spec (zip for windows) for community beats. 
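The Windows zip spec that follows reuses `*binary_files` unchanged and adds only what Windows needs: PowerShell scripts, rendered from shared templates, for registering and removing the Beat as a service. The layering pattern, sketched with a hypothetical extra script:

  - &windows_sketch                 # hypothetical condensed anchor
    <<: *common
    files:
      <<: *binary_files             # everything every binary package ships
      example-service.ps1:          # plus the platform-specific extras
        template: templates/windows/example-service.ps1.tmpl
        mode: 0755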
+ - &windows_binary_spec + <<: *common + files: + <<: *binary_files + install-service-{{.BeatName}}.ps1: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl' + mode: 0755 + uninstall-service-{{.BeatName}}.ps1: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl' + mode: 0755 + + - &docker_spec + <<: *binary_spec + extra_vars: + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' + user: '{{ .BeatName }}' + linux_capabilities: '' + files: + '{{.BeatName}}.yml': + source: '{{.BeatName}}.docker.yml' + mode: 0600 + config: true + + - &docker_arm_spec + <<: *docker_spec + extra_vars: + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' + + - &docker_ubi_spec + extra_vars: + image_name: '{{.BeatName}}-ubi8' + from: 'docker.elastic.co/ubi8/ubi-minimal' + + - &docker_arm_ubi_spec + extra_vars: + image_name: '{{.BeatName}}-ubi8' + from: 'registry.access.redhat.com/ubi8/ubi-minimal:8.2' + + - &elastic_docker_spec + extra_vars: + repository: 'docker.elastic.co/beats' + + # + # License modifiers for Apache 2.0 + # + - &apache_license_for_binaries + license: "ASL 2.0" + files: + LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + - &apache_license_for_deb_rpm + license: "ASL 2.0" + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + - &apache_license_for_macos_pkg + license: "ASL 2.0" + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + # + # License modifiers for the Elastic License + # + - &elastic_license_for_binaries + license: "Elastic License" + files: + LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + + - &elastic_license_for_deb_rpm + license: "Elastic License" + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + + - &elastic_license_for_macos_pkg + license: "Elastic License" + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + +# specs is a list of named packaging "flavors". +specs: + # Community Beats + community_beat: + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + + - os: linux + types: [docker] + spec: + <<: *docker_spec + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + + # Elastic Beat with Apache License (OSS) and binary taken from the current + # directory.
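The license modifiers above stay deliberately small: each pins the `license` metadata string and swaps in the matching LICENSE.txt at the path convention of its package type (archive root, /usr/share for deb/rpm, /Library/Application Support for the macOS pkg). Merged after a base spec, as the `-oss` flavor below does, they relicense a package without restating it; a condensed sketch:

  spec:
    <<: *binary_spec                   # the full package layout
    <<: *apache_license_for_binaries   # then swap license text and metadata
    name: '{{.BeatName}}-oss'          # and rename the artifact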
+ elastic_beat_oss: + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *apache_license_for_deb_rpm + name: '{{.BeatName}}-oss' + + - os: linux + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + # Elastic Beat with Elastic License and binary taken from the current directory. + elastic_beat_xpack: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *elastic_license_for_deb_rpm + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *docker_arm_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + # Elastic Beat with Elastic License and binary taken from the current directory. + elastic_beat_xpack_reduced: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + # Elastic Beat with Elastic License and binary taken from the x-pack dir.
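`elastic_beat_xpack_separate_binaries`, next, demonstrates the other common override: keep the shared spec intact but repoint the binary's `source` at the Beat's x-pack build directory rather than the repository root. The entry in isolation:

  spec:
    <<: *binary_spec
    <<: *elastic_license_for_binaries
    files:
      '{{.BeatName}}{{.BinaryExt}}':
        # only the source moves; the x-pack tree builds the licensed binary
        source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}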
+  elastic_beat_xpack_separate_binaries:
+    ###
+    # Elastic Licensed Packages
+    ###
+    - os: windows
+      types: [zip]
+      spec:
+        <<: *windows_binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: darwin
+      types: [tgz]
+      spec:
+        <<: *binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: linux
+      types: [tgz]
+      spec:
+        <<: *binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: linux
+      types: [deb, rpm]
+      spec:
+        <<: *deb_rpm_spec
+        <<: *elastic_license_for_deb_rpm
+        files:
+          /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}:
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: linux
+      arch: amd64
+      types: [docker]
+      spec:
+        <<: *docker_spec
+        <<: *elastic_docker_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: linux
+      arch: amd64
+      types: [docker]
+      spec:
+        <<: *docker_spec
+        <<: *docker_ubi_spec
+        <<: *elastic_docker_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: linux
+      arch: arm64
+      types: [docker]
+      spec:
+        <<: *docker_arm_spec
+        <<: *elastic_docker_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: linux
+      arch: arm64
+      types: [docker]
+      spec:
+        <<: *docker_arm_spec
+        <<: *docker_arm_ubi_spec
+        <<: *elastic_docker_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: aix
+      types: [tgz]
+      spec:
+        <<: *binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+  # Elastic Agent with Elastic License and binaries taken from the build directory.
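+  # (For the tgz targets the real binary ships under
+  # data/{{.BeatName}}-{{ commit_short }}/ and the top-level {{.BeatName}}
+  # entry is a symlink to it; note the 'symlink: true' entries below.)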
+ elastic_beat_agent_binaries: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *agent_windows_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: darwin + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + - os: linux + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_agent_spec + <<: *elastic_license_for_deb_rpm + files: + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Complete image gets a 'complete' variant for synthetics and other large + # packages too big to fit in the main image + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *agent_docker_complete_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Cloud specific docker image + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *elastic_docker_spec + <<: *agent_docker_spec + <<: *agent_docker_cloud_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Complete image gets a 'complete' variant for synthetics and other large + # packages too big to fit in the main image + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *agent_docker_complete_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Cloud specific docker image + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *elastic_docker_spec + <<: *agent_docker_arm_spec + <<: *agent_docker_cloud_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux 
+      arch: arm64
+      types: [docker]
+      spec:
+        <<: *agent_docker_arm_spec
+        <<: *docker_arm_ubi_spec
+        <<: *elastic_docker_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: aix
+      types: [tgz]
+      spec:
+        <<: *agent_binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}
+            symlink: true
+            mode: 0755
+
+
+  # Elastic Agent demo packages with Elastic License and binaries taken from the build directory.
+  elastic_beat_agent_demo_binaries:
+    ###
+    # Elastic Licensed Packages
+    ###
+
+    - os: linux
+      types: [tgz]
+      spec:
+        <<: *agent_binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: linux
+      types: [docker]
+      spec:
+        <<: *agent_docker_spec
+        <<: *elastic_docker_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
+
+    - os: aix
+      types: [tgz]
+      spec:
+        <<: *agent_binary_spec
+        <<: *elastic_license_for_binaries
+        files:
+          '{{.BeatName}}{{.BinaryExt}}':
+            source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
index b78fcfdb196..7ff81be9559 100644
--- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
+++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
@@ -1,247 +1,245 @@
-{{- $beatHome := printf "%s/%s" "/usr/share" .BeatName }}
-{{- $beatBinary := printf "%s/%s" $beatHome .BeatName }}
-{{- $repoInfo := repo }}
-
-# Prepare home in a different stage to avoid creating additional layers on
-# the final image because of permission changes.
-FROM {{ .buildFrom }} AS home
-
-COPY beat {{ $beatHome }}
-
-RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \
-    chown -R root:root {{ $beatHome }} && \
-    find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \
-    find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \
-    find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \
-    find {{ $beatHome }}/data -type f -exec chmod 0660 {} \; && \
-    rm {{ $beatBinary }} && \
-    ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \
-    chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \
-{{- range $i, $modulesd := .ModulesDirs }}
-    chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \
-{{- end }}
-{{- if contains .image_name "-cloud" }}
-    mkdir -p /opt/filebeat /opt/metricbeat && \
-    tar xf {{ $beatHome }}/data/elastic-agent-*/downloads/metricbeat-*.tar.gz -C /opt/metricbeat --strip-components=1 && \
-    tar xf {{ $beatHome }}/data/elastic-agent-*/downloads/filebeat-*.tar.gz -C /opt/filebeat --strip-components=1 && \
-{{- end }}
-    true
-
-FROM {{ .from }}
-
-ENV BEAT_SETUID_AS={{ .user }}
-
-{{- if contains .from "ubi-minimal" }}
-RUN for iter in {1..10}; do microdnf update -y && microdnf install -y tar gzip findutils shadow-utils && microdnf clean all && exit_code=0 && break || exit_code=$?
&& echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code) -{{- else }} - -RUN for iter in {1..10}; do \ - apt-get update -y && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \ - apt-get clean all && \ - exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - (exit $exit_code) -{{- end }} - -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN apt-get update -y && \ - for iter in {1..10}; do \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ - libglib2.0-0\ - libnss3\ - libnspr4\ - libatk1.0-0\ - libatk-bridge2.0-0\ - libcups2\ - libdrm2\ - libdbus-1-3\ - libxcb1\ - libxkbcommon0\ - libx11-6\ - libxcomposite1\ - libxdamage1\ - libxext6\ - libxfixes3\ - libxrandr2\ - libgbm1\ - libpango-1.0-0\ - libcairo2\ - libasound2\ - libatspi2.0-0\ - libxshmfence1 \ - fonts-noto-core\ - fonts-noto-cjk &&\ - apt-get clean all && \ - exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - (exit $exit_code) -ENV NODE_PATH={{ $beatHome }}/.node -RUN echo \ - $NODE_PATH \ - {{ $beatHome }}/.config \ - {{ $beatHome }}/.synthetics \ - {{ $beatHome }}/.npm \ - {{ $beatHome }}/.cache \ - | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' -{{- end }} - -LABEL \ - org.label-schema.build-date="{{ date }}" \ - org.label-schema.schema-version="1.0" \ - org.label-schema.vendor="{{ .BeatVendor }}" \ - org.label-schema.license="{{ .License }}" \ - org.label-schema.name="{{ .BeatName }}" \ - org.label-schema.version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ - org.label-schema.url="{{ .BeatURL }}" \ - org.label-schema.vcs-url="{{ $repoInfo.RootImportPath }}" \ - org.label-schema.vcs-ref="{{ commit }}" \ - io.k8s.description="{{ .BeatDescription }}" \ - io.k8s.display-name="{{ .BeatName | title }} image" \ - org.opencontainers.image.created="{{ date }}" \ - org.opencontainers.image.licenses="{{ .License }}" \ - org.opencontainers.image.title="{{ .BeatName | title }}" \ - org.opencontainers.image.vendor="{{ .BeatVendor }}" \ - name="{{ .BeatName }}" \ - maintainer="infra@elastic.co" \ - vendor="{{ .BeatVendor }}" \ - version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ - release="1" \ - url="{{ .BeatURL }}" \ - summary="{{ .BeatName }}" \ - license="{{ .License }}" \ - description="{{ .BeatDescription }}" - -ENV ELASTIC_CONTAINER "true" -ENV PATH={{ $beatHome }}:$PATH -ENV GODEBUG="madvdontneed=1" - -# Add an init process, check the checksum to make sure it's a match -RUN set -e ; \ - TINI_BIN=""; \ - TINI_SHA256=""; \ - TINI_VERSION="v0.19.0"; \ - case "$(arch)" in \ - x86_64) \ - TINI_BIN="tini-amd64"; \ - TINI_SHA256="93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c"; \ - ;; \ - aarch64) \ - TINI_BIN="tini-arm64"; \ - TINI_SHA256="07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81"; \ - ;; \ - *) \ - echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ - ;; \ - esac ; \ - curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ - echo "${TINI_SHA256} ${TINI_BIN}" | sha256sum -c - ; \ - mv "${TINI_BIN}" /usr/bin/tini ; \ - chmod +x /usr/bin/tini - -COPY docker-entrypoint /usr/local/bin/docker-entrypoint -RUN chmod 755 /usr/local/bin/docker-entrypoint - -COPY --from=home {{ $beatHome }} {{ $beatHome }} - -# 
Elastic Agent needs group permissions in the home itself to be able to -# create fleet.yml when running as non-root. -RUN chmod 0770 {{ $beatHome }} - -RUN mkdir /licenses -COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses -COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses - -{{- if contains .image_name "-cloud" }} -COPY --from=home /opt /opt -{{- end }} - -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. Thus, we use readlink to follow the link and setcap on the actual binary -RUN readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} -{{- end }} - -{{- if eq .user "root" }} -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -{{- end }} -{{- else }} -RUN groupadd --gid 1000 {{ .BeatName }} -RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN chown {{ .user }} $NODE_PATH -{{- end }} -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -RUN chown {{ .user }} /app -{{- end }} -{{- end }} - -# Unpack beats to default install directory -RUN mkdir -p {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }} && \ - for beatPath in {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/downloads/*.tar.gz; do \ - tar xf $beatPath -C {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}; \ - done && \ - chown -R {{ .user }}:{{ .user }} {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }} && \ - chown -R root:root {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}/*/*.yml && \ - chmod 0644 {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}/*/*.yml && \ - # heartbeat requires cap_net_raw,cap_setuid to run ICMP checks and change npm user - setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/{{.BeatName}}-{{ commit_short }}/{{ .beats_install_path }}/heartbeat-*/heartbeat - -USER {{ .user }} - -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -# Setup synthetics env vars -ENV ELASTIC_SYNTHETICS_CAPABLE=true -ENV SUITES_DIR={{ $beatHome }}/suites -ENV NODE_VERSION=16.15.0 -ENV PATH="$NODE_PATH/node/bin:$PATH" -# Install the latest version of @elastic/synthetics forcefully ignoring the previously -# cached node_modules, heartbeat then calls the global executable to run test suites -# Setup node -RUN cd {{$beatHome}}/.node \ - && NODE_DOWNLOAD_URL="" \ - && case "$(arch)" in \ - x86_64) \ - NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \ - ;; \ - aarch64) \ - NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-arm64.tar.xz \ - ;; \ - *) \ - echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ - ;; \ - esac \ - && mkdir -p node \ - && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ - && chmod ug+rwX -R $NODE_PATH \ - && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH -{{- end }} - - -{{- range $i, $port := .ExposePorts }} -EXPOSE {{ $port 
}}
-{{- end }}
-
-# When running under Docker, we must ensure libbeat monitoring pulls cgroup
-# metrics from /sys/fs/cgroup/<subsystem>/, ignoring any paths found in
-# /proc/self/cgroup.
-ENV LIBBEAT_MONITORING_CGROUPS_HIERARCHY_OVERRIDE=/
-
-WORKDIR {{ $beatHome }}
-
-{{- if contains .image_name "-cloud" }}
-ENTRYPOINT ["/usr/bin/tini", "--"]
-CMD ["/app/apm.sh"]
-# Generate a stub command that will be overwritten at runtime
-RUN echo -e '#!/bin/sh\nexec /usr/local/bin/docker-entrypoint' > /app/apm.sh && \
-    chmod 0555 /app/apm.sh
-{{- else }}
-ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/docker-entrypoint"]
-{{- end }}
-
+{{- $beatHome := printf "%s/%s" "/usr/share" .BeatName }}
+{{- $beatBinary := printf "%s/%s" $beatHome .BeatName }}
+{{- $repoInfo := repo }}
+
+# Prepare home in a different stage to avoid creating additional layers on
+# the final image because of permission changes.
+FROM {{ .buildFrom }} AS home
+
+COPY beat {{ $beatHome }}
+
+RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \
+    chown -R root:root {{ $beatHome }} && \
+    find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \
+    find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \
+    find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \
+    find {{ $beatHome }}/data -type f -exec chmod 0660 {} \; && \
+    rm {{ $beatBinary }} && \
+    ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \
+    chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \
+    chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/*beat && \
+    (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/apm-server || true) && \
+    (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/elastic-endpoint || true) && \
+    find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chown root:root {} \; && \
+    find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chmod 0644 {} \; && \
+{{- range $i, $modulesd := .ModulesDirs }}
+    chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \
+{{- end }}
+{{- if contains .image_name "-cloud" }}
+    mkdir -p /opt/filebeat /opt/metricbeat && \
+    tar xf {{ $beatHome }}/data/cloud_downloads/metricbeat-*.tar.gz -C /opt/metricbeat --strip-components=1 && \
+    tar xf {{ $beatHome }}/data/cloud_downloads/filebeat-*.tar.gz -C /opt/filebeat --strip-components=1 && \
+{{- end }}
+    rm -rf {{ $beatHome }}/data/cloud_downloads && \
+    true
+
+FROM {{ .from }}
+
+ENV BEAT_SETUID_AS={{ .user }}
+
+{{- if contains .from "ubi-minimal" }}
+RUN for iter in {1..10}; do microdnf update -y && microdnf install -y tar gzip findutils shadow-utils && microdnf clean all && exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code)
+{{- else }}
+
+RUN for iter in {1..10}; do \
+    apt-get update -y && \
+    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \
+    apt-get clean all && \
+    exit_code=0 && break || exit_code=$?
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) +{{- end }} + +{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} +RUN apt-get update -y && \ + for iter in {1..10}; do \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ + libglib2.0-0\ + libnss3\ + libnspr4\ + libatk1.0-0\ + libatk-bridge2.0-0\ + libcups2\ + libdrm2\ + libdbus-1-3\ + libxcb1\ + libxkbcommon0\ + libx11-6\ + libxcomposite1\ + libxdamage1\ + libxext6\ + libxfixes3\ + libxrandr2\ + libgbm1\ + libpango-1.0-0\ + libcairo2\ + libasound2\ + libatspi2.0-0\ + libxshmfence1 \ + fonts-noto-core\ + fonts-noto-cjk &&\ + apt-get clean all && \ + exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) +ENV NODE_PATH={{ $beatHome }}/.node +RUN echo \ + $NODE_PATH \ + {{ $beatHome }}/.config \ + {{ $beatHome }}/.synthetics \ + {{ $beatHome }}/.npm \ + {{ $beatHome }}/.cache \ + | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' +{{- end }} + +LABEL \ + org.label-schema.build-date="{{ date }}" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.vendor="{{ .BeatVendor }}" \ + org.label-schema.license="{{ .License }}" \ + org.label-schema.name="{{ .BeatName }}" \ + org.label-schema.version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ + org.label-schema.url="{{ .BeatURL }}" \ + org.label-schema.vcs-url="{{ $repoInfo.RootImportPath }}" \ + org.label-schema.vcs-ref="{{ commit }}" \ + io.k8s.description="{{ .BeatDescription }}" \ + io.k8s.display-name="{{ .BeatName | title }} image" \ + org.opencontainers.image.created="{{ date }}" \ + org.opencontainers.image.licenses="{{ .License }}" \ + org.opencontainers.image.title="{{ .BeatName | title }}" \ + org.opencontainers.image.vendor="{{ .BeatVendor }}" \ + name="{{ .BeatName }}" \ + maintainer="infra@elastic.co" \ + vendor="{{ .BeatVendor }}" \ + version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ + release="1" \ + url="{{ .BeatURL }}" \ + summary="{{ .BeatName }}" \ + license="{{ .License }}" \ + description="{{ .BeatDescription }}" + +ENV ELASTIC_CONTAINER "true" +ENV PATH={{ $beatHome }}:$PATH +ENV GODEBUG="madvdontneed=1" + +# Add an init process, check the checksum to make sure it's a match +RUN set -e ; \ + TINI_BIN=""; \ + TINI_SHA256=""; \ + TINI_VERSION="v0.19.0"; \ + case "$(arch)" in \ + x86_64) \ + TINI_BIN="tini-amd64"; \ + TINI_SHA256="93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c"; \ + ;; \ + aarch64) \ + TINI_BIN="tini-arm64"; \ + TINI_SHA256="07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81"; \ + ;; \ + *) \ + echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ + ;; \ + esac ; \ + curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ + echo "${TINI_SHA256} ${TINI_BIN}" | sha256sum -c - ; \ + mv "${TINI_BIN}" /usr/bin/tini ; \ + chmod +x /usr/bin/tini + +COPY docker-entrypoint /usr/local/bin/docker-entrypoint +RUN chmod 755 /usr/local/bin/docker-entrypoint + +COPY --from=home {{ $beatHome }} {{ $beatHome }} + +# Elastic Agent needs group permissions in the home itself to be able to +# create fleet.yml when running as non-root. 
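+# (The non-root user created below is added to the root group via '--groups 0',
+# so the group rwx bits granted here are what provide that access.)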
+RUN chmod 0770 {{ $beatHome }}
+
+RUN mkdir /licenses
+COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses
+COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses
+
+{{- if contains .image_name "-cloud" }}
+COPY --from=home /opt /opt
+{{- end }}
+
+
+RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \
+{{- if .linux_capabilities }}
+# Since the beat is stored at the other end of a symlink we must follow the symlink first.
+# For security reasons setcap does not support symlinks. This is smart in the general case,
+# but in our specific case, since we're building a trusted image from trusted binaries, it is
+# fine. Thus, we use readlink to follow the link and setcap on the actual binary.
+    readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \
+{{- end }}
+true
+
+{{- if eq .user "root" }}
+{{- if contains .image_name "-cloud" }}
+# Generate folder for a stub command that will be overwritten at runtime
+RUN mkdir /app
+{{- end }}
+{{- else }}
+RUN groupadd --gid 1000 {{ .BeatName }}
+RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }}
+{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }}
+RUN chown {{ .user }} $NODE_PATH
+{{- end }}
+{{- if contains .image_name "-cloud" }}
+# Generate folder for a stub command that will be overwritten at runtime
+RUN mkdir /app
+RUN chown {{ .user }} /app
+{{- end }}
+{{- end }}
+
+USER {{ .user }}
+
+{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }}
+# Setup synthetics env vars
+ENV ELASTIC_SYNTHETICS_CAPABLE=true
+ENV SUITES_DIR={{ $beatHome }}/suites
+ENV NODE_VERSION=16.15.0
+ENV PATH="$NODE_PATH/node/bin:$PATH"
+# Install the latest version of @elastic/synthetics, forcefully ignoring the previously
+# cached node_modules; heartbeat then calls the global executable to run test suites.
+# Setup node
+RUN cd {{$beatHome}}/.node \
+  && NODE_DOWNLOAD_URL="" \
+  && case "$(arch)" in \
+    x86_64) \
+      NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \
+      ;; \
+    aarch64) \
+      NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-arm64.tar.xz \
+      ;; \
+    *) \
+      echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \
+      ;; \
+  esac \
+  && mkdir -p node \
+  && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \
+  && chmod ug+rwX -R $NODE_PATH \
+  && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH
+{{- end }}
+
+
+{{- range $i, $port := .ExposePorts }}
+EXPOSE {{ $port }}
+{{- end }}
+
+# When running under Docker, we must ensure libbeat monitoring pulls cgroup
+# metrics from /sys/fs/cgroup/<subsystem>/, ignoring any paths found in
+# /proc/self/cgroup.
+ENV LIBBEAT_MONITORING_CGROUPS_HIERARCHY_OVERRIDE=/ + +WORKDIR {{ $beatHome }} + +{{- if contains .image_name "-cloud" }} +ENTRYPOINT ["/usr/bin/tini", "--"] +CMD ["/app/apm.sh"] +# Generate a stub command that will be overwritten at runtime +RUN echo -e '#!/bin/sh\nexec /usr/local/bin/docker-entrypoint' > /app/apm.sh && \ + chmod 0555 /app/apm.sh +{{- else }} +ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/docker-entrypoint"] +{{- end }} + diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 0f6fcfe21d4..9b72b177eb9 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -216,7 +216,7 @@ func emit(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInf if !ok { return errors.New("missing required fleet-server program specification") } - ok, err = program.DetectProgram(spec, agentInfo, ast) + ok, err = program.DetectProgram(spec.Rules, spec.When, spec.Constraints, agentInfo, ast) if err != nil { return errors.New(err, "failed parsing the configuration") } diff --git a/internal/pkg/agent/application/info/agent_id.go b/internal/pkg/agent/application/info/agent_id.go index e376a0fbfb4..e0a6c64acbe 100644 --- a/internal/pkg/agent/application/info/agent_id.go +++ b/internal/pkg/agent/application/info/agent_id.go @@ -118,7 +118,9 @@ func updateAgentInfo(s ioStore, agentInfo *persistentAgentInfo) error { agentConfigFile := paths.AgentConfigFile() reader, err := s.Load() if err != nil { - return err + return errors.New(err, "failed loading from store", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, agentConfigFile)) } // reader is closed by this function @@ -151,10 +153,16 @@ func updateAgentInfo(s ioStore, agentInfo *persistentAgentInfo) error { r, err := yamlToReader(configMap) if err != nil { - return err + return errors.New(err, "failed creating yaml reader") } - return s.Save(r) + if err := s.Save(r); err != nil { + return errors.New(err, "failed saving agent info", + errors.TypeFilesystem, + errors.M(errors.MetaKeyPath, agentConfigFile)) + } + + return nil } func yamlToReader(in interface{}) (io.Reader, error) { diff --git a/internal/pkg/agent/application/managed_mode_test.go b/internal/pkg/agent/application/managed_mode_test.go index 7f111eae322..847211dc079 100644 --- a/internal/pkg/agent/application/managed_mode_test.go +++ b/internal/pkg/agent/application/managed_mode_test.go @@ -30,6 +30,7 @@ import ( ) func TestManagedModeRouting(t *testing.T) { + streams := make(map[pipeline.RoutingKey]pipeline.Stream) streamFn := func(l *logger.Logger, r pipeline.RoutingKey) (pipeline.Stream, error) { m := newMockStreamStore() @@ -43,7 +44,7 @@ func TestManagedModeRouting(t *testing.T) { log, _ := logger.New("", false) router, _ := router.New(log, streamFn) - agentInfo, _ := info.NewAgentInfo(true) + agentInfo, _ := info.NewAgentInfo(false) nullStore := &storage.NullStore{} composableCtrl, _ := composable.New(log, nil) emit, err := emitter.New(ctx, log, agentInfo, composableCtrl, router, &pipeline.ConfigModifiers{Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}}, nil) diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 7511a791146..315f515a13c 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -128,6 +128,10 @@ func Data() string { return filepath.Join(Top(), "data") } 
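+// Components returns the path to the components directory used by the
+// current Agent version (the "components" directory inside Home()).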
+func Components() string { + return filepath.Join(Home(), "components") +} + // Logs returns a the log directory for Agent func Logs() string { return logsPath diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter.go b/internal/pkg/agent/application/pipeline/emitter/emitter.go index 7855fb51602..ac94d48d8b4 100644 --- a/internal/pkg/agent/application/pipeline/emitter/emitter.go +++ b/internal/pkg/agent/application/pipeline/emitter/emitter.go @@ -6,14 +6,12 @@ package emitter import ( "context" - "strings" "go.elastic.co/apm" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" @@ -23,8 +21,6 @@ import ( // New creates a new emitter function. func New(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, controller composable.Controller, router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, reloadables ...reloadable) (pipeline.EmitterFunc, error) { - log.Debugf("Supported programs: %s", strings.Join(program.KnownProgramNames(), ", ")) - ctrl := NewController(log, agentInfo, controller, router, modifiers, caps, reloadables...) err := controller.Run(ctx, func(vars []*transpiler.Vars) { ctrl.Set(ctx, vars) diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go index 35d099ec253..d9377aa9e61 100644 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go +++ b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go @@ -79,7 +79,7 @@ func InjectMonitoring(agentInfo *info.AgentInfo, outputGroup string, rootAst *tr programList := make([]string, 0, len(programsToRun)) cfgHash := md5.New() for _, p := range programsToRun { - programList = append(programList, p.Spec.Cmd) + programList = append(programList, p.Spec.CommandName()) cfgHash.Write(p.Config.Hash()) } // making program list and their hashes part of the config diff --git a/internal/pkg/agent/application/pipeline/router/router_test.go b/internal/pkg/agent/application/pipeline/router/router_test.go index 6c619bb9cb9..75f33231b1b 100644 --- a/internal/pkg/agent/application/pipeline/router/router_test.go +++ b/internal/pkg/agent/application/pipeline/router/router_test.go @@ -45,7 +45,7 @@ type event struct { type notifyFunc func(pipeline.RoutingKey, rOp, ...interface{}) func TestRouter(t *testing.T) { - programs := []program.Program{{Spec: program.Supported[1]}} + programs := []program.Program{{Spec: getRandomSpec()}} ctx := context.Background() t.Run("create new and destroy unused stream", func(t *testing.T) { @@ -227,3 +227,7 @@ func assertOps(t *testing.T, expected []event, received []event) { func e(rk pipeline.RoutingKey, op rOp) event { return event{rk: rk, op: op} } + +func getRandomSpec() program.Spec { + return program.Supported[1] +} diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 81fb7a78444..e2fe530ff77 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ 
-41,7 +41,7 @@ const ( var ( agentSpec = program.Spec{ - Name: "Elastic Agent", + Name: "elastic-agent", Cmd: agentName, Artifact: "beats/" + agentName, } diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index e6284d56487..d7832f48772 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -218,12 +218,12 @@ func printOutputFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, output var programFound bool for _, p := range programs { - if programName != "" && programName != p.Spec.Cmd { + if programName != "" && programName != p.Spec.CommandName() { continue } programFound = true - _, _ = os.Stdout.WriteString(fmt.Sprintf("[%s] %s:\n", k, p.Spec.Cmd)) + _, _ = os.Stdout.WriteString(fmt.Sprintf("[%s] %s:\n", k, p.Spec.CommandName())) err = printMapStringConfig(p.Configuration()) if err != nil { return fmt.Errorf("cannot print configuration of program '%s': %w", programName, err) diff --git a/internal/pkg/agent/configrequest/step.go b/internal/pkg/agent/configrequest/step.go index 4f8bfd66ed2..332c720f9ca 100644 --- a/internal/pkg/agent/configrequest/step.go +++ b/internal/pkg/agent/configrequest/step.go @@ -29,5 +29,5 @@ type Step struct { } func (s *Step) String() string { - return "[ID:" + s.ID + ", PROCESS: " + s.ProgramSpec.Cmd + " VERSION:" + s.Version + "]" + return "[ID:" + s.ID + ", PROCESS: " + s.ProgramSpec.Command() + " VERSION:" + s.Version + "]" } diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 87ff47ae169..6b4e717fa73 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -206,12 +206,12 @@ func programsFromConfig(cfg *config.Config) ([]program.Program, error) { for _, v := range ppMap { for _, p := range v { - if _, found := check[p.Spec.Cmd]; found { + if _, found := check[p.Spec.CommandName()]; found { continue } pp = append(pp, p) - check[p.Spec.Cmd] = true + check[p.Spec.CommandName()] = true } } diff --git a/internal/pkg/agent/operation/common_test.go b/internal/pkg/agent/operation/common_test.go index 60193c4c4e2..2c897bbb8b5 100644 --- a/internal/pkg/agent/operation/common_test.go +++ b/internal/pkg/agent/operation/common_test.go @@ -56,10 +56,10 @@ func getTestOperator(t *testing.T, downloadPath string, installPath string, p *a l := getLogger() agentInfo, _ := info.NewAgentInfo(true) - fetcher := &DummyDownloader{} - verifier := &DummyVerifier{} installer := &DummyInstallerChecker{} uninstaller := &DummyUninstaller{} + fetcher := &DummyDownloader{} + verifier := &DummyVerifier{} stateResolver, err := stateresolver.NewStateResolver(l) if err != nil { @@ -107,7 +107,7 @@ func getProgram(binary, version string) *app.Descriptor { OperatingSystem: "darwin", Architecture: "64", } - return app.NewDescriptor(spec, version, downloadCfg, nil) + return app.NewDescriptorWithPath(installPath, spec, version, downloadCfg, nil) } func getAbsPath(path string) string { diff --git a/internal/pkg/agent/operation/monitoring.go b/internal/pkg/agent/operation/monitoring.go index f28c681e42a..11e080d7fd3 100644 --- a/internal/pkg/agent/operation/monitoring.go +++ b/internal/pkg/agent/operation/monitoring.go @@ -45,7 +45,7 @@ func (o *Operator) handleStartSidecar(s configrequest.Step) (result error) { if err != nil { return errors.New(err, errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.Cmd), + errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()), "operator.handleStartSidecar failed to 
create program") } @@ -54,13 +54,13 @@ func (o *Operator) handleStartSidecar(s configrequest.Step) (result error) { if err := o.stop(p); err != nil { result = multierror.Append(err, err) } else { - o.markStopMonitoring(step.ProgramSpec.Cmd) + o.markStopMonitoring(step.ProgramSpec.CommandName()) } } else { if err := o.start(p, cfg); err != nil { result = multierror.Append(err, err) } else { - o.markStartMonitoring(step.ProgramSpec.Cmd) + o.markStartMonitoring(step.ProgramSpec.CommandName()) } } } @@ -74,7 +74,7 @@ func (o *Operator) handleStopSidecar(s configrequest.Step) (result error) { if err != nil { return errors.New(err, errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.Cmd), + errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()), "operator.handleStopSidecar failed to create program") } @@ -82,7 +82,7 @@ func (o *Operator) handleStopSidecar(s configrequest.Step) (result error) { if err := o.stop(p); err != nil { result = multierror.Append(err, err) } else { - o.markStopMonitoring(step.ProgramSpec.Cmd) + o.markStopMonitoring(step.ProgramSpec.CommandName()) } } @@ -105,7 +105,7 @@ func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.S outputIface, found := config[outputKey] if !found { - o.logger.Errorf("operator.getMonitoringSteps: monitoring configuration not found for sidecar of type %s", step.ProgramSpec.Cmd) + o.logger.Errorf("operator.getMonitoringSteps: monitoring configuration not found for sidecar of type %s", step.ProgramSpec.CommandName()) return nil } @@ -116,7 +116,7 @@ func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.S } if len(outputMap) == 0 { - o.logger.Errorf("operator.getMonitoringSteps: monitoring is missing an output configuration for sidecar of type: %s", step.ProgramSpec.Cmd) + o.logger.Errorf("operator.getMonitoringSteps: monitoring is missing an output configuration for sidecar of type: %s", step.ProgramSpec.CommandName()) return nil } @@ -124,7 +124,7 @@ func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.S // since we are folding all the child options as a map we should make sure we have //a unique output. 
if len(outputMap) > 1 { - o.logger.Errorf("operator.getMonitoringSteps: monitoring has too many outputs configuration for sidecar of type: %s", step.ProgramSpec.Cmd) + o.logger.Errorf("operator.getMonitoringSteps: monitoring has too many outputs configuration for sidecar of type: %s", step.ProgramSpec.CommandName()) return nil } @@ -144,13 +144,13 @@ func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.S t, ok := output["type"] if !ok { - o.logger.Errorf("operator.getMonitoringSteps: unknown monitoring output for sidecar of type: %s", step.ProgramSpec.Cmd) + o.logger.Errorf("operator.getMonitoringSteps: unknown monitoring output for sidecar of type: %s", step.ProgramSpec.CommandName()) return nil } outputType, ok := t.(string) if !ok { - o.logger.Errorf("operator.getMonitoringSteps: unexpected monitoring output type: %+v for sidecar of type: %s", t, step.ProgramSpec.Cmd) + o.logger.Errorf("operator.getMonitoringSteps: unexpected monitoring output type: %+v for sidecar of type: %s", t, step.ProgramSpec.CommandName()) return nil } @@ -668,7 +668,7 @@ func normalizeHTTPCopyRules(name string) []map[string]interface{} { return fromToMap } - for _, exportedMetric := range spec.ExprtedMetrics { + for _, exportedMetric := range spec.ExportedMetrics { fromToMap = append(fromToMap, map[string]interface{}{ "from": fmt.Sprintf("http.agent.%s", exportedMetric), "to": exportedMetric, diff --git a/internal/pkg/agent/operation/monitoring_test.go b/internal/pkg/agent/operation/monitoring_test.go index cc365cae540..b48765612dd 100644 --- a/internal/pkg/agent/operation/monitoring_test.go +++ b/internal/pkg/agent/operation/monitoring_test.go @@ -13,7 +13,6 @@ import ( "github.com/stretchr/testify/require" "go.elastic.co/apm/apmtest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/testutils" "github.com/elastic/elastic-agent-client/v7/pkg/proto" @@ -21,6 +20,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" + "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -37,7 +37,9 @@ import ( func TestExportedMetrics(t *testing.T) { programName := "testing" expectedMetricsName := "metric_name" - program.SupportedMap[programName] = program.Spec{ExprtedMetrics: []string{expectedMetricsName}} + program.SupportedMap[programName] = program.Spec{ + ExportedMetrics: []string{expectedMetricsName}, + } exportedMetrics := normalizeHTTPCopyRules(programName) @@ -99,13 +101,13 @@ func TestGenerateSteps(t *testing.T) { var fbFound, mbFound bool for _, s := range steps { // Filebeat step check - if s.ProgramSpec.Cmd == "filebeat" { + if s.ProgramSpec.CommandName() == "filebeat" { fbFound = true checkStep(t, "filebeat", outputType, sampleOutput, s) } // Metricbeat step check - if s.ProgramSpec.Cmd == "metricbeat" { + if s.ProgramSpec.CommandName() == "metricbeat" { mbFound = true checkStep(t, "metricbeat", outputType, sampleOutput, s) } @@ -159,10 +161,10 @@ func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.M l := getLogger() agentInfo, _ := info.NewAgentInfo(true) - fetcher := &DummyDownloader{} - verifier := &DummyVerifier{} installer := 
&DummyInstallerChecker{} uninstaller := &DummyUninstaller{} + fetcher := &DummyDownloader{} + verifier := &DummyVerifier{} stateResolver, err := stateresolver.NewStateResolver(l) if err != nil { diff --git a/internal/pkg/agent/operation/operation.go b/internal/pkg/agent/operation/operation.go index 4846b67aa66..0419058cc44 100644 --- a/internal/pkg/agent/operation/operation.go +++ b/internal/pkg/agent/operation/operation.go @@ -54,7 +54,6 @@ type Descriptor interface { Spec() program.Spec ServicePort() int BinaryName() string - ArtifactName() string Version() string ID() string Directory() string diff --git a/internal/pkg/agent/operation/operation_fetch.go b/internal/pkg/agent/operation/operation_fetch.go deleted file mode 100644 index 3097c9b6db3..00000000000 --- a/internal/pkg/agent/operation/operation_fetch.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "os" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// operationFetch fetches artifact from preconfigured source -// skips if artifact is already downloaded -type operationFetch struct { - logger *logger.Logger - program Descriptor - operatorConfig *configuration.SettingsConfig - downloader download.Downloader -} - -func newOperationFetch( - logger *logger.Logger, - program Descriptor, - operatorConfig *configuration.SettingsConfig, - downloader download.Downloader) *operationFetch { - - return &operationFetch{ - logger: logger, - program: program, - operatorConfig: operatorConfig, - downloader: downloader, - } -} - -// Name is human readable name identifying an operation -func (o *operationFetch) Name() string { - return "operation-fetch" -} - -// Check checks whether fetch needs to occur. -// -// If the artifacts already exists then fetch will not be ran. -func (o *operationFetch) Check(_ context.Context, _ Application) (bool, error) { - downloadConfig := o.operatorConfig.DownloadConfig - fullPath, err := artifact.GetArtifactPath(o.program.Spec(), o.program.Version(), downloadConfig.OS(), downloadConfig.Arch(), downloadConfig.TargetDirectory) - if err != nil { - return false, err - } - - _, err = os.Stat(fullPath) - if os.IsNotExist(err) { - return true, nil - } - - o.logger.Debugf("binary '%s.%s' already exists in %s. 
Skipping operation %s", o.program.BinaryName(), o.program.Version(), fullPath, o.Name()) - return false, err -} - -// Run runs the operation -func (o *operationFetch) Run(ctx context.Context, application Application) (err error) { - defer func() { - if err != nil { - application.SetState(state.Failed, err.Error(), nil) - } - }() - - fullPath, err := o.downloader.Download(ctx, o.program.Spec(), o.program.Version()) - if err == nil { - o.logger.Infof("downloaded binary '%s.%s' into '%s' as part of operation '%s'", o.program.BinaryName(), o.program.Version(), fullPath, o.Name()) - } - - return err -} diff --git a/internal/pkg/agent/operation/operation_install.go b/internal/pkg/agent/operation/operation_install.go deleted file mode 100644 index fca3d6eb0f2..00000000000 --- a/internal/pkg/agent/operation/operation_install.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// operationInstall installs a artifact from predefined location -// skips if artifact is already installed -type operationInstall struct { - logger *logger.Logger - program Descriptor - operatorConfig *configuration.SettingsConfig - installer install.InstallerChecker -} - -func newOperationInstall( - logger *logger.Logger, - program Descriptor, - operatorConfig *configuration.SettingsConfig, - installer install.InstallerChecker) *operationInstall { - - return &operationInstall{ - logger: logger, - program: program, - operatorConfig: operatorConfig, - installer: installer, - } -} - -// Name is human readable name identifying an operation -func (o *operationInstall) Name() string { - return "operation-install" -} - -// Check checks whether install needs to be ran. -// -// If the installation directory already exists then it will not be ran. -func (o *operationInstall) Check(ctx context.Context, _ Application) (bool, error) { - err := o.installer.Check(ctx, o.program.Spec(), o.program.Version(), o.program.Directory()) - if err != nil { - // don't return err, just state if Run should be called - return true, nil - } - return false, nil -} - -// Run runs the operation -func (o *operationInstall) Run(ctx context.Context, application Application) (err error) { - defer func() { - if err != nil { - application.SetState(state.Failed, err.Error(), nil) - } - }() - - return o.installer.Install(ctx, o.program.Spec(), o.program.Version(), o.program.Directory()) -} diff --git a/internal/pkg/agent/operation/operation_uninstall.go b/internal/pkg/agent/operation/operation_uninstall.go deleted file mode 100644 index 1697679211a..00000000000 --- a/internal/pkg/agent/operation/operation_uninstall.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// operationUninstall uninstalls a artifact from predefined location -type operationUninstall struct { - logger *logger.Logger - program Descriptor - uninstaller uninstall.Uninstaller -} - -func newOperationUninstall( - logger *logger.Logger, - program Descriptor, - uninstaller uninstall.Uninstaller) *operationUninstall { - - return &operationUninstall{ - logger: logger, - program: program, - uninstaller: uninstaller, - } -} - -// Name is human readable name identifying an operation -func (o *operationUninstall) Name() string { - return "operation-uninstall" -} - -// Check checks whether uninstall needs to be ran. -// -// Always true. -func (o *operationUninstall) Check(_ context.Context, _ Application) (bool, error) { - return true, nil -} - -// Run runs the operation -func (o *operationUninstall) Run(ctx context.Context, application Application) (err error) { - defer func() { - if err != nil { - application.SetState(state.Failed, err.Error(), nil) - } - }() - - return o.uninstaller.Uninstall(ctx, o.program.Spec(), o.program.Version(), o.program.Directory()) -} diff --git a/internal/pkg/agent/operation/operation_verify.go b/internal/pkg/agent/operation/operation_verify.go deleted file mode 100644 index fef764c4a12..00000000000 --- a/internal/pkg/agent/operation/operation_verify.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "fmt" - "os" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/core/state" -) - -// operationVerify verifies downloaded artifact for correct signature -// skips if artifact is already installed -type operationVerify struct { - program Descriptor - operatorConfig *configuration.SettingsConfig - verifier download.Verifier -} - -func newOperationVerify( - program Descriptor, - operatorConfig *configuration.SettingsConfig, - verifier download.Verifier) *operationVerify { - return &operationVerify{ - program: program, - operatorConfig: operatorConfig, - verifier: verifier, - } -} - -// Name is human readable name identifying an operation -func (o *operationVerify) Name() string { - return "operation-verify" -} - -// Check checks whether verify needs to occur. -// -// Only if the artifacts exists does it need to be verified. -func (o *operationVerify) Check(_ context.Context, _ Application) (bool, error) { - downloadConfig := o.operatorConfig.DownloadConfig - fullPath, err := artifact.GetArtifactPath(o.program.Spec(), o.program.Version(), downloadConfig.OS(), downloadConfig.Arch(), downloadConfig.TargetDirectory) - if err != nil { - return false, err - } - - if _, err := os.Stat(fullPath); os.IsNotExist(err) { - return false, errors.New(errors.TypeApplication, - fmt.Sprintf("%s.%s package does not exist in %s. 
Skipping operation %s", o.program.BinaryName(), o.program.Version(), fullPath, o.Name())) - } - - return true, err -} - -// Run runs the operation -func (o *operationVerify) Run(_ context.Context, application Application) (err error) { - defer func() { - if err != nil { - application.SetState(state.Failed, err.Error(), nil) - } - }() - - if err := o.verifier.Verify(o.program.Spec(), o.program.Version()); err != nil { - return errors.New(err, - fmt.Sprintf("operation '%s' failed to verify %s.%s", o.Name(), o.program.BinaryName(), o.program.Version()), - errors.TypeSecurity) - } - - return nil -} diff --git a/internal/pkg/agent/operation/operator.go b/internal/pkg/agent/operation/operator.go index 71cb6569671..50bf725e193 100644 --- a/internal/pkg/agent/operation/operator.go +++ b/internal/pkg/agent/operation/operator.go @@ -64,10 +64,10 @@ type Operator struct { apps map[string]Application appsLock sync.Mutex - downloader download.Downloader - verifier download.Verifier installer install.InstallerChecker uninstaller uninstall.Uninstaller + downloader download.Downloader + verifier download.Verifier statusController status.Controller statusReporter status.Reporter } @@ -183,14 +183,14 @@ func (o *Operator) HandleConfig(ctx context.Context, cfg configrequest.Request) o.statusController.UpdateStateID(stateID) for _, step := range steps { - if !strings.EqualFold(step.ProgramSpec.Cmd, monitoringName) { - if _, isSupported := program.SupportedMap[strings.ToLower(step.ProgramSpec.Cmd)]; !isSupported { + if !strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) { + if _, isSupported := program.SupportedMap[step.ProgramSpec.CommandName()]; !isSupported { // mark failed, new config cannot be run - msg := fmt.Sprintf("program '%s' is not supported", step.ProgramSpec.Cmd) + msg := fmt.Sprintf("program '%s' is not supported", step.ProgramSpec.CommandName()) o.statusReporter.Update(state.Failed, msg, nil) return errors.New(msg, errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.Cmd)) + errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName())) } } @@ -250,16 +250,10 @@ func (o *Operator) Shutdown() { // specific configuration of new process is passed func (o *Operator) start(p Descriptor, cfg map[string]interface{}) (err error) { flow := []operation{ - newRetryableOperations( - o.logger, - o.config.RetryConfig, - newOperationFetch(o.logger, p, o.config, o.downloader), - newOperationVerify(p, o.config, o.verifier), - ), - newOperationInstall(o.logger, p, o.config, o.installer), newOperationStart(o.logger, p, o.config, cfg), newOperationConfig(o.logger, o.config, cfg), } + return o.runFlow(p, flow) } @@ -267,7 +261,6 @@ func (o *Operator) start(p Descriptor, cfg map[string]interface{}) (err error) { func (o *Operator) stop(p Descriptor) (err error) { flow := []operation{ newOperationStop(o.logger, o.config), - newOperationUninstall(o.logger, p, o.uninstaller), } return o.runFlow(p, flow) diff --git a/internal/pkg/agent/operation/operator_handlers.go b/internal/pkg/agent/operation/operator_handlers.go index bfc95dcf763..36f10b3d70e 100644 --- a/internal/pkg/agent/operation/operator_handlers.go +++ b/internal/pkg/agent/operation/operator_handlers.go @@ -6,6 +6,7 @@ package operation import ( "fmt" + "strings" "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -26,7 +27,7 @@ func (o *Operator) initHandlerMap() { } func (o *Operator) handleRun(step configrequest.Step) error { - if 
step.ProgramSpec.Cmd == monitoringName { + if strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) { return o.handleStartSidecar(step) } @@ -35,15 +36,15 @@ func (o *Operator) handleRun(step configrequest.Step) error { return errors.New(err, "operator.handleStart failed to create program", errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.Cmd)) + errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName())) } return o.start(p, cfg) } func (o *Operator) handleRemove(step configrequest.Step) error { - o.logger.Debugf("stopping process %s: %s", step.ProgramSpec.Cmd, step.ID) - if step.ProgramSpec.Cmd == monitoringName { + o.logger.Debugf("stopping process %s: %s", step.ProgramSpec.CommandName(), step.ID) + if strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) { return o.handleStopSidecar(step) } @@ -52,7 +53,7 @@ func (o *Operator) handleRemove(step configrequest.Step) error { return errors.New(err, "operator.handleRemove failed to stop program", errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.Cmd)) + errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName())) } return o.stop(p) diff --git a/internal/pkg/agent/operation/operator_test.go b/internal/pkg/agent/operation/operator_test.go index 731f04eea8b..8400918a023 100644 --- a/internal/pkg/agent/operation/operator_test.go +++ b/internal/pkg/agent/operation/operator_test.go @@ -11,7 +11,6 @@ import ( "os" "os/exec" "path/filepath" - "runtime" "testing" "github.com/stretchr/testify/assert" @@ -24,15 +23,17 @@ import ( func TestMain(m *testing.M) { // init supported with test cases + port, err := getFreePort() + if err != nil { + panic(err) + } + configurableSpec := program.Spec{ Name: "configurable", Cmd: "configurable", Args: []string{}, } - port, err := getFreePort() - if err != nil { - panic(err) - } + serviceSpec := program.Spec{ ServicePort: port, Name: "serviceable", @@ -44,10 +45,10 @@ func TestMain(m *testing.M) { program.SupportedMap["configurable"] = configurableSpec program.SupportedMap["serviceable"] = serviceSpec - if err := isAvailable("configurable", "1.0"); err != nil { + if err := isAvailable("configurable"); err != nil { panic(err) } - if err := isAvailable("serviceable", "1.0"); err != nil { + if err := isAvailable("serviceable"); err != nil { panic(err) } @@ -462,15 +463,13 @@ func TestConfigurableService(t *testing.T) { } } -func isAvailable(name, version string) error { - p := getProgram(name, version) +func isAvailable(name string) error { + p := getProgram(name, "version") spec := p.ProcessSpec() path := spec.BinaryPath - if runtime.GOOS == "windows" { - path += ".exe" - } + if s, err := os.Stat(path); err != nil || s == nil { - return fmt.Errorf("binary not available %s", spec.BinaryPath) + return fmt.Errorf("binary not available %s: %v", spec.BinaryPath, err) } return nil } diff --git a/internal/pkg/agent/program/program.go b/internal/pkg/agent/program/program.go index eed6f62c828..08f30a81609 100644 --- a/internal/pkg/agent/program/program.go +++ b/internal/pkg/agent/program/program.go @@ -13,7 +13,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/eql" ) -// Program represents a program that must be started or must run. +// Program represents a program that must be started or must run . type Program struct { Spec Spec Config *transpiler.AST @@ -21,7 +21,7 @@ type Program struct { // Cmd return the execution command to run. 
func (p *Program) Cmd() string { - return p.Spec.Cmd + return p.Spec.Command() } // Checksum return the checksum of the current instance of the program. @@ -70,7 +70,7 @@ func DetectPrograms(agentInfo transpiler.AgentInfo, singleConfig *transpiler.AST programs := make([]Program, 0) for _, spec := range Supported { specificAST := singleConfig.Clone() - ok, err := DetectProgram(spec, agentInfo, specificAST) + ok, err := DetectProgram(spec.Rules, spec.When, spec.Constraints, agentInfo, specificAST) if err != nil { return nil, err } @@ -83,6 +83,7 @@ func DetectPrograms(agentInfo transpiler.AgentInfo, singleConfig *transpiler.AST } programs = append(programs, program) } + return programs, nil } @@ -90,9 +91,9 @@ func DetectPrograms(agentInfo transpiler.AgentInfo, singleConfig *transpiler.AST // // Note `ast` is modified to match what the program expects. Should clone the AST before passing to // this function if you want to still have the original. -func DetectProgram(spec Spec, info transpiler.AgentInfo, ast *transpiler.AST) (bool, error) { - if len(spec.Constraints) > 0 { - constraints, err := eql.New(spec.Constraints) +func DetectProgram(rules *transpiler.RuleList, when string, constraints string, info transpiler.AgentInfo, ast *transpiler.AST) (bool, error) { + if len(constraints) > 0 { + constraints, err := eql.New(constraints) if err != nil { return false, err } @@ -105,16 +106,16 @@ func DetectProgram(spec Spec, info transpiler.AgentInfo, ast *transpiler.AST) (b } } - err := spec.Rules.Apply(info, ast) + err := rules.Apply(info, ast) if err != nil { return false, err } - if len(spec.When) == 0 { + if len(when) == 0 { return false, ErrMissingWhen } - expression, err := eql.New(spec.When) + expression, err := eql.New(when) if err != nil { return false, err } @@ -128,6 +129,7 @@ func KnownProgramNames() []string { for idx, program := range Supported { names[idx] = program.Name } + return names } diff --git a/internal/pkg/agent/program/program_test.go b/internal/pkg/agent/program/program_test.go index a318d23e02a..4d12e40cc8e 100644 --- a/internal/pkg/agent/program/program_test.go +++ b/internal/pkg/agent/program/program_test.go @@ -488,7 +488,7 @@ func TestConfiguration(t *testing.T) { require.Equal(t, len(testPrograms), len(progs)) for _, program := range progs { - filename := name + "-" + strings.ToLower(program.Spec.Cmd) + filename := name + "-" + program.Spec.CommandName() if progKey != "default" { filename += "-" + progKey } @@ -558,7 +558,7 @@ func TestUseCases(t *testing.T) { for _, program := range defPrograms { generatedPath := filepath.Join( useCasesPath, "generated", - useCaseName+"."+strings.ToLower(program.Spec.Cmd)+".golden.yml", + useCaseName+"."+program.Spec.CommandName()+".golden.yml", ) compareMap := &transpiler.MapVisitor{} diff --git a/internal/pkg/agent/program/spec.go b/internal/pkg/agent/program/spec.go index 12f860a1e9a..0b3a8eeb347 100644 --- a/internal/pkg/agent/program/spec.go +++ b/internal/pkg/agent/program/spec.go @@ -8,6 +8,8 @@ import ( "fmt" "io/ioutil" "path/filepath" + "runtime" + "strings" "gopkg.in/yaml.v2" @@ -40,7 +42,20 @@ type Spec struct { When string `yaml:"when"` Constraints string `yaml:"constraints"` RestartOnOutputChange bool `yaml:"restart_on_output_change,omitempty"` - ExprtedMetrics []string `yaml:"exported_metrics,omitempty"` + ExportedMetrics []string `yaml:"exported_metrics,omitempty"` +} + +func (s *Spec) Command() string { + name := strings.ToLower(s.Cmd) + if runtime.GOOS == "windows" && !strings.HasSuffix(name, ".exe") { + return 
name + ".exe" + } + + return name +} + +func (s *Spec) CommandName() string { + return strings.ToLower(s.Cmd) } // ReadSpecs reads all the specs that match the provided globbing path. diff --git a/internal/pkg/agent/stateresolver/resolve_test.go b/internal/pkg/agent/stateresolver/resolve_test.go index 5a37ba6535d..4276ca39639 100644 --- a/internal/pkg/agent/stateresolver/resolve_test.go +++ b/internal/pkg/agent/stateresolver/resolve_test.go @@ -46,13 +46,13 @@ func TestResolver(t *testing.T) { ID: "config-1", LastModified: tn, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: startState, LastModified: tn, Identifier: "filebeat", Program: fb1, }, - "metricbeat": active{ + "metricbeat": { LastChange: startState, LastModified: tn, Identifier: "metricbeat", @@ -61,13 +61,13 @@ func TestResolver(t *testing.T) { }, }, steps: []configrequest.Step{ - configrequest.Step{ + { ID: configrequest.StepRun, ProgramSpec: fb1.Spec, Version: release.Version(), Meta: withMeta(fb1), }, - configrequest.Step{ + { ID: configrequest.StepRun, ProgramSpec: mb1.Spec, Version: release.Version(), @@ -87,7 +87,7 @@ func TestResolver(t *testing.T) { ID: "config-1", LastModified: tn, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: startState, LastModified: tn, Identifier: "filebeat", @@ -99,13 +99,13 @@ func TestResolver(t *testing.T) { ID: "config-2", LastModified: tn2, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: unchangedState, LastModified: tn, Identifier: "filebeat", Program: fb1, }, - "metricbeat": active{ + "metricbeat": { LastChange: startState, LastModified: tn2, Identifier: "metricbeat", @@ -114,7 +114,7 @@ func TestResolver(t *testing.T) { }, }, steps: []configrequest.Step{ - configrequest.Step{ + { ID: configrequest.StepRun, ProgramSpec: mb1.Spec, Version: release.Version(), @@ -134,7 +134,7 @@ func TestResolver(t *testing.T) { ID: "config-1", LastModified: tn, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: startState, LastModified: tn, Identifier: "filebeat", @@ -146,13 +146,13 @@ func TestResolver(t *testing.T) { ID: "config-2", LastModified: tn2, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: updateState, LastModified: tn2, Identifier: "filebeat", Program: fb2, }, - "metricbeat": active{ + "metricbeat": { LastChange: startState, LastModified: tn2, Identifier: "metricbeat", @@ -161,13 +161,13 @@ func TestResolver(t *testing.T) { }, }, steps: []configrequest.Step{ - configrequest.Step{ + { ID: configrequest.StepRun, ProgramSpec: fb2.Spec, Version: release.Version(), Meta: withMeta(fb2), }, - configrequest.Step{ + { ID: configrequest.StepRun, ProgramSpec: mb1.Spec, Version: release.Version(), @@ -187,7 +187,7 @@ func TestResolver(t *testing.T) { ID: "config-1", LastModified: tn, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: startState, LastModified: tn, Identifier: "filebeat", @@ -199,7 +199,7 @@ func TestResolver(t *testing.T) { ID: "config-2", LastModified: tn2, Active: map[string]active{ - "metricbeat": active{ + "metricbeat": { LastChange: startState, LastModified: tn2, Identifier: "metricbeat", @@ -208,12 +208,12 @@ func TestResolver(t *testing.T) { }, }, steps: []configrequest.Step{ - configrequest.Step{ + { ID: configrequest.StepRemove, ProgramSpec: fb1.Spec, Version: release.Version(), }, - configrequest.Step{ + { ID: configrequest.StepRun, ProgramSpec: mb1.Spec, Version: release.Version(), @@ -231,13 +231,13 @@ func 
TestResolver(t *testing.T) { ID: "config-1", LastModified: tn, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: startState, LastModified: tn, Identifier: "filebeat", Program: fb1, }, - "metricbeat": active{ + "metricbeat": { LastChange: startState, LastModified: tn, Identifier: "metricbeat", @@ -251,12 +251,12 @@ func TestResolver(t *testing.T) { Active: map[string]active{}, }, steps: []configrequest.Step{ - configrequest.Step{ + { ID: configrequest.StepRemove, ProgramSpec: fb1.Spec, Version: release.Version(), }, - configrequest.Step{ + { ID: configrequest.StepRemove, ProgramSpec: mb1.Spec, Version: release.Version(), @@ -275,13 +275,13 @@ func TestResolver(t *testing.T) { ID: "config-1", LastModified: tn, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: startState, LastModified: tn, Identifier: "filebeat", Program: fb1, }, - "metricbeat": active{ + "metricbeat": { LastChange: startState, LastModified: tn, Identifier: "metricbeat", @@ -293,13 +293,13 @@ func TestResolver(t *testing.T) { ID: "config-1", LastModified: tn, Active: map[string]active{ - "filebeat": active{ + "filebeat": { LastChange: unchangedState, LastModified: tn, Identifier: "filebeat", Program: fb1, }, - "metricbeat": active{ + "metricbeat": { LastChange: unchangedState, LastModified: tn, Identifier: "metricbeat", diff --git a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go index a965295e9cc..cc53d065c73 100644 --- a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go +++ b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go @@ -16,9 +16,8 @@ import ( "path/filepath" "testing" - "github.com/google/go-cmp/cmp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" + "github.com/google/go-cmp/cmp" ) const ( diff --git a/internal/pkg/agent/storage/encrypted_disk_store.go b/internal/pkg/agent/storage/encrypted_disk_store.go index 7fe2f70339a..48027b3178f 100644 --- a/internal/pkg/agent/storage/encrypted_disk_store.go +++ b/internal/pkg/agent/storage/encrypted_disk_store.go @@ -91,7 +91,7 @@ func (d *EncryptedDiskStore) Save(in io.Reader) error { // Ensure has agent key err := d.ensureKey() if err != nil { - return err + return errors.New(err, "failed to ensure key") } tmpFile := d.target + ".tmp" @@ -111,7 +111,7 @@ func (d *EncryptedDiskStore) Save(in io.Reader) error { w, err := crypto.NewWriterWithDefaults(fd, d.key) if err != nil { fd.Close() - return err + return errors.New(err, "failed to open crypto writers") } if _, err := io.Copy(w, in); err != nil { @@ -180,7 +180,7 @@ func (d *EncryptedDiskStore) Load() (rc io.ReadCloser, err error) { // Ensure has agent key err = d.ensureKey() if err != nil { - return nil, err + return nil, errors.New(err, "failed to ensure key during encrypted disk store Load") } return crypto.NewReaderWithDefaults(fd, d.key) diff --git a/internal/pkg/artifact/artifact.go b/internal/pkg/artifact/artifact.go index 63ae2366a58..ebe84b95db7 100644 --- a/internal/pkg/artifact/artifact.go +++ b/internal/pkg/artifact/artifact.go @@ -32,7 +32,7 @@ func GetArtifactName(spec program.Spec, version, operatingSystem, arch string) ( return "", errors.New(fmt.Sprintf("'%s' is not a valid combination for a package", key), errors.TypeConfig) } - return fmt.Sprintf("%s-%s-%s", spec.Cmd, version, suffix), nil + return fmt.Sprintf("%s-%s-%s", spec.CommandName(), version, suffix), nil } // GetArtifactPath 
returns a full path of artifact for a program in specific version diff --git a/internal/pkg/artifact/download/composed/downloader_test.go b/internal/pkg/artifact/download/composed/downloader_test.go index 92fd44351ec..4964c090ef9 100644 --- a/internal/pkg/artifact/download/composed/downloader_test.go +++ b/internal/pkg/artifact/download/composed/downloader_test.go @@ -59,7 +59,7 @@ func TestComposed(t *testing.T) { for _, tc := range testCases { d := NewDownloader(tc.downloaders[0], tc.downloaders[1]) - r, _ := d.Download(context.TODO(), program.Spec{Name: "a", Cmd: "a", Artifact: "a/a"}, "b") + r, _ := d.Download(context.TODO(), program.Spec{Name: "a"}, "b") assert.Equal(t, tc.expectedResult, r == "succ") diff --git a/internal/pkg/artifact/download/http/downloader.go b/internal/pkg/artifact/download/http/downloader.go index 2da6e3d1015..5f68816c58a 100644 --- a/internal/pkg/artifact/download/http/downloader.go +++ b/internal/pkg/artifact/download/http/downloader.go @@ -76,6 +76,7 @@ func NewDownloaderWithClient(log progressLogger, config *artifact.Config, client // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (_ string, err error) { + remoteArtifact := spec.Artifact downloadedFiles := make([]string, 0, 2) defer func() { if err != nil { @@ -86,13 +87,13 @@ func (e *Downloader) Download(ctx context.Context, spec program.Spec, version st }() // download from source to dest - path, err := e.download(ctx, e.config.OS(), spec, version) + path, err := e.download(ctx, remoteArtifact, e.config.OS(), spec, version) downloadedFiles = append(downloadedFiles, path) if err != nil { return "", err } - hashPath, err := e.downloadHash(ctx, e.config.OS(), spec, version) + hashPath, err := e.downloadHash(ctx, remoteArtifact, e.config.OS(), spec, version) downloadedFiles = append(downloadedFiles, hashPath) return path, err } @@ -114,7 +115,7 @@ func (e *Downloader) composeURI(artifactName, packageName string) (string, error return uri.String(), nil } -func (e *Downloader) download(ctx context.Context, operatingSystem string, spec program.Spec, version string) (string, error) { +func (e *Downloader) download(ctx context.Context, remoteArtifact string, operatingSystem string, spec program.Spec, version string) (string, error) { filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") @@ -125,10 +126,10 @@ func (e *Downloader) download(ctx context.Context, operatingSystem string, spec return "", errors.New(err, "generating package path failed") } - return e.downloadFile(ctx, spec.Artifact, filename, fullPath) + return e.downloadFile(ctx, remoteArtifact, filename, fullPath) } -func (e *Downloader) downloadHash(ctx context.Context, operatingSystem string, spec program.Spec, version string) (string, error) { +func (e *Downloader) downloadHash(ctx context.Context, remoteArtifact string, operatingSystem string, spec program.Spec, version string) (string, error) { filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch()) if err != nil { return "", errors.New(err, "generating package name failed") @@ -142,7 +143,7 @@ func (e *Downloader) downloadHash(ctx context.Context, operatingSystem string, s filename = filename + ".sha512" fullPath = fullPath + ".sha512" - return e.downloadFile(ctx, spec.Artifact, filename, fullPath) + 
return e.downloadFile(ctx, remoteArtifact, filename, fullPath) } func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, fullPath string) (string, error) { diff --git a/internal/pkg/artifact/download/http/elastic_test.go b/internal/pkg/artifact/download/http/elastic_test.go index c29b8115089..e76bc92fd06 100644 --- a/internal/pkg/artifact/download/http/elastic_test.go +++ b/internal/pkg/artifact/download/http/elastic_test.go @@ -34,7 +34,7 @@ const ( var ( beatSpec = program.Spec{ - Name: "Filebeat", + Name: "filebeat", Cmd: "filebeat", Artifact: "beats/filebeat", } @@ -165,16 +165,16 @@ func getRandomTestCases() []testCase { func getElasticCoClient() http.Client { correctValues := map[string]struct{}{ - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i386.deb"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "amd64.deb"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i686.rpm"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "x86_64.rpm"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86.tar.gz"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-arm64.tar.gz"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86_64.tar.gz"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86.zip"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86_64.zip"): struct{}{}, - fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "darwin-x86_64.tar.gz"): struct{}{}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "i386.deb"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "amd64.deb"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "i686.rpm"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "x86_64.rpm"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-x86.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-arm64.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-x86_64.tar.gz"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "windows-x86.zip"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "windows-x86_64.zip"): {}, + fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "darwin-x86_64.tar.gz"): {}, } handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { diff --git a/internal/pkg/artifact/install/atomic/atomic_installer.go b/internal/pkg/artifact/install/atomic/atomic_installer.go index 10c2652c1c8..3e61aacb4ef 100644 --- a/internal/pkg/artifact/install/atomic/atomic_installer.go +++ b/internal/pkg/artifact/install/atomic/atomic_installer.go @@ -11,9 +11,8 @@ import ( "path/filepath" "runtime" - "github.com/hashicorp/go-multierror" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" + "github.com/hashicorp/go-multierror" ) type embeddedInstaller interface { diff --git a/internal/pkg/artifact/install/hooks/hooks_installer.go b/internal/pkg/artifact/install/hooks/hooks_installer.go index 3f7386bcf06..73ce7b81c5b 100644 --- a/internal/pkg/artifact/install/hooks/hooks_installer.go +++ b/internal/pkg/artifact/install/hooks/hooks_installer.go @@ -55,5 +55,6 @@ func (i *InstallerChecker) Check(ctx context.Context, spec program.Spec, version if spec.CheckInstallSteps != nil { return spec.CheckInstallSteps.Execute(ctx, installDir) } + return nil } diff --git a/internal/pkg/core/app/descriptor.go b/internal/pkg/core/app/descriptor.go index 
1f902e294ad..84c7c1019ae 100644
--- a/internal/pkg/core/app/descriptor.go
+++ b/internal/pkg/core/app/descriptor.go
@@ -6,8 +6,8 @@ package app
 
 import (
 	"path/filepath"
-	"strings"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
 	"github.com/elastic/elastic-agent/internal/pkg/artifact"
 )
@@ -23,12 +23,22 @@ type Descriptor struct {
 
 // NewDescriptor creates a program which satisfies Program interface and can be used with Operator.
 func NewDescriptor(spec program.Spec, version string, config *artifact.Config, tags map[Tag]string) *Descriptor {
-	dir := directory(spec, version, config)
+	dir := paths.Components()
+	return NewDescriptorWithPath(dir, spec, version, config, tags)
+}
+
+// NewDescriptorWithPath creates a program which satisfies Program interface and can be used with Operator.
+func NewDescriptorWithPath(path string, spec program.Spec, version string, config *artifact.Config, tags map[Tag]string) *Descriptor {
+	servicePort := 0
+	if spec.ServicePort > 0 {
+		servicePort = spec.ServicePort
+	}
+
 	return &Descriptor{
 		spec:         spec,
-		directory:    dir,
-		executionCtx: NewExecutionContext(spec.ServicePort, spec.Cmd, version, tags),
-		process:      specification(dir, spec),
+		directory:    path,
+		executionCtx: NewExecutionContext(servicePort, spec.CommandName(), version, tags),
+		process:      specification(path, spec),
 	}
 }
 
@@ -38,11 +48,6 @@ func (p *Descriptor) ServicePort() int {
 	return p.executionCtx.ServicePort
 }
 
-// ArtifactName is the name of the artifact to download from the artifact store. E.g beats/filebeat.
-func (p *Descriptor) ArtifactName() string {
-	return p.spec.Artifact
-}
-
 // BinaryName is the name of the binary. E.g filebeat.
 func (p *Descriptor) BinaryName() string {
 	return p.executionCtx.BinaryName
@@ -79,26 +84,8 @@ func (p *Descriptor) Directory() string {
 
 func specification(dir string, spec program.Spec) ProcessSpec {
 	return ProcessSpec{
-		BinaryPath:    filepath.Join(dir, spec.Cmd),
+		BinaryPath:    filepath.Join(dir, spec.Command()),
 		Args:          spec.Args,
 		Configuration: nil,
 	}
 }
-
-func directory(spec program.Spec, version string, config *artifact.Config) string {
-	if version == "" {
-		return filepath.Join(config.InstallPath, spec.Cmd)
-	}
-
-	path, err := artifact.GetArtifactPath(spec, version, config.OS(), config.Arch(), config.InstallPath)
-	if err != nil {
-		return ""
-	}
-
-	suffix := ".tar.gz"
-	if config.OS() == "windows" {
-		suffix = ".zip"
-	}
-
-	return strings.TrimSuffix(path, suffix)
-}
diff --git a/internal/pkg/core/monitoring/beats/beats_monitor.go b/internal/pkg/core/monitoring/beats/beats_monitor.go
index a513729497b..d70878eb8a0 100644
--- a/internal/pkg/core/monitoring/beats/beats_monitor.go
+++ b/internal/pkg/core/monitoring/beats/beats_monitor.go
@@ -145,7 +145,7 @@ func (b *Monitor) EnrichArgs(spec program.Spec, pipelineID string, args []string
 
 	loggingPath := b.generateLoggingPath(spec, pipelineID)
 	if loggingPath != "" {
-		logFile := spec.Cmd
+		logFile := spec.CommandName()
 		if isSidecar {
 			logFile += "_monitor"
 		}
diff --git a/internal/pkg/core/monitoring/beats/monitoring.go b/internal/pkg/core/monitoring/beats/monitoring.go
index a724e6f4246..cb93d74e136 100644
--- a/internal/pkg/core/monitoring/beats/monitoring.go
+++ b/internal/pkg/core/monitoring/beats/monitoring.go
@@ -35,10 +35,10 @@ func MonitoringEndpoint(spec program.Spec, operatingSystem, pipelineID string) s
 		return endpoint
 	}
 	if operatingSystem == "windows" {
-		return fmt.Sprintf(mbEndpointFileFormatWin, pipelineID,
spec.Cmd) + return fmt.Sprintf(mbEndpointFileFormatWin, pipelineID, spec.CommandName()) } // unix socket path must be less than 104 characters - path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), pipelineID, spec.Cmd, spec.Cmd)) + path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), pipelineID, spec.CommandName(), spec.CommandName())) if len(path) < 104 { return path } @@ -52,9 +52,9 @@ func getLoggingFile(spec program.Spec, operatingSystem, installPath, pipelineID return path } if operatingSystem == "windows" { - return fmt.Sprintf(logFileFormatWin, paths.Home(), pipelineID, spec.Cmd) + return fmt.Sprintf(logFileFormatWin, paths.Home(), pipelineID, spec.CommandName()) } - return fmt.Sprintf(logFileFormat, paths.Home(), pipelineID, spec.Cmd) + return fmt.Sprintf(logFileFormat, paths.Home(), pipelineID, spec.CommandName()) } // AgentMonitoringEndpoint returns endpoint with exposed metrics for agent. diff --git a/internal/pkg/core/monitoring/server/process.go b/internal/pkg/core/monitoring/server/process.go index 56f7d26eb78..cbd4ddcf3df 100644 --- a/internal/pkg/core/monitoring/server/process.go +++ b/internal/pkg/core/monitoring/server/process.go @@ -91,9 +91,9 @@ func processHandler(statsHandler func(http.ResponseWriter, *http.Request) error) } var beatsPathAllowlist = map[string]struct{}{ - "": struct{}{}, - "stats": struct{}{}, - "state": struct{}{}, + "": {}, + "stats": {}, + "state": {}, } func processMetrics(ctx context.Context, endpoint, path string) ([]byte, int, error) { diff --git a/internal/pkg/core/plugin/service/app.go b/internal/pkg/core/plugin/service/app.go index 312ac8b592e..14a3abb6148 100644 --- a/internal/pkg/core/plugin/service/app.go +++ b/internal/pkg/core/plugin/service/app.go @@ -13,14 +13,13 @@ import ( "sync" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" "github.com/elastic/elastic-agent/internal/pkg/core/plugin" @@ -169,6 +168,7 @@ func (a *Application) Start(ctx context.Context, _ app.Taggable, cfg map[string] a.srvState.UpdateConfig(a.srvState.Config()) } else { a.setState(state.Starting, "Starting", nil) + a.srvState, err = a.srv.Register(a, string(cfgStr)) if err != nil { return err diff --git a/magefile.go b/magefile.go index b640f46fd08..6bb88c5fff9 100644 --- a/magefile.go +++ b/magefile.go @@ -23,6 +23,7 @@ import ( "github.com/hashicorp/go-multierror" "github.com/magefile/mage/mg" "github.com/magefile/mage/sh" + "github.com/otiai10/copy" "github.com/pkg/errors" "github.com/elastic/e2e-testing/pkg/downloads" @@ -50,6 +51,7 @@ const ( externalArtifacts = "EXTERNAL" configFile = "elastic-agent.yml" agentDropPath = "AGENT_DROP_PATH" + specSuffix = ".spec.yml" // TODO: change after beat ignores yml config ) // Aliases for commands required by master makefile @@ -276,8 +278,8 @@ func (Build) TestBinaries() error { execName += ".exe" } return combineErr( - RunGo("build", "-o", filepath.Join(p, "configurable-1.0-darwin-x86_64", configurableName), filepath.Join(p, "configurable-1.0-darwin-x86_64", "main.go")), - RunGo("build", "-o", filepath.Join(p, "serviceable-1.0-darwin-x86_64", serviceableName), filepath.Join(p, 
"serviceable-1.0-darwin-x86_64", "main.go")), + RunGo("build", "-o", filepath.Join(p, configurableName), filepath.Join(p, "configurable-1.0-darwin-x86_64", "main.go")), + RunGo("build", "-o", filepath.Join(p, serviceableName), filepath.Join(p, "serviceable-1.0-darwin-x86_64", "main.go")), RunGo("build", "-o", filepath.Join(p2, "exec-1.0-darwin-x86_64", execName), filepath.Join(p2, "exec-1.0-darwin-x86_64", "main.go")), ) } @@ -421,12 +423,14 @@ func Package() { packageAgent(requiredPackages, devtools.UseElasticAgentPackaging) } + func getPackageName(beat, version, pkg string) (string, string) { if _, ok := os.LookupEnv(snapshotEnv); ok { version += "-SNAPSHOT" } return version, fmt.Sprintf("%s-%s-%s", beat, version, pkg) } + func requiredPackagesPresent(basePath, beat, version string, requiredPackages []string) bool { for _, pkg := range requiredPackages { _, packageName := getPackageName(beat, version, pkg) @@ -512,6 +516,8 @@ func BuildSpec() error { fmt.Printf(">> Buildspec from %s to %s\n", in, out) return RunGo("run", goF, "--in", in, "--out", out) + + return nil } func BuildPGP() error { @@ -661,23 +667,24 @@ func packageAgent(requiredPackages []string, packagingFn func()) { version = release.Version() } + dropPath, found := os.LookupEnv(agentDropPath) + var archivePath string + // build deps only when drop is not provided - if dropPathEnv, found := os.LookupEnv(agentDropPath); !found || len(dropPathEnv) == 0 { + if !found || len(dropPath) == 0 { // prepare new drop - dropPath := filepath.Join("build", "distributions", "elastic-agent-drop") + dropPath = filepath.Join("build", "distributions", "elastic-agent-drop") dropPath, err := filepath.Abs(dropPath) if err != nil { panic(err) } - if err := os.MkdirAll(dropPath, 0755); err != nil { - panic(err) - } + archivePath = movePackagesToArchive(dropPath, requiredPackages) + defer os.RemoveAll(dropPath) os.Setenv(agentDropPath, dropPath) // cleanup after build - defer os.RemoveAll(dropPath) defer os.Unsetenv(agentDropPath) packedBeats := []string{"filebeat", "heartbeat", "metricbeat", "osquerybeat"} @@ -685,8 +692,10 @@ func packageAgent(requiredPackages []string, packagingFn func()) { ctx := context.Background() for _, beat := range packedBeats { for _, reqPackage := range requiredPackages { + targetPath := filepath.Join(archivePath, reqPackage) + os.MkdirAll(targetPath, 0755) newVersion, packageName := getPackageName(beat, version, reqPackage) - err := fetchBinaryFromArtifactsApi(ctx, packageName, beat, newVersion, dropPath) + err := fetchBinaryFromArtifactsApi(ctx, packageName, beat, newVersion, targetPath) if err != nil { panic(fmt.Sprintf("fetchBinaryFromArtifactsApi failed: %v", err)) } @@ -717,11 +726,98 @@ func packageAgent(requiredPackages []string, packagingFn func()) { // copy to new drop sourcePath := filepath.Join(pwd, "build", "distributions") - if err := copyAll(sourcePath, dropPath); err != nil { - panic(err) + for _, rp := range requiredPackages { + files, err := filepath.Glob(filepath.Join(sourcePath, "*"+rp+"*")) + if err != nil { + panic(err) + } + + targetPath := filepath.Join(archivePath, rp) + os.MkdirAll(targetPath, 0755) + for _, f := range files { + targetFile := filepath.Join(targetPath, filepath.Base(f)) + if err := sh.Copy(targetFile, f); err != nil { + panic(err) + } + } } } } + } else { + archivePath = movePackagesToArchive(dropPath, requiredPackages) + } + defer os.RemoveAll(archivePath) + + // create flat dir + flatPath := filepath.Join(dropPath, ".elastic-agent_flat") + os.MkdirAll(flatPath, 0755) + defer 
+
+	for _, rp := range requiredPackages {
+		targetPath := filepath.Join(archivePath, rp)
+		versionedFlatPath := filepath.Join(flatPath, rp)
+		versionedDropPath := filepath.Join(dropPath, rp)
+		os.MkdirAll(targetPath, 0755)
+		os.MkdirAll(versionedFlatPath, 0755)
+		os.MkdirAll(versionedDropPath, 0755)
+
+		// untar all
+		matches, err := filepath.Glob(filepath.Join(targetPath, "*tar.gz"))
+		if err != nil {
+			panic(err)
+		}
+		zipMatches, err := filepath.Glob(filepath.Join(targetPath, "*zip"))
+		if err != nil {
+			panic(err)
+		}
+		matches = append(matches, zipMatches...)
+
+		for _, m := range matches {
+			stat, err := os.Stat(m)
+			if os.IsNotExist(err) {
+				continue
+			} else if err != nil {
+				panic(errors.Wrap(err, "failed stating file"))
+			}
+
+			if stat.IsDir() {
+				continue
+			}
+
+			if err := devtools.Extract(m, versionedFlatPath); err != nil {
+				panic(err)
+			}
+		}
+
+		files, err := filepath.Glob(filepath.Join(versionedFlatPath, "*"))
+		if err != nil {
+			panic(err)
+		}
+
+		for _, f := range files {
+			options := copy.Options{
+				OnSymlink: func(_ string) copy.SymlinkAction {
+					return copy.Shallow
+				},
+				Sync: true,
+			}
+
+			err = copy.Copy(f, versionedDropPath, options)
+			if err != nil {
+				panic(err)
+			}
+
+			// copy spec file for match
+			specName := filepath.Base(f)
+			idx := strings.Index(specName, "-"+version)
+			if idx != -1 {
+				specName = specName[:idx]
+			}
+
+			if err := devtools.Copy(filepath.Join("specs", specName+specSuffix), filepath.Join(versionedDropPath, specName+specSuffix)); err != nil {
+				panic(err)
+			}
+		}
 	}
 
 	// package agent
@@ -732,6 +828,48 @@ func packageAgent(requiredPackages []string, packagingFn func()) {
 	mg.SerialDeps(devtools.Package, TestPackages)
 }
 
+func movePackagesToArchive(dropPath string, requiredPackages []string) string {
+	archivePath := filepath.Join(dropPath, "archives")
+	os.MkdirAll(archivePath, 0755)
+
+	// move archives to archive path
+	matches, err := filepath.Glob(filepath.Join(dropPath, "*tar.gz*"))
+	if err != nil {
+		panic(err)
+	}
+	zipMatches, err := filepath.Glob(filepath.Join(dropPath, "*zip*"))
+	if err != nil {
+		panic(err)
+	}
+	matches = append(matches, zipMatches...)
+
+	for _, f := range matches {
+		for _, rp := range requiredPackages {
+			if !strings.Contains(f, rp) {
+				continue
+			}
+
+			stat, err := os.Stat(f)
+			if os.IsNotExist(err) {
+				continue
+			} else if err != nil {
+				panic(errors.Wrap(err, "failed stating file"))
+			}
+
+			if stat.IsDir() {
+				continue
+			}
+
+			targetPath := filepath.Join(archivePath, rp)
+			if err := os.Rename(f, filepath.Join(targetPath, filepath.Base(f))); err != nil {
+				panic(errors.Wrap(err, "failed renaming file"))
+			}
+		}
+	}
+
+	return archivePath
+}
+
 func fetchBinaryFromArtifactsApi(ctx context.Context, packageName, artifact, version, downloadPath string) error {
 	location, err := downloads.FetchBeatsBinary(
 		ctx,
@@ -755,7 +893,7 @@ func selectedPackageTypes() string {
 	return "PACKAGES=targz,zip"
 }
 
-func copyAll(from, to string) error {
+func copyAll(from, to string, suffixes ...[]string) error {
 	return filepath.Walk(from, func(path string, info os.FileInfo, err error) error {
 		if err != nil {
 			return err
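Taken together, the packaging flow above stages each required package under an archives directory, extracts it into a flat staging directory, and reassembles a per-package drop with the matching component spec beside each extracted tree. A rough sketch of the resulting layout for a single required package (the beat name and version here are illustrative, not taken from this patch):

    build/distributions/elastic-agent-drop/
        archives/linux-x86_64.tar.gz/
            filebeat-8.3.0-linux-x86_64.tar.gz        # fetched from the artifacts API or built locally
        .elastic-agent_flat/linux-x86_64.tar.gz/
            filebeat-8.3.0-linux-x86_64/              # extracted archive; removed after packaging
        linux-x86_64.tar.gz/
            filebeat-8.3.0-linux-x86_64/              # flattened copy of the extracted tree
            filebeat.spec.yml                         # copied from specs/ by spec-name match
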
diff --git a/pkg/component/input_spec.go b/pkg/component/input_spec.go
new file mode 100644
index 00000000000..0a45a830cd7
--- /dev/null
+++ b/pkg/component/input_spec.go
@@ -0,0 +1,55 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package component
+
+import (
+	"fmt"
+
+	"github.com/elastic/elastic-agent/internal/pkg/eql"
+)
+
+// InputSpec is the specification for an input type.
+type InputSpec struct {
+	Name        string      `config:"name" yaml:"name" validate:"required"`
+	Aliases     []string    `config:"aliases,omitempty" yaml:"aliases,omitempty"`
+	Description string      `config:"description" yaml:"description" validate:"required"`
+	Platforms   []string    `config:"platforms" yaml:"platforms" validate:"required,min=1"`
+	Outputs     []string    `config:"outputs" yaml:"outputs" validate:"required,min=1"`
+	Runtime     RuntimeSpec `config:"runtime" yaml:"runtime"`
+
+	Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"`
+	Service *ServiceSpec `config:"service,omitempty" yaml:"service,omitempty"`
+}
+
+// Validate ensures correctness of input specification.
+func (s *InputSpec) Validate() error {
+	if s.Command == nil && s.Service == nil {
+		return fmt.Errorf("input '%s' must define either command or service", s.Name)
+	}
+	for i, a := range s.Platforms {
+		if !GlobalPlatforms.Exists(a) {
+			return fmt.Errorf("input '%s' defines an unknown platform '%s'", s.Name, a)
+		}
+		for j, b := range s.Platforms {
+			if i != j && a == b {
+				return fmt.Errorf("input '%s' defines the platform '%s' more than once", s.Name, a)
+			}
+		}
+	}
+	for i, a := range s.Outputs {
+		for j, b := range s.Outputs {
+			if i != j && a == b {
+				return fmt.Errorf("input '%s' defines the output '%s' more than once", s.Name, a)
+			}
+		}
+	}
+	for idx, prevention := range s.Runtime.Preventions {
+		_, err := eql.New(prevention.Condition)
+		if err != nil {
+			return fmt.Errorf("input '%s' defined 'runtime.preventions.%d.condition' failed to compile: %w", s.Name, idx, err)
+		}
+	}
+	return nil
+}
diff --git a/pkg/component/load.go b/pkg/component/load.go
index feb16d0b97c..38e934b836f 100644
--- a/pkg/component/load.go
+++ b/pkg/component/load.go
@@ -5,16 +5,19 @@
 package component
 
 import (
-	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 	"github.com/elastic/go-ucfg/yaml"
 )
 
-const specGlobPattern = "*.spec.yml"
+const (
+	specSuffix      = ".spec.yml"
+	specGlobPattern = "*" + specSuffix
+)
 
 var (
 	// ErrInputNotSupported is returned when the input is not supported on any platform
diff --git a/pkg/component/output_spec.go b/pkg/component/output_spec.go
new file mode 100644
index 00000000000..d7d80b9d074
--- /dev/null
+++ b/pkg/component/output_spec.go
@@ -0,0 +1,31 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package component
+
+import "fmt"
+
+// OutputSpec is the specification for an output type.
+type OutputSpec struct {
+	Name        string   `config:"name" yaml:"name" validate:"required"`
+	Description string   `config:"description" yaml:"description" validate:"required"`
+	Platforms   []string `config:"platforms" yaml:"platforms" validate:"required,min=1"`
+
+	Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"`
+}
+
+// Validate ensures correctness of output specification.
+func (s *OutputSpec) Validate() error {
+	if s.Command == nil {
+		return fmt.Errorf("output '%s' must define command", s.Name)
+	}
+	for i, a := range s.Platforms {
+		for j, b := range s.Platforms {
+			if i != j && a == b {
+				return fmt.Errorf("output '%s' defines the platform '%s' more than once", s.Name, a)
+			}
+		}
+	}
+	return nil
+}
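For orientation, a minimal sketch of how these Validate methods surface to a caller. It assumes LoadSpec (pkg/component/load.go above) unpacks through go-ucfg, whose Validator convention invokes Validate during Unpack; the spec content and error handling are illustrative only:

    // Sketch only: hypothetical spec content, not part of this patch.
    data := []byte(`
    version: 2
    inputs:
      - name: example
        description: "Example input"
        platforms:
          - linux/amd64
          - linux/amd64
        outputs:
          - elasticsearch
        command:
          args:
            - "-E"
            - "management.enabled=true"
    `)
    if _, err := component.LoadSpec(data); err != nil {
        // Expected to fail here with:
        //   input 'example' defines the platform 'linux/amd64' more than once
        log.Fatal(err)
    }
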
diff --git a/pkg/component/spec.go b/pkg/component/spec.go
index 2800f7275a2..bfd0efedb86 100644
--- a/pkg/component/spec.go
+++ b/pkg/component/spec.go
@@ -8,12 +8,11 @@ import (
 	"errors"
 	"fmt"
 	"time"
-
-	"github.com/elastic/elastic-agent/internal/pkg/eql"
 )
 
 // Spec a components specification.
 type Spec struct {
+	Name    string      `yaml:"name,omitempty"`
 	Version int         `config:"version" yaml:"version" validate:"required"`
 	Inputs  []InputSpec `config:"inputs,omitempty" yaml:"inputs,omitempty"`
 }
@@ -44,50 +43,6 @@ func (s *Spec) Validate() error {
 	return nil
 }
 
-// InputSpec is the specification for an input type.
-type InputSpec struct {
-	Name        string      `config:"name" yaml:"name" validate:"required"`
-	Aliases     []string    `config:"aliases,omitempty" yaml:"aliases,omitempty"`
-	Description string      `config:"description" yaml:"description" validate:"required"`
-	Platforms   []string    `config:"platforms" yaml:"platforms" validate:"required,min=1"`
-	Outputs     []string    `config:"outputs" yaml:"outputs" validate:"required,min=1"`
-	Runtime     RuntimeSpec `config:"runtime" yaml:"runtime"`
-
-	Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"`
-	Service *ServiceSpec `config:"service,omitempty" yaml:"service,omitempty"`
-}
-
-// Validate ensures correctness of input specification.
-func (s *InputSpec) Validate() error {
-	if s.Command == nil && s.Service == nil {
-		return fmt.Errorf("input '%s' must define either command or service", s.Name)
-	}
-	for i, a := range s.Platforms {
-		if !GlobalPlatforms.Exists(a) {
-			return fmt.Errorf("input '%s' defines an unknown platform '%s'", s.Name, a)
-		}
-		for j, b := range s.Platforms {
-			if i != j && a == b {
-				return fmt.Errorf("input '%s' defines the platform '%s' more than once", s.Name, a)
-			}
-		}
-	}
-	for i, a := range s.Outputs {
-		for j, b := range s.Outputs {
-			if i != j && a == b {
-				return fmt.Errorf("input '%s' defines the output '%s' more than once", s.Name, a)
-			}
-		}
-	}
-	for idx, prevention := range s.Runtime.Preventions {
-		_, err := eql.New(prevention.Condition)
-		if err != nil {
-			return fmt.Errorf("input '%s' defined 'runtime.preventions.%d.condition' failed to compile: %w", s.Name, idx, err)
-		}
-	}
-	return nil
-}
-
 // RuntimeSpec is the specification for runtime options.
type RuntimeSpec struct { Preventions []RuntimePreventionSpec `config:"preventions" yaml:"preventions"` diff --git a/specs/apm-server.spec.yml b/specs/apm-server.spec.yml index 58cc26f47eb..e646e9facce 100644 --- a/specs/apm-server.spec.yml +++ b/specs/apm-server.spec.yml @@ -1,23 +1,23 @@ -version: 2 -inputs: - - name: apm - description: "APM Server" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "management.enabled=true" - - "-E" - - "gc_percent=${APMSERVER_GOGC:100}" +version: 2 +inputs: + - name: apm + description: "APM Server" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "management.enabled=true" + - "-E" + - "gc_percent=${APMSERVER_GOGC:100}" diff --git a/specs/auditbeat.spec.yml b/specs/auditbeat.spec.yml index c17a1e24206..f8c46a96873 100644 --- a/specs/auditbeat.spec.yml +++ b/specs/auditbeat.spec.yml @@ -1,43 +1,43 @@ -version: 2 -inputs: - - name: audit/auditd - description: "Auditd" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${AUDITBEAT_GOGC:100}" - - "-E" - - "auditbeat.config.modules.enabled=false" - - name: audit/file_integrity - description: "Audit File Integrity" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: audit/system - description: "Audit System" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: audit/auditd + description: "Auditd" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${AUDITBEAT_GOGC:100}" + - "-E" + - "auditbeat.config.modules.enabled=false" + - name: audit/file_integrity + description: "Audit File Integrity" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: audit/system + description: "Audit System" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/cloudbeat.spec.yml b/specs/cloudbeat.spec.yml index a45037319b7..0cd100c28d6 100644 --- a/specs/cloudbeat.spec.yml +++ b/specs/cloudbeat.spec.yml @@ -1,27 +1,27 @@ -version: 2 -inputs: - - name: cloudbeat - description: "Cloudbeat" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "management.enabled=true" - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "gc_percent=${CLOUDBEAT_GOGC:100}" +version: 2 +inputs: + - name: cloudbeat + 
description: "Cloudbeat" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "management.enabled=true" + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "gc_percent=${CLOUDBEAT_GOGC:100}" diff --git a/specs/endpoint-security.spec.yml b/specs/endpoint-security.spec.yml index d0f177b0701..dbb0079ef11 100644 --- a/specs/endpoint-security.spec.yml +++ b/specs/endpoint-security.spec.yml @@ -1,39 +1,39 @@ -version: 2 -inputs: - - name: endpoint - description: "Endpoint Security" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - outputs: - - elasticsearch - runtime: - preventions: - - condition: ${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7' - message: "No support for RHEL7 on arm64" - service: - operations: - check: - args: - - "verify" - - "--log" - - "stderr" - timeout: 30 - install: - args: - - "install" - - "--log" - - "stderr" - - "--upgrade" - - "--resources" - - "endpoint-security-resources.zip" - timeout: 600 - uninstall: - args: - - "uninstall" - - "--log" - - "stderr" - timeout: 600 +version: 2 +inputs: + - name: endpoint + description: "Endpoint Security" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + outputs: + - elasticsearch + runtime: + preventions: + - condition: ${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7' + message: "No support for RHEL7 on arm64" + service: + operations: + check: + args: + - "verify" + - "--log" + - "stderr" + timeout: 30 + install: + args: + - "install" + - "--log" + - "stderr" + - "--upgrade" + - "--resources" + - "endpoint-security-resources.zip" + timeout: 600 + uninstall: + args: + - "uninstall" + - "--log" + - "stderr" + timeout: 600 diff --git a/specs/filebeat.spec.yml b/specs/filebeat.spec.yml index 7726cd5244a..07f3cb7666e 100644 --- a/specs/filebeat.spec.yml +++ b/specs/filebeat.spec.yml @@ -1,170 +1,170 @@ -version: 2 -inputs: - - name: aws-cloudwatch - description: "AWS Cloudwatch" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${FILEBEAT_GOGC:100}" - - "-E" - - "filebeat.config.modules.enabled=false" - - name: aws-s3 - description: "AWS S3" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure-eventhub - description: "Azure Eventhub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry - description: "PCF Cloudfoundry" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: container - description: "Container logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: docker - aliases: - - log/docker - description: "Docker logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: gcp-pubsub - description: "GCP Pub-Sub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: http_endpoint - description: 
"HTTP Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: httpjson - description: "HTTP JSON Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: journald - description: "Journald" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kafka - description: "Kafka" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: log - aliases: - - logfile - - event/file - description: "Logfile" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mqtt - description: "MQTT" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: netflow - description: "Netflow" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: o365audit - description: "Office 365 Audit" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis - aliases: - - log/redis_slowlog - description: "Redis" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syslog - aliases: - - log/syslog - description: "Syslog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: tcp - aliases: - - event/tcp - description: "TCP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: udp - aliases: - - event/udp - description: "UDP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: unix - description: "Unix Socket" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: winlog - description: "Winlog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: filestream - description: "Filestream" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: aws-cloudwatch + description: "AWS Cloudwatch" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${FILEBEAT_GOGC:100}" + - "-E" + - "filebeat.config.modules.enabled=false" + - name: aws-s3 + description: "AWS S3" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: azure-eventhub + description: "Azure Eventhub" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: cloudfoundry + description: "PCF Cloudfoundry" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: container + description: "Container logs" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: docker + aliases: + - log/docker + description: "Docker logs" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: gcp-pubsub + description: "GCP Pub-Sub" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: http_endpoint + description: "HTTP Endpoint" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: httpjson + description: "HTTP JSON Endpoint" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: journald + description: "Journald" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kafka + description: "Kafka" + platforms: *platforms + outputs: 
*outputs + command: + args: *args + - name: log + aliases: + - logfile + - event/file + description: "Logfile" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mqtt + description: "MQTT" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: netflow + description: "Netflow" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: o365audit + description: "Office 365 Audit" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: redis + aliases: + - log/redis_slowlog + description: "Redis" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: syslog + aliases: + - log/syslog + description: "Syslog" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: tcp + aliases: + - event/tcp + description: "TCP" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: udp + aliases: + - event/udp + description: "UDP" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: unix + description: "Unix Socket" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: winlog + description: "Winlog" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: filestream + description: "Filestream" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/fleet-server.spec.yml b/specs/fleet-server.spec.yml index 4884d69f8ad..e0bf9c996ff 100644 --- a/specs/fleet-server.spec.yml +++ b/specs/fleet-server.spec.yml @@ -1,17 +1,17 @@ -version: 2 -inputs: - - name: fleet-server - description: "Fleet Server" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - command: - args: - - "--agent-mode" +version: 2 +inputs: + - name: fleet-server + description: "Fleet Server" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + command: + args: + - "--agent-mode" diff --git a/specs/heartbeat.spec.yml b/specs/heartbeat.spec.yml index 56b8146b5bf..0b7da1c9048 100644 --- a/specs/heartbeat.spec.yml +++ b/specs/heartbeat.spec.yml @@ -1,47 +1,47 @@ -version: 2 -inputs: - - name: synthetics/synthetics - description: "Synthetics Browser Monitor" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${HEARTBEAT_GOGC:100}" - - name: synthetics/http - description: "Synthetics HTTP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: synthetics/icmp - description: "Synthetics ICMP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: synthetics/tcp - description: "Synthetics TCP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: synthetics/synthetics + description: "Synthetics Browser Monitor" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - 
elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${HEARTBEAT_GOGC:100}" + - name: synthetics/http + description: "Synthetics HTTP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/icmp + description: "Synthetics ICMP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/tcp + description: "Synthetics TCP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/metricbeat.spec.yml b/specs/metricbeat.spec.yml index 9859a37582c..a744cdb535f 100644 --- a/specs/metricbeat.spec.yml +++ b/specs/metricbeat.spec.yml @@ -1,157 +1,157 @@ -version: 2 -inputs: - - name: beat/metrics - description: "Beat metrics" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${METRICBEAT_GOGC:100}" - - "-E" - - "metricbeat.config.modules.enabled=false" - - name: docker/metrics - description: "Docker metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: elasticsearch/metrics - description: "Elasticsearch metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kibana/metrics - description: "Kibana metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kubernetes/metrics - description: "Kubernetes metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: linux/metrics - description: "Linux metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: logstash/metrics - description: "Logstash metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mongodb/metrics - description: "Mongodb metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mysql/metrics - description: "MySQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: postgresql/metrics - description: "PostgreSQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis/metrics - description: "Redis metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: system/metrics - description: "System metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: uwsgi/metrics - description: "UWSGI metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: windows/metrics - description: "Windows metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: aws/metrics - description: "AWS metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: awsfargate/metrics - description: "AWS Fargate metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure/metrics - description: "Azure metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry/metrics - description: "PCF 
Cloudfoundry metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: containerd/metrics - description: "Containerd metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mssql/metrics - description: "Microsoft SQL Server metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: oracle/metrics - description: "Oracle Database metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syncgateway/metrics - description: "Couchbase Sync Gateway metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: beat/metrics + description: "Beat metrics" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${METRICBEAT_GOGC:100}" + - "-E" + - "metricbeat.config.modules.enabled=false" + - name: docker/metrics + description: "Docker metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: elasticsearch/metrics + description: "Elasticsearch metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kibana/metrics + description: "Kibana metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kubernetes/metrics + description: "Kubernetes metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: linux/metrics + description: "Linux metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: logstash/metrics + description: "Logstash metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mongodb/metrics + description: "Mongodb metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mysql/metrics + description: "MySQL metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: postgresql/metrics + description: "PostgreSQL metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: redis/metrics + description: "Redis metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: system/metrics + description: "System metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: uwsgi/metrics + description: "UWSGI metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: windows/metrics + description: "Windows metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: aws/metrics + description: "AWS metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: awsfargate/metrics + description: "AWS Fargate metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: azure/metrics + description: "Azure metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: cloudfoundry/metrics + description: "PCF Cloudfoundry metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: containerd/metrics + description: "Containerd metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - 
name: mssql/metrics + description: "Microsoft SQL Server metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: oracle/metrics + description: "Oracle Database metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: syncgateway/metrics + description: "Couchbase Sync Gateway metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/osquerybeat.spec.yml b/specs/osquerybeat.spec.yml index 40fa1dff731..31edb9a3edb 100644 --- a/specs/osquerybeat.spec.yml +++ b/specs/osquerybeat.spec.yml @@ -1,26 +1,26 @@ -version: 2 -inputs: - - name: osquery - description: "Osquery" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${OSQUERYBEAT_GOGC:100}" +version: 2 +inputs: + - name: osquery + description: "Osquery" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${OSQUERYBEAT_GOGC:100}" diff --git a/specs/packetbeat.spec.yml b/specs/packetbeat.spec.yml index becad691f17..0519078cac8 100644 --- a/specs/packetbeat.spec.yml +++ b/specs/packetbeat.spec.yml @@ -1,29 +1,29 @@ -version: 2 -inputs: - - name: packet - description: "Packet Capture" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${PACKETBEAT_GOGC:100}" +version: 2 +inputs: + - name: packet + description: "Packet Capture" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${PACKETBEAT_GOGC:100}" From 7f6b42ae13bb4c3acec105ccd88b0e8c5c788396 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Fri, 24 Jun 2022 09:15:29 +0200 Subject: [PATCH 05/49] Generate checksum file for components (#604) * generating checksum? 
* yaml output

* Update dev-tools/mage/common.go

Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com>

* review

* ioutil removal from magefile

Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com>
---
 dev-tools/mage/common.go   | 19 ++++++----
 dev-tools/mage/pkgtypes.go |  4 ---
 magefile.go                | 74 ++++++++++++++++++++++++++++++++++++--
 3 files changed, 84 insertions(+), 13 deletions(-)

diff --git a/dev-tools/mage/common.go b/dev-tools/mage/common.go
index e14f3038587..4cee3270fe1 100644
--- a/dev-tools/mage/common.go
+++ b/dev-tools/mage/common.go
@@ -754,22 +754,29 @@ func VerifySHA256(file string, hash string) error {
 // CreateSHA512File computes the sha512 sum of the specified file, then writes
 // a sidecar file containing the hash and filename.
 func CreateSHA512File(file string) error {
+	computedHash, err := GetSHA512Hash(file)
+	if err != nil {
+		return err
+	}
+	out := fmt.Sprintf("%v %v", computedHash, filepath.Base(file))
+
+	//nolint:gosec // permissions are correct
+	return os.WriteFile(file+".sha512", []byte(out), 0644)
+}
+
+// GetSHA512Hash returns the hex-encoded SHA-512 sum of the file contents.
+func GetSHA512Hash(file string) (string, error) {
 	f, err := os.Open(file)
 	if err != nil {
-		return errors.Wrap(err, "failed to open file for sha512 summing")
+		return "", errors.Wrap(err, "failed to open file for sha512 summing")
 	}
 	defer f.Close()

 	sum := sha512.New()
 	if _, err := io.Copy(sum, f); err != nil {
-		return errors.Wrap(err, "failed reading from input file")
+		return "", errors.Wrap(err, "failed reading from input file")
 	}

 	computedHash := hex.EncodeToString(sum.Sum(nil))
-	out := fmt.Sprintf("%v %v", computedHash, filepath.Base(file))
-
-	//nolint:gosec // permissions are correct
-	return ioutil.WriteFile(file+".sha512", []byte(out), 0644)
+	return computedHash, nil
 }

 // Mage executes mage targets in the specified directory.
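For reference, a minimal standalone sketch of what the split above enables: compute the hash once, then write the ".sha512" sidecar. This is illustrative only (not part of the patch, and not the dev-tools/mage API); names here are hypothetical.

// sha512sidecar mirrors the GetSHA512Hash + CreateSHA512File pair above.
package main

import (
	"crypto/sha512"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"path/filepath"
)

// sha512Hex streams the file through a SHA-512 hasher and returns the hex sum.
func sha512Hex(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	sum := sha512.New()
	if _, err := io.Copy(sum, f); err != nil {
		return "", err
	}
	return hex.EncodeToString(sum.Sum(nil)), nil
}

func main() {
	if len(os.Args) != 2 {
		fmt.Fprintln(os.Stderr, "usage: sha512sidecar <file>")
		os.Exit(2)
	}
	file := os.Args[1]
	hash, err := sha512Hex(file)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Sidecar format: "<hex hash> <filename>", as in CreateSHA512File.
	out := fmt.Sprintf("%v %v", hash, filepath.Base(file))
	if err := os.WriteFile(file+".sha512", []byte(out), 0644); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}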
diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go index 68458f65952..1ae6fc9e148 100644 --- a/dev-tools/mage/pkgtypes.go +++ b/dev-tools/mage/pkgtypes.go @@ -906,10 +906,6 @@ func addFileToTar(ar *tar.Writer, baseDir string, pkgFile PackageFile) error { header.Mode = int64(0755) } - if strings.Contains(info.Name(), "disabled") { - log.Println(">>>>>", info.Name(), pkgFile.ConfigMode, "matches", configFilePattern.MatchString(info.Name()), "or", componentConfigFilePattern.MatchString(info.Name())) - } - if filepath.IsAbs(pkgFile.Target) { baseDir = "" } diff --git a/magefile.go b/magefile.go index 6bb88c5fff9..a03ea8988fe 100644 --- a/magefile.go +++ b/magefile.go @@ -39,6 +39,8 @@ import ( _ "github.com/elastic/elastic-agent/dev-tools/mage/target/integtest/notests" // mage:import "github.com/elastic/elastic-agent/dev-tools/mage/target/test" + + "gopkg.in/yaml.v2" ) const ( @@ -52,6 +54,7 @@ const ( configFile = "elastic-agent.yml" agentDropPath = "AGENT_DROP_PATH" specSuffix = ".spec.yml" // TODO: change after beat ignores yml config + checksumFilename = "checksum.yml" ) // Aliases for commands required by master makefile @@ -373,7 +376,7 @@ func AssembleDarwinUniversal() error { cmd := "lipo" if _, err := exec.LookPath(cmd); err != nil { - return fmt.Errorf("'%s' is required to assemble the universal binary: %w", + return fmt.Errorf("%q is required to assemble the universal binary: %w", cmd, err) } @@ -437,7 +440,7 @@ func requiredPackagesPresent(basePath, beat, version string, requiredPackages [] path := filepath.Join(basePath, "build", "distributions", packageName) if _, err := os.Stat(path); err != nil { - fmt.Printf("Package '%s' does not exist on path: %s\n", packageName, path) + fmt.Printf("Package %q does not exist on path: %s\n", packageName, path) return false } } @@ -794,6 +797,7 @@ func packageAgent(requiredPackages []string, packagingFn func()) { panic(err) } + checksums := make(map[string]string) for _, f := range files { options := copy.Options{ OnSymlink: func(_ string) copy.SymlinkAction { @@ -814,9 +818,16 @@ func packageAgent(requiredPackages []string, packagingFn func()) { specName = specName[:idx] } - if err := devtools.Copy(filepath.Join("specs", specName+specSuffix), filepath.Join(versionedDropPath, specName+specSuffix)); err != nil { + checksum, err := copyComponentSpecs(specName, versionedDropPath) + if err != nil { panic(err) } + + checksums[specName+specSuffix] = checksum + } + + if err := appendComponentChecksums(versionedDropPath, checksums); err != nil { + panic(err) } } @@ -827,6 +838,44 @@ func packageAgent(requiredPackages []string, packagingFn func()) { mg.Deps(CrossBuild, CrossBuildGoDaemon) mg.SerialDeps(devtools.Package, TestPackages) } +func copyComponentSpecs(componentName, versionedDropPath string) (string, error) { + sourceSpecFile := filepath.Join("specs", componentName+specSuffix) + targetPath := filepath.Join(versionedDropPath, componentName+specSuffix) + err := devtools.Copy(sourceSpecFile, targetPath) + if err != nil { + return "", errors.Wrapf(err, "failed copying spec file %q to %q", sourceSpecFile, targetPath) + } + + // compute checksum + return devtools.GetSHA512Hash(sourceSpecFile) +} + +func appendComponentChecksums(versionedDropPath string, checksums map[string]string) error { + // for each spec file checksum calculate binary checksum as well + for file := range checksums { + if !strings.HasSuffix(file, specSuffix) { + continue + } + + componentFile := strings.TrimSuffix(file, specSuffix) + hash, err := 
devtools.GetSHA512Hash(filepath.Join(versionedDropPath, componentFile)) + if errors.Is(err, os.ErrNotExist) { + fmt.Printf(">>> Computing hash for %q failed: file not present\n", componentFile) + continue + } else if err != nil { + return err + } + + checksums[componentFile] = hash + } + + content, err := yamlChecksum(checksums) + if err != nil { + return err + } + + return os.WriteFile(filepath.Join(versionedDropPath, checksumFilename), content, 0644) +} func movePackagesToArchive(dropPath string, requiredPackages []string) string { archivePath := filepath.Join(dropPath, "archives") @@ -959,3 +1008,22 @@ func injectBuildVars(m map[string]string) { m[k] = v } } + +func yamlChecksum(checksums map[string]string) ([]byte, error) { + filesMap := make(map[string][]checksumFile) + files := make([]checksumFile, 0, len(checksums)) + for file, checksum := range checksums { + files = append(files, checksumFile{ + Name: file, + Checksum: checksum, + }) + } + + filesMap["files"] = files + return yaml.Marshal(filesMap) +} + +type checksumFile struct { + Name string `yaml:"name"` + Checksum string `yaml:"sha512"` +} From 8812fc9b4c8f7c4199d1d585536d8a23dff7a16e Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 13 Jul 2022 16:16:17 -0400 Subject: [PATCH 06/49] V2 Runtime Component Manager (#645) * Add runtime for command v2 components. * Fix imports. * Add tests for watching checkins. * Fix lint and move checkin period to a configurable timeout. * Fix tests now that checkin timeout needs to be defined. * Fix code review and lint. --- .gitignore | 3 + internal/pkg/agent/cmd/container.go | 8 +- internal/pkg/agent/cmd/enroll_cmd.go | 5 +- internal/pkg/agent/configuration/settings.go | 2 +- internal/pkg/agent/operation/common_test.go | 2 +- .../pkg/agent/operation/monitoring_test.go | 2 +- ...crypted_disk_storage_windows_linux_test.go | 3 +- .../install/atomic/atomic_installer.go | 3 +- .../core/monitoring/server/processes_test.go | 2 +- internal/pkg/core/plugin/process/app.go | 2 +- internal/pkg/core/plugin/process/start.go | 7 +- internal/pkg/core/plugin/process/status.go | 2 +- internal/pkg/core/plugin/service/app.go | 2 +- internal/pkg/core/process/process.go | 91 -- internal/pkg/core/state/state.go | 2 +- magefile.go | 4 + main.go | 4 +- pkg/component/fake/README.md | 3 + pkg/component/fake/main.go | 285 ++++ pkg/component/runtime/command.go | 397 +++++ pkg/component/runtime/failed.go | 104 ++ pkg/component/runtime/manager.go | 578 ++++++++ pkg/component/runtime/manager_test.go | 1288 +++++++++++++++++ pkg/component/runtime/runtime.go | 356 +++++ pkg/component/runtime/runtime_comm.go | 318 ++++ pkg/component/runtime/subscription.go | 28 + pkg/component/spec.go | 17 +- {internal/pkg => pkg}/core/process/cmd.go | 10 +- .../pkg => pkg}/core/process/cmd_darwin.go | 13 +- .../pkg => pkg}/core/process/cmd_linux.go | 13 +- {internal/pkg => pkg}/core/process/config.go | 0 pkg/core/process/external_unix.go | 30 + pkg/core/process/external_windows.go | 53 + .../cmd/proc => pkg/core/process}/job_unix.go | 2 +- .../proc => pkg/core/process}/job_windows.go | 2 +- pkg/core/process/process.go | 101 ++ 36 files changed, 3609 insertions(+), 133 deletions(-) delete mode 100644 internal/pkg/core/process/process.go create mode 100644 pkg/component/fake/README.md create mode 100644 pkg/component/fake/main.go create mode 100644 pkg/component/runtime/command.go create mode 100644 pkg/component/runtime/failed.go create mode 100644 pkg/component/runtime/manager.go create mode 100644 pkg/component/runtime/manager_test.go 
create mode 100644 pkg/component/runtime/runtime.go create mode 100644 pkg/component/runtime/runtime_comm.go create mode 100644 pkg/component/runtime/subscription.go rename {internal/pkg => pkg}/core/process/cmd.go (78%) rename {internal/pkg => pkg}/core/process/cmd_darwin.go (77%) rename {internal/pkg => pkg}/core/process/cmd_linux.go (80%) rename {internal/pkg => pkg}/core/process/config.go (100%) create mode 100644 pkg/core/process/external_unix.go create mode 100644 pkg/core/process/external_windows.go rename {internal/pkg/agent/cmd/proc => pkg/core/process}/job_unix.go (97%) rename {internal/pkg/agent/cmd/proc => pkg/core/process}/job_windows.go (99%) create mode 100644 pkg/core/process/process.go diff --git a/.gitignore b/.gitignore index 89eaa67db73..f0b7911dbef 100644 --- a/.gitignore +++ b/.gitignore @@ -57,8 +57,11 @@ internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/configura internal/pkg/agent/operation/tests/scripts/servicable-1.0-darwin-x86/configurable internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/configurable internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/serviceable +internal/pkg/agent/operation/tests/scripts/configurable +internal/pkg/agent/operation/tests/scripts/serviceable internal/pkg/agent/application/fleet.yml internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/exec +pkg/component/fake/fake # VSCode /.vscode diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 047d51a8fef..ff1b40936d1 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -36,9 +36,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/artifact/install/tar" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/version" ) @@ -727,10 +727,6 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err apmDir := filepath.Join(path, files[0].Name()) // Start apm-server process respecting path ENVs apmBinary := filepath.Join(apmDir, spec.Cmd) - log, err := logger.New("apm-server", false) - if err != nil { - return nil, err - } // add APM Server specific configuration var args []string addEnv := func(arg, env string) { @@ -751,7 +747,7 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err addEnv("--httpprof", "HTTPPROF") addSettingEnv("gc_percent", "APMSERVER_GOGC") logInfo(streams, "Starting legacy apm-server daemon as a subprocess.") - return process.Start(log, apmBinary, nil, os.Geteuid(), os.Getegid(), args) + return process.Start(apmBinary, os.Geteuid(), os.Getegid(), args, nil) } func logToStderr(cfg *configuration.Configuration) { diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index 6d8858a99c4..a886ca5bafb 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -37,12 +37,12 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/authority" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" 
"github.com/elastic/elastic-agent/internal/pkg/fleetapi" fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/internal/pkg/remote" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" ) const ( @@ -600,9 +600,10 @@ func (c *enrollCmd) startAgent(ctx context.Context) (<-chan *os.ProcessState, er args = append(args, "--path.home.unversioned") } proc, err := process.StartContext( - ctx, c.log, cmd, nil, os.Geteuid(), os.Getegid(), args, func(c *exec.Cmd) { + ctx, cmd, os.Geteuid(), os.Getegid(), args, nil, func(c *exec.Cmd) error { c.Stdout = os.Stdout c.Stderr = os.Stderr + return nil }) if err != nil { return nil, err diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 93ef491670f..7445f02a462 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -9,9 +9,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/artifact" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/agent/operation/common_test.go b/internal/pkg/agent/operation/common_test.go index 2c897bbb8b5..aadc65b5db9 100644 --- a/internal/pkg/agent/operation/common_test.go +++ b/internal/pkg/agent/operation/common_test.go @@ -25,10 +25,10 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/agent/operation/monitoring_test.go b/internal/pkg/agent/operation/monitoring_test.go index b48765612dd..c64ede2a8f1 100644 --- a/internal/pkg/agent/operation/monitoring_test.go +++ b/internal/pkg/agent/operation/monitoring_test.go @@ -27,10 +27,10 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go index cc53d065c73..a965295e9cc 100644 --- a/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go +++ b/internal/pkg/agent/storage/encrypted_disk_storage_windows_linux_test.go @@ -16,8 +16,9 @@ import ( "path/filepath" "testing" - 
"github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/google/go-cmp/cmp" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" ) const ( diff --git a/internal/pkg/artifact/install/atomic/atomic_installer.go b/internal/pkg/artifact/install/atomic/atomic_installer.go index 3e61aacb4ef..10c2652c1c8 100644 --- a/internal/pkg/artifact/install/atomic/atomic_installer.go +++ b/internal/pkg/artifact/install/atomic/atomic_installer.go @@ -11,8 +11,9 @@ import ( "path/filepath" "runtime" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/hashicorp/go-multierror" + + "github.com/elastic/elastic-agent/internal/pkg/agent/program" ) type embeddedInstaller interface { diff --git a/internal/pkg/core/monitoring/server/processes_test.go b/internal/pkg/core/monitoring/server/processes_test.go index ff0728c8816..f700274a16b 100644 --- a/internal/pkg/core/monitoring/server/processes_test.go +++ b/internal/pkg/core/monitoring/server/processes_test.go @@ -15,9 +15,9 @@ import ( "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/elastic-agent/pkg/core/process" ) func TestProcesses(t *testing.T) { diff --git a/internal/pkg/core/plugin/process/app.go b/internal/pkg/core/plugin/process/app.go index acb38ee92df..c184cefe397 100644 --- a/internal/pkg/core/plugin/process/app.go +++ b/internal/pkg/core/plugin/process/app.go @@ -19,11 +19,11 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/plugin/process/start.go b/internal/pkg/core/plugin/process/start.go index 29770ae714b..55081d9977d 100644 --- a/internal/pkg/core/plugin/process/start.go +++ b/internal/pkg/core/plugin/process/start.go @@ -18,8 +18,8 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) @@ -128,14 +128,13 @@ func (a *Application) start(ctx context.Context, t app.Taggable, cfg map[string] spec.Args = injectDataPath(spec.Args, a.pipelineID, a.id) a.state.ProcessInfo, err = process.Start( - a.logger, spec.BinaryPath, - a.processConfig, a.uid, a.gid, - spec.Args, func(c *exec.Cmd) { + spec.Args, nil, func(c *exec.Cmd) error { c.Stdout = newLoggerWriter(a.Name(), logStdOut, a.logger) c.Stderr = newLoggerWriter(a.Name(), logStdErr, a.logger) + return nil }) if err != nil { return fmt.Errorf("%q failed to start %q: %w", diff --git a/internal/pkg/core/plugin/process/status.go b/internal/pkg/core/plugin/process/status.go index 
92883160398..50488dfd77b 100644 --- a/internal/pkg/core/plugin/process/status.go +++ b/internal/pkg/core/plugin/process/status.go @@ -13,8 +13,8 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/plugin/service/app.go b/internal/pkg/core/plugin/service/app.go index 14a3abb6148..ab3631f12c0 100644 --- a/internal/pkg/core/plugin/service/app.go +++ b/internal/pkg/core/plugin/service/app.go @@ -23,11 +23,11 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/app" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" "github.com/elastic/elastic-agent/internal/pkg/core/plugin" - "github.com/elastic/elastic-agent/internal/pkg/core/process" "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/pkg/core/server" ) diff --git a/internal/pkg/core/process/process.go b/internal/pkg/core/process/process.go deleted file mode 100644 index 65613fb9978..00000000000 --- a/internal/pkg/core/process/process.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package process - -import ( - "context" - "fmt" - "io" - "os" - "os/exec" - - "github.com/elastic/elastic-agent/internal/pkg/agent/cmd/proc" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -var ( - // ErrProcessStartFailedTimeout is a failure of start due to timeout - ErrProcessStartFailedTimeout = errors.New("process failed to start due to timeout") -) - -// Info groups information about fresh new process -type Info struct { - PID int - Process *os.Process - Stdin io.WriteCloser -} - -// Option is an option func to change the underlying command -type Option func(c *exec.Cmd) - -// Start starts a new process -// Returns: -// - network address of child process -// - process id -// - error -func Start(logger *logger.Logger, path string, config *Config, uid, gid int, args []string, opts ...Option) (proc *Info, err error) { - return StartContext(nil, logger, path, config, uid, gid, args, opts...) //nolint:staticcheck // calls a different function if no ctx -} - -// StartContext starts a new process with context. -// Returns: -// - network address of child process -// - process id -// - error -func StartContext(ctx context.Context, logger *logger.Logger, path string, config *Config, uid, gid int, args []string, opts ...Option) (*Info, error) { - cmd := getCmd(ctx, logger, path, []string{}, uid, gid, args...) - for _, o := range opts { - o(cmd) - } - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, err - } - - // start process - if err := cmd.Start(); err != nil { - return nil, errors.New(err, fmt.Sprintf("failed to start '%s'", path)) - } - - // Hook to JobObject on windows, noop on other platforms. - // This ties the application processes lifespan to the agent's. 
- // Fixes the orphaned beats processes left behind situation - // after the agent process gets killed. - if err := proc.JobObject.Assign(cmd.Process); err != nil { - logger.Errorf("application process failed job assign: %v", err) - } - - return &Info{ - PID: cmd.Process.Pid, - Process: cmd.Process, - Stdin: stdin, - }, err -} - -// Stop stops the process cleanly. -func (i *Info) Stop() error { - return terminateCmd(i.Process) -} - -// StopWait stops the process and waits for it to exit. -func (i *Info) StopWait() error { - err := i.Stop() - if err != nil { - return err - } - _, err = i.Process.Wait() - return err -} diff --git a/internal/pkg/core/state/state.go b/internal/pkg/core/state/state.go index 080efb42c88..57dfb639b72 100644 --- a/internal/pkg/core/state/state.go +++ b/internal/pkg/core/state/state.go @@ -10,7 +10,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "github.com/elastic/elastic-agent/internal/pkg/core/process" + "github.com/elastic/elastic-agent/pkg/core/process" ) // Status describes the current status of the application process. diff --git a/magefile.go b/magefile.go index a03ea8988fe..721071b2a0f 100644 --- a/magefile.go +++ b/magefile.go @@ -272,18 +272,22 @@ func (Build) Clean() { func (Build) TestBinaries() error { p := filepath.Join("internal", "pkg", "agent", "operation", "tests", "scripts") p2 := filepath.Join("internal", "pkg", "agent", "transpiler", "tests") + p3 := filepath.Join("pkg", "component") configurableName := "configurable" serviceableName := "serviceable" execName := "exec" + fakeName := "fake" if runtime.GOOS == "windows" { configurableName += ".exe" serviceableName += ".exe" execName += ".exe" + fakeName += ".exe" } return combineErr( RunGo("build", "-o", filepath.Join(p, configurableName), filepath.Join(p, "configurable-1.0-darwin-x86_64", "main.go")), RunGo("build", "-o", filepath.Join(p, serviceableName), filepath.Join(p, "serviceable-1.0-darwin-x86_64", "main.go")), RunGo("build", "-o", filepath.Join(p2, "exec-1.0-darwin-x86_64", execName), filepath.Join(p2, "exec-1.0-darwin-x86_64", "main.go")), + RunGo("build", "-o", filepath.Join(p3, "fake", fakeName), filepath.Join(p3, "fake", "main.go")), ) } diff --git a/main.go b/main.go index 81fb4712d01..ec2959614f3 100644 --- a/main.go +++ b/main.go @@ -11,7 +11,7 @@ import ( "time" "github.com/elastic/elastic-agent/internal/pkg/agent/cmd" - "github.com/elastic/elastic-agent/internal/pkg/agent/cmd/proc" + "github.com/elastic/elastic-agent/pkg/core/process" ) // Setups and Runs agent. @@ -21,7 +21,7 @@ func main() { os.Exit(1) } - pj, err := proc.CreateJobObject() + pj, err := process.CreateJobObject() if err != nil { fmt.Fprintf(os.Stderr, "Failed to initialize process job object: %v\n", err) os.Exit(1) diff --git a/pkg/component/fake/README.md b/pkg/component/fake/README.md new file mode 100644 index 00000000000..678fb3dd4f2 --- /dev/null +++ b/pkg/component/fake/README.md @@ -0,0 +1,3 @@ +# Fake Component + +Controllable through GRPC control protocol with actions. Allows unit tests to simulate control and communication with a running sub-process. diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go new file mode 100644 index 00000000000..d92586aa0a3 --- /dev/null +++ b/pkg/component/fake/main.go @@ -0,0 +1,285 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
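+//
+// The fake component speaks the elastic-agent-client V2 control protocol: it
+// reads connection information from stdin, mirrors the expected unit state,
+// and registers "set_state" and "kill" actions so the runtime manager tests
+// can drive it.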
+ +package main + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "os/signal" + "syscall" + "time" + + "gopkg.in/yaml.v2" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" +) + +const ( + fake = "fake" +) + +func main() { + err := run() + if err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} + +func run() error { + ver := client.VersionInfo{ + Name: fake, + Version: "1.0", + Meta: map[string]string{ + "input": fake, + }, + } + c, _, err := client.NewV2FromReader(os.Stdin, ver) + if err != nil { + return fmt.Errorf("failed to create GRPC client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + n := make(chan os.Signal, 1) + signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + defer func() { + signal.Stop(n) + cancel() + }() + go func() { + select { + case <-n: + cancel() + case <-ctx.Done(): + } + }() + + err = c.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start GRPC client: %w", err) + } + + s := newStateManager() + for { + select { + case <-ctx.Done(): + return nil + case change := <-c.UnitChanges(): + switch change.Type { + case client.UnitChangedAdded: + s.added(change.Unit) + case client.UnitChangedModified: + s.modified(change.Unit) + case client.UnitChangedRemoved: + s.removed(change.Unit) + } + case err := <-c.Errors(): + if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { + fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) + } + } + } +} + +type unitKey struct { + unitType client.UnitType + unitID string +} + +type stateManager struct { + units map[unitKey]runningUnit +} + +func newStateManager() *stateManager { + return &stateManager{units: make(map[unitKey]runningUnit)} +} + +func (s *stateManager) added(unit *client.Unit) { + k := newUnitKey(unit) + _, ok := s.units[k] + if ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate unit", nil) + return + } + r, err := newRunningUnit(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.units[k] = r +} + +func (s *stateManager) modified(unit *client.Unit) { + existing, ok := s.units[newUnitKey(unit)] + if !ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) + return + } + err := existing.Update(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } +} + +func (s *stateManager) removed(unit *client.Unit) { + k := newUnitKey(unit) + _, ok := s.units[k] + if !ok { + return + } + delete(s.units, k) +} + +type runningUnit interface { + Unit() *client.Unit + Update(u *client.Unit) error +} + +type fakeInput struct { + unit *client.Unit + cfg inputConfig + + state client.UnitState + stateMsg string +} + +func newFakeInput(unit *client.Unit, cfg inputConfig) (*fakeInput, error) { + i := &fakeInput{ + unit: unit, + cfg: cfg, + state: cfg.State, + stateMsg: cfg.Message, + } + unit.RegisterAction(&stateSetterAction{i}) + unit.RegisterAction(&killAction{}) + _ = unit.UpdateState(i.state, i.stateMsg, nil) + return i, nil +} + +func (f *fakeInput) Unit() *client.Unit { + return f.unit +} + +func (f *fakeInput) Update(u *client.Unit) error { + expected, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this input to stop + _ = u.UpdateState(client.UnitStateStopping, "Stopping", nil) + go func() { + <-time.After(1 * time.Second) + _ = u.UpdateState(client.UnitStateStopped, "Stopped", nil) + }() + return nil + } + + 
var cfg map[string]interface{}
+	err := yaml.Unmarshal([]byte(config), &cfg)
+	if err != nil {
+		return fmt.Errorf("failed to unmarshal YAML: %w", err)
+	}
+	unitType, ok := cfg["type"]
+	if !ok {
+		return fmt.Errorf("unit missing config type")
+	}
+	if unitType != fake {
+		return fmt.Errorf("unit type changed with the same unit ID: %s", unitType)
+	}
+
+	state, stateMsg, err := getStateFromMap(cfg)
+	if err != nil {
+		return fmt.Errorf("unit config parsing error: %w", err)
+	}
+	f.state = state
+	f.stateMsg = stateMsg
+	_ = u.UpdateState(f.state, f.stateMsg, nil)
+	return nil
+}
+
+type stateSetterAction struct {
+	input *fakeInput
+}
+
+func (s *stateSetterAction) Name() string {
+	return "set_state"
+}
+
+func (s *stateSetterAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) {
+	state, stateMsg, err := getStateFromMap(params)
+	if err != nil {
+		return nil, err
+	}
+	s.input.state = state
+	s.input.stateMsg = stateMsg
+	_ = s.input.unit.UpdateState(s.input.state, s.input.stateMsg, nil)
+	return nil, nil
+}
+
+type killAction struct {
+}
+
+func (s *killAction) Name() string {
+	return "kill"
+}
+
+func (s *killAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) {
+	os.Exit(1)
+	return nil, nil
+}
+
+func newRunningUnit(unit *client.Unit) (runningUnit, error) {
+	_, config := unit.Expected()
+	var cfg inputConfig
+	err := yaml.Unmarshal([]byte(config), &cfg)
+	if err != nil {
+		return nil, fmt.Errorf("failed to unmarshal YAML: %w", err)
+	}
+	if cfg.Type == "" {
+		return nil, fmt.Errorf("unit config type empty")
+	}
+	switch cfg.Type {
+	case fake:
+		return newFakeInput(unit, cfg)
+	}
+	return nil, fmt.Errorf("unknown unit config type: %s", cfg.Type)
+}
+
+func newUnitKey(unit *client.Unit) unitKey {
+	return unitKey{
+		unitType: unit.Type(),
+		unitID:   unit.ID(),
+	}
+}
+
+func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, error) {
+	state, ok := cfg["state"]
+	if !ok {
+		return client.UnitStateStarting, "", errors.New("missing required state parameter")
+	}
+	stateTypeI, ok := state.(int)
+	if !ok {
+		// fall back to float64; JSON decodes numbers as float64, while YAML decodes them as int
+		stateTypeF, ok := state.(float64)
+		if !ok {
+			return client.UnitStateStarting, "", fmt.Errorf("state parameter is not a valid unit state: %T", state)
+		}
+		stateTypeI = int(stateTypeF)
+	}
+	stateType := client.UnitState(stateTypeI)
+	stateMsgStr := ""
+	stateMsg, ok := cfg["message"]
+	if ok {
+		stateMsgStr, _ = stateMsg.(string)
+	}
+	return stateType, stateMsgStr, nil
+}
+
+type inputConfig struct {
+	Type    string           `json:"type" yaml:"type"`
+	State   client.UnitState `json:"state" yaml:"state"`
+	Message string           `json:"message" yaml:"message"`
+}
diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go
new file mode 100644
index 00000000000..dce34e4bcd4
--- /dev/null
+++ b/pkg/component/runtime/command.go
@@ -0,0 +1,397 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package runtime + +import ( + "context" + "errors" + "fmt" + "os" + "os/exec" + "time" + + "gopkg.in/yaml.v2" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/process" +) + +type actionMode int + +const ( + actionStart = actionMode(0) + actionStop = actionMode(1) +) + +type procState struct { + proc *process.Info + state *os.ProcessState +} + +// CommandRuntime provides the command runtime for running a component as a subprocess. +type CommandRuntime struct { + current component.Component + + ch chan ComponentState + actionCh chan actionMode + procCh chan procState + compCh chan component.Component + + actionState actionMode + proc *process.Info + + expected ComponentState + observed ComponentState + lastCheckin time.Time + missedCheckins int +} + +// NewCommandRuntime creates a new command runtime for the provided component. +func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { + if comp.Spec.Spec.Command == nil { + return nil, errors.New("must have command defined in specification") + } + expected := newComponentState(&comp, client.UnitStateHealthy, "", 1) + observed := newComponentState(&comp, client.UnitStateStarting, "Starting", 0) + return &CommandRuntime{ + current: comp, + ch: make(chan ComponentState), + actionCh: make(chan actionMode), + procCh: make(chan procState), + compCh: make(chan component.Component), + actionState: actionStart, + expected: expected, + observed: observed, + }, nil +} + +// Run starts the runtime for the component. +// +// Called by Manager inside a go-routine. Run should not return until the passed in context is done. Run is always +// called before any of the other methods in the interface and once the context is done none of those methods will +// ever be called again. 
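+//
+// In addition to context cancellation, the select loop below multiplexes five
+// event sources: start/stop actions, exit of the spawned process, component
+// definition updates, observed check-ins from the sub-process, and a ticker
+// that counts missed check-ins. After maxCheckinMisses misses the process is
+// killed; once its exit is observed it is started again.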
+func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error {
+	checkinPeriod := c.current.Spec.Spec.Command.Timeouts.Checkin
+	c.forceCompState(client.UnitStateStarting, "Starting")
+	t := time.NewTicker(checkinPeriod)
+	defer t.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case as := <-c.actionCh:
+			c.actionState = as
+			switch as {
+			case actionStart:
+				if err := c.start(comm); err != nil {
+					c.forceCompState(client.UnitStateFailed, err.Error())
+				}
+				t.Reset(checkinPeriod)
+			case actionStop:
+				if err := c.stop(ctx); err != nil {
+					c.forceCompState(client.UnitStateFailed, err.Error())
+				}
+			}
+		case ps := <-c.procCh:
+			// ignores old processes
+			if ps.proc == c.proc {
+				c.proc = nil
+				if c.handleProc(ps.state) {
+					// start again
+					if err := c.start(comm); err != nil {
+						c.forceCompState(client.UnitStateFailed, err.Error())
+					}
+				}
+				t.Reset(checkinPeriod)
+			}
+		case newComp := <-c.compCh:
+			c.expected.syncComponent(&newComp, client.UnitStateHealthy, "Healthy", 1)
+			if c.mustSendExpected() {
+				c.sendExpected(comm)
+			}
+		case checkin := <-comm.CheckinObserved():
+			sendExpected := false
+			changed := false
+			if c.observed.State == client.UnitStateStarting {
+				// first observation after start sets the component to healthy
+				c.observed.State = client.UnitStateHealthy
+				c.observed.Message = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID)
+				changed = true
+			}
+			if c.lastCheckin.IsZero() {
+				// first check-in
+				sendExpected = true
+			}
+			c.lastCheckin = time.Now().UTC()
+			if c.observed.syncCheckin(checkin) {
+				changed = true
+			}
+			if c.mustSendExpected() {
+				sendExpected = true
+			}
+			if sendExpected {
+				c.sendExpected(comm)
+			}
+			if changed {
+				c.sendObserved()
+			}
+			if c.cleanupStopped() {
+				c.sendObserved()
+			}
+		case <-t.C:
+			if c.proc != nil && c.actionState == actionStart {
+				// running and should be running
+				now := time.Now().UTC()
+				if c.lastCheckin.IsZero() {
+					// never checked-in
+					c.missedCheckins++
+				} else if now.Sub(c.lastCheckin) > checkinPeriod {
+					// missed check-in during required period
+					c.missedCheckins++
+				} else if now.Sub(c.lastCheckin) <= checkinPeriod {
+					c.missedCheckins = 0
+				}
+				if c.missedCheckins == 0 {
+					c.compState(client.UnitStateHealthy)
+				} else if c.missedCheckins > 0 && c.missedCheckins < maxCheckinMisses {
+					c.compState(client.UnitStateDegraded)
+				} else if c.missedCheckins >= maxCheckinMisses {
+					// something is wrong; the command should be checking in
+					//
+					// at this point it is assumed the sub-process has locked up and will not respond to a nice
+					// termination signal, so we jump directly to killing the process
+					msg := fmt.Sprintf("Failed: pid '%d' missed %d check-ins and will be killed", c.proc.PID, maxCheckinMisses)
+					c.forceCompState(client.UnitStateFailed, msg)
+					_ = c.proc.Kill() // watcher will handle it from here
+				}
+			}
+		}
+	}
+}
+
+// Watch returns the channel that sends component state.
+//
+// Channel should send a new state anytime a state for a unit or the whole component changes.
+func (c *CommandRuntime) Watch() <-chan ComponentState {
+	return c.ch
+}
+
+// Start starts the component.
+//
+// Non-blocking and never returns an error.
+func (c *CommandRuntime) Start() error {
+	c.actionCh <- actionStart
+	return nil
+}
+
+// Update updates the running component with a new revision of the component definition.
+//
+// Non-blocking and never returns an error.
+func (c *CommandRuntime) Update(comp component.Component) error {
+	c.compCh <- comp
+	return nil
+}
+
+// Stop stops the component.
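+// Sends actionStop to the run loop, which asks the process to stop and falls
+// back to killing it after the configured stop timeout.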
+// +// Non-blocking and never returns an error. +func (c *CommandRuntime) Stop() error { + c.actionCh <- actionStop + return nil +} + +// Teardown tears down the component. +// +// Non-blocking and never returns an error. +func (c *CommandRuntime) Teardown() error { + // teardown is not different from stop for command runtime + return c.Stop() +} + +// forceCompState force updates the state for the entire component, forcing that state on all units. +func (c *CommandRuntime) forceCompState(state client.UnitState, msg string) { + c.observed.State = state + c.observed.Message = msg + for k, unit := range c.observed.Units { + unit.State = state + unit.Message = msg + unit.Payload = nil + unit.configStateIdx = 0 + + // unit is a copy and must be set back into the map + c.observed.Units[k] = unit + } + c.sendObserved() +} + +// compState updates just the component state not all the units. +func (c *CommandRuntime) compState(state client.UnitState) { + msg := "Unknown" + if state == client.UnitStateHealthy { + msg = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID) + } else if state == client.UnitStateDegraded { + if c.missedCheckins == 1 { + msg = fmt.Sprintf("Degraded: pid '%d' missed 1 check-in", c.proc.PID) + } else { + msg = fmt.Sprintf("Degraded: pid '%d' missed %d check-ins", c.proc.PID, c.missedCheckins) + } + } + if c.observed.State != state || c.observed.Message != msg { + c.observed.State = state + c.observed.Message = msg + c.sendObserved() + } +} + +func (c *CommandRuntime) sendObserved() { + c.ch <- c.observed.Copy() +} + +func (c *CommandRuntime) start(comm Communicator) error { + if c.proc != nil { + // already running + return nil + } + cmdSpec := c.current.Spec.Spec.Command + var env []string + for _, e := range cmdSpec.Env { + env = append(env, fmt.Sprintf("%s=%s", e.Name, e.Value)) + } + proc, err := process.Start(c.current.Spec.BinaryPath, os.Geteuid(), os.Getgid(), cmdSpec.Args, env, attachOutErr) + if err != nil { + return err + } + c.lastCheckin = time.Time{} + c.missedCheckins = 0 + c.proc = proc + c.forceCompState(client.UnitStateStarting, fmt.Sprintf("Starting: spawned pid '%d'", c.proc.PID)) + c.startWatcher(proc, comm) + return nil +} + +func (c *CommandRuntime) stop(ctx context.Context) error { + if c.proc == nil { + // already stopped + return nil + } + cmdSpec := c.current.Spec.Spec.Command + go func(info *process.Info, timeout time.Duration) { + t := time.NewTimer(timeout) + defer t.Stop() + select { + case <-ctx.Done(): + return + case <-t.C: + // kill no matter what (might already be stopped) + _ = info.Kill() + } + }(c.proc, cmdSpec.Timeouts.Stop) + return c.proc.Stop() +} + +func (c *CommandRuntime) startWatcher(info *process.Info, comm Communicator) { + go func() { + err := comm.WriteConnInfo(info.Stdin) + if err != nil { + c.forceCompState(client.UnitStateFailed, fmt.Sprintf("Failed: failed to provide connection information to spawned pid '%d': %s", info.PID, err)) + // kill instantly + info.Kill() + } else { + _ = info.Stdin.Close() + } + + ch := info.Wait() + s := <-ch + c.procCh <- procState{ + proc: info, + state: s, + } + }() +} + +func (c *CommandRuntime) handleProc(state *os.ProcessState) bool { + switch c.actionState { + case actionStart: + // should still be running + stopMsg := fmt.Sprintf("Failed: pid '%d' exited with code '%d'", state.Pid(), state.ExitCode()) + c.forceCompState(client.UnitStateFailed, stopMsg) + return true + case actionStop: + // stopping (should have exited) + stopMsg := fmt.Sprintf("Stopped: pid '%d' exited with 
code '%d'", state.Pid(), state.ExitCode()) + c.forceCompState(client.UnitStateStopped, stopMsg) + } + return false +} + +func (c *CommandRuntime) mustSendExpected() bool { + if len(c.expected.Units) != len(c.observed.Units) { + // mismatch on unit count + return true + } + for ek, e := range c.expected.Units { + o, ok := c.observed.Units[ek] + if !ok { + // unit missing + return true + } + if o.configStateIdx != e.configStateIdx || e.State != o.State { + // config or state mismatch + return true + } + } + return false +} + +func (c *CommandRuntime) sendExpected(comm Communicator) error { + units := make([]*proto.UnitExpected, 0, len(c.expected.Units)) + for k, u := range c.expected.Units { + e := &proto.UnitExpected{ + Id: k.UnitID, + Type: proto.UnitType(k.UnitType), + State: proto.State(u.State), + ConfigStateIdx: u.configStateIdx, + Config: "", + } + o, ok := c.observed.Units[k] + if !ok || o.configStateIdx != u.configStateIdx { + cfg, err := yaml.Marshal(u.config) + if err != nil { + return fmt.Errorf("failed to marshal YAML for unit %s: %w", k.UnitID, err) + } + e.Config = string(cfg) + } + units = append(units, e) + } + comm.CheckinExpected(&proto.CheckinExpected{Units: units}) + return nil +} + +func (c *CommandRuntime) cleanupStopped() bool { + cleaned := false + for ek, e := range c.expected.Units { + if e.State == client.UnitStateStopped { + // should be stopped; check if observed is also reporting stopped + o, ok := c.observed.Units[ek] + if ok && o.State == client.UnitStateStopped { + // its also stopped; so it can now be removed from both + delete(c.expected.Units, ek) + delete(c.observed.Units, ek) + cleaned = true + } + } + } + return cleaned +} + +func attachOutErr(cmd *exec.Cmd) error { + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + return nil +} diff --git a/pkg/component/runtime/failed.go b/pkg/component/runtime/failed.go new file mode 100644 index 00000000000..b495eeb4d2a --- /dev/null +++ b/pkg/component/runtime/failed.go @@ -0,0 +1,104 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "context" + "errors" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/pkg/component" +) + +// FailedRuntime is used for a component that has an error from the component loader. +type FailedRuntime struct { + ch chan ComponentState + current component.Component + done chan bool +} + +// NewFailedRuntime creates a runtime for a component that has an error from the component loader. +func NewFailedRuntime(comp component.Component) (ComponentRuntime, error) { + if comp.Err == nil { + return nil, errors.New("must be a component that has a defined error") + } + return &FailedRuntime{ + ch: make(chan ComponentState), + current: comp, + done: make(chan bool), + }, nil +} + +// Run runs the runtime for a component that got an error from the component loader. +func (c *FailedRuntime) Run(ctx context.Context, _ Communicator) error { + // state is hard coded to failed + c.ch <- createState(c.current, false) + select { + case <-ctx.Done(): + return ctx.Err() + case <-c.done: + // set to stopped as soon as done is given + c.ch <- createState(c.current, true) + } + <-ctx.Done() + return ctx.Err() +} + +// Watch returns the watch channel. 
+func (c *FailedRuntime) Watch() <-chan ComponentState { + return c.ch +} + +// Start does nothing. +func (c *FailedRuntime) Start() error { + return nil +} + +// Update updates the component state. +func (c *FailedRuntime) Update(comp component.Component) error { + if comp.Err == nil { + return errors.New("cannot update to a component without a defined error") + } + c.current = comp + return nil +} + +// Stop marks it stopped. +func (c *FailedRuntime) Stop() error { + go func() { + close(c.done) + }() + return nil +} + +// Teardown marks it stopped. +func (c *FailedRuntime) Teardown() error { + return c.Stop() +} + +func createState(comp component.Component, done bool) ComponentState { + state := client.UnitStateFailed + if done { + state = client.UnitStateStopped + } + unitErrs := make(map[ComponentUnitKey]ComponentUnitState) + for _, unit := range comp.Units { + key := ComponentUnitKey{ + UnitType: unit.Type, + UnitID: unit.ID, + } + unitErrs[key] = ComponentUnitState{ + State: state, + Message: comp.Err.Error(), + Payload: nil, + } + } + return ComponentState{ + State: state, + Message: comp.Err.Error(), + Units: unitErrs, + } +} diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go new file mode 100644 index 00000000000..e7125e82f68 --- /dev/null +++ b/pkg/component/runtime/manager.go @@ -0,0 +1,578 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "context" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "net" + "strings" + "sync" + "time" + + "github.com/gofrs/uuid" + + "go.elastic.co/apm" + "go.elastic.co/apm/module/apmgrpc" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials" + "google.golang.org/grpc/status" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent-libs/atomic" + + "github.com/elastic/elastic-agent/internal/pkg/core/authority" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +const ( + // initialCheckinTimeout is the maximum amount of wait time from initial check-in stream to + // getting the first check-in observed state. + initialCheckinTimeout = 5 * time.Second + // maxCheckinMisses is the maximum number of check-in misses a component can miss before it is killed + // and restarted. + maxCheckinMisses = 3 +) + +var ( + // ErrNoUnit returned when manager is not controlling this unit. + ErrNoUnit = errors.New("no unit under control of this manager") +) + +// Manager for the entire runtime of operating components. +type Manager struct { + proto.UnimplementedElasticAgentServer + + logger *logger.Logger + ca *authority.CertificateAuthority + listenAddr string + tracer *apm.Tracer + + netMx sync.RWMutex + listener net.Listener + server *grpc.Server + + waitMx sync.RWMutex + waitReady map[string]waitForReady + + mx sync.RWMutex + current map[string]*componentRuntimeState + + subMx sync.RWMutex + subscriptions map[string][]*Subscription + + shuttingDown atomic.Bool +} + +// NewManager creates a new manager. 
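+//
+// listenAddr is the TCP address the GRPC control server binds to in Run;
+// tracer may be nil, in which case no APM interceptor is installed.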
+func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (*Manager, error) {
+	ca, err := authority.NewCA()
+	if err != nil {
+		return nil, err
+	}
+	m := &Manager{
+		logger:        logger,
+		ca:            ca,
+		listenAddr:    listenAddr,
+		tracer:        tracer,
+		waitReady:     make(map[string]waitForReady),
+		current:       make(map[string]*componentRuntimeState),
+		subscriptions: make(map[string][]*Subscription),
+	}
+	return m, nil
+}
+
+// Run runs the manager.
+//
+// Blocks until the context is done.
+func (m *Manager) Run(ctx context.Context) error {
+	lis, err := net.Listen("tcp", m.listenAddr)
+	if err != nil {
+		return err
+	}
+	m.netMx.Lock()
+	m.listener = lis
+	m.netMx.Unlock()
+
+	certPool := x509.NewCertPool()
+	if ok := certPool.AppendCertsFromPEM(m.ca.Crt()); !ok {
+		return errors.New("failed to append root CA")
+	}
+	creds := credentials.NewTLS(&tls.Config{
+		ClientAuth:     tls.RequireAndVerifyClientCert,
+		ClientCAs:      certPool,
+		GetCertificate: m.getCertificate,
+		MinVersion:     tls.VersionTLS12,
+	})
+
+	var server *grpc.Server
+	if m.tracer != nil {
+		apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(m.tracer))
+		server = grpc.NewServer(
+			grpc.UnaryInterceptor(apmInterceptor),
+			grpc.Creds(creds),
+		)
+	} else {
+		server = grpc.NewServer(grpc.Creds(creds))
+	}
+	m.netMx.Lock()
+	m.server = server
+	m.netMx.Unlock()
+	proto.RegisterElasticAgentServer(m.server, m)
+	m.shuttingDown.Store(false)
+
+	// start serving GRPC connections
+	errCh := make(chan error)
+	go func() {
+		errCh <- server.Serve(lis)
+	}()
+
+	select {
+	case <-ctx.Done():
+		server.Stop()
+		err = <-errCh
+	case err = <-errCh:
+	}
+	m.shutdown()
+	m.netMx.Lock()
+	m.listener = nil
+	m.server = nil
+	m.netMx.Unlock()
+	return err
+}
+
+// WaitForReady waits until the manager is ready to be used.
+//
+// This verifies that the GRPC server is up and running.
+func (m *Manager) WaitForReady(ctx context.Context) error {
+	tk, err := uuid.NewV4()
+	if err != nil {
+		return err
+	}
+	token := tk.String()
+	name, err := genServerName()
+	if err != nil {
+		return err
+	}
+	pair, err := m.ca.GeneratePairWithName(name)
+	if err != nil {
+		return err
+	}
+	cert, err := tls.X509KeyPair(pair.Crt, pair.Key)
+	if err != nil {
+		return err
+	}
+	caCertPool := x509.NewCertPool()
+	caCertPool.AppendCertsFromPEM(m.ca.Crt())
+	trans := credentials.NewTLS(&tls.Config{
+		ServerName:   name,
+		Certificates: []tls.Certificate{cert},
+		RootCAs:      caCertPool,
+	})
+
+	m.waitMx.Lock()
+	m.waitReady[token] = waitForReady{
+		name: name,
+		cert: pair,
+	}
+	m.waitMx.Unlock()
+
+	defer func() {
+		m.waitMx.Lock()
+		delete(m.waitReady, token)
+		m.waitMx.Unlock()
+	}()
+
+	for {
+		m.netMx.RLock()
+		lis := m.listener
+		srv := m.server
+		m.netMx.RUnlock()
+		if lis != nil && srv != nil {
+			addr := m.getListenAddr()
+			c, err := grpc.Dial(addr, grpc.WithTransportCredentials(trans))
+			if err == nil {
+				_ = c.Close()
+				return nil
+			}
+		}
+
+		t := time.NewTimer(100 * time.Millisecond)
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-t.C:
+		}
+	}
+}
+
+// Update updates the current state of the running components.
+//
+// This returns as soon as possible; the work is performed in the background.
+func (m *Manager) Update(components []component.Component) error {
+	shuttingDown := m.shuttingDown.Load()
+	if shuttingDown {
+		// ignore any updates once shutdown started
+		return nil
+	}
+	// teardown is true because the public `Update` method would be coming directly from
+	// policy so if a component was removed it needs to be torn down.
+	return m.update(components, true)
+}
+
+// PerformAction executes an action on a unit.
+func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) {
+	id, err := uuid.NewV4()
+	if err != nil {
+		return nil, err
+	}
+	paramBytes := []byte("{}")
+	if params != nil {
+		paramBytes, err = json.Marshal(params)
+		if err != nil {
+			return nil, err
+		}
+	}
+	runtime := m.getRuntimeFromUnit(unit)
+	if runtime == nil {
+		return nil, ErrNoUnit
+	}
+
+	req := &proto.ActionRequest{
+		Id:       id.String(),
+		Name:     name,
+		Params:   paramBytes,
+		UnitId:   unit.ID,
+		UnitType: proto.UnitType(unit.Type),
+		Type:     proto.ActionRequest_CUSTOM,
+	}
+
+	res, err := runtime.performAction(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+
+	var respBody map[string]interface{}
+	if res.Status == proto.ActionResponse_FAILED {
+		if res.Result != nil {
+			err = json.Unmarshal(res.Result, &respBody)
+			if err != nil {
+				return nil, err
+			}
+			errMsgT, ok := respBody["error"]
+			if ok {
+				errMsg, ok := errMsgT.(string)
+				if ok {
+					return nil, errors.New(errMsg)
+				}
+			}
+		}
+		return nil, errors.New("generic action failure")
+	}
+	if res.Result != nil {
+		err = json.Unmarshal(res.Result, &respBody)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return respBody, nil
+}
+
+// Subscribe to changes in a component.
+//
+// Allows subscribing to a component ID that does not yet exist. Once a component matching that ID starts, changes
+// will begin to be provided over the channel.
+//
+// Note: Not reading from a subscription channel will cause the Manager to block.
+func (m *Manager) Subscribe(componentID string) *Subscription {
+	sub := newSubscription(m)
+
+	// add latestState to channel
+	m.mx.RLock()
+	comp, ok := m.current[componentID]
+	m.mx.RUnlock()
+	if ok {
+		comp.latestMx.RLock()
+		sub.ch <- comp.latestState
+		comp.latestMx.RUnlock()
+	}
+
+	// add subscription for future changes
+	m.subMx.Lock()
+	m.subscriptions[componentID] = append(m.subscriptions[componentID], sub)
+	defer m.subMx.Unlock()
+
+	return sub
+}
+
+// Checkin is called by v1 sub-processes and has been removed.
+func (m *Manager) Checkin(_ proto.ElasticAgent_CheckinServer) error {
+	return status.Error(codes.Unavailable, "removed; upgrade to V2")
+}
+
+// CheckinV2 is the new v2 communication for components.
+func (m *Manager) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error {
+	initCheckinChan := make(chan *proto.CheckinObserved)
+	go func() {
+		// go func will not be leaked, because when the main function
+		// returns it will close the connection. that will cause this
+		// function to return.
+ observed, err := server.Recv() + if err != nil { + close(initCheckinChan) + return + } + initCheckinChan <- observed + }() + + var ok bool + var initCheckin *proto.CheckinObserved + + t := time.NewTimer(initialCheckinTimeout) + select { + case initCheckin, ok = <-initCheckinChan: + t.Stop() + case <-t.C: + // close connection + m.logger.Debug("check-in stream never sent initial observed message; closing connection") + return status.Error(codes.DeadlineExceeded, "never sent initial observed message") + } + if !ok { + // close connection + return nil + } + + runtime := m.getRuntimeFromToken(initCheckin.Token) + if runtime == nil { + // no component runtime with token; close connection + m.logger.Debug("check-in stream sent an invalid token; closing connection") + return status.Error(codes.PermissionDenied, "invalid token") + } + + return runtime.comm.checkin(server, initCheckin) +} + +// Actions is the actions stream used to broker actions between Elastic Agent and components. +func (m *Manager) Actions(server proto.ElasticAgent_ActionsServer) error { + initRespChan := make(chan *proto.ActionResponse) + go func() { + // go func will not be leaked, because when the main function + // returns it will close the connection. that will cause this + // function to return. + observed, err := server.Recv() + if err != nil { + close(initRespChan) + return + } + initRespChan <- observed + }() + + var ok bool + var initResp *proto.ActionResponse + + t := time.NewTimer(initialCheckinTimeout) + select { + case initResp, ok = <-initRespChan: + t.Stop() + case <-t.C: + // close connection + m.logger.Debug("actions stream never sent initial response message; closing connection") + return status.Error(codes.DeadlineExceeded, "never sent initial response message") + } + if !ok { + // close connection + return nil + } + if initResp.Id != client.ActionResponseInitID { + // close connection + m.logger.Debug("actions stream first response message must be an init message; closing connection") + return status.Error(codes.InvalidArgument, "initial response must be an init message") + } + + runtime := m.getRuntimeFromToken(initResp.Token) + if runtime == nil { + // no component runtime with token; close connection + m.logger.Debug("actions stream sent an invalid token; closing connection") + return status.Error(codes.PermissionDenied, "invalid token") + } + + return runtime.comm.actions(server) +} + +// update updates the current state of the running components. 
+//
+// This returns as soon as possible; the actual work is performed in the background.
+func (m *Manager) update(components []component.Component, teardown bool) error {
+	m.mx.Lock()
+	defer m.mx.Unlock()
+
+	touched := make(map[string]bool)
+	for _, comp := range components {
+		touched[comp.ID] = true
+		existing, ok := m.current[comp.ID]
+		if ok {
+			// existing component; send runtime updated value
+			existing.currComp = comp
+			if err := existing.runtime.Update(comp); err != nil {
+				return fmt.Errorf("failed to update component %s: %w", comp.ID, err)
+			}
+		} else {
+			// new component; create its runtime
+			logger := m.logger.Named(fmt.Sprintf("component.runtime.%s", comp.ID))
+			state, err := newComponentRuntimeState(m, logger, comp)
+			if err != nil {
+				return fmt.Errorf("failed to create new component %s: %w", comp.ID, err)
+			}
+			m.current[comp.ID] = state
+			err = state.start()
+			if err != nil {
+				return fmt.Errorf("failed to start component %s: %w", comp.ID, err)
+			}
+		}
+	}
+	for id, existing := range m.current {
+		// skip if already touched (meaning it still exists)
+		if _, done := touched[id]; done {
+			continue
+		}
+		// component was removed (time to clean it up)
+		existing.stop(teardown)
+	}
+	return nil
+}
+
+func (m *Manager) shutdown() {
+	m.shuttingDown.Store(true)
+
+	// don't tear down as this is just a shutdown; components will most likely come back
+	// on the next start of the manager
+	_ = m.update([]component.Component{}, false)
+
+	// wait until all components are removed
+	for {
+		m.mx.Lock()
+		length := len(m.current)
+		m.mx.Unlock()
+		if length <= 0 {
+			return
+		}
+		<-time.After(100 * time.Millisecond)
+	}
+}
+
+func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) {
+	m.subMx.RLock()
+	subs, ok := m.subscriptions[state.currComp.ID]
+	if ok {
+		for _, sub := range subs {
+			sub.ch <- latest
+		}
+	}
+	m.subMx.RUnlock()
+
+	shutdown := state.shuttingDown.Load()
+	if shutdown && latest.State == client.UnitStateStopped {
+		// shutdown is complete; remove from the current map
+		m.mx.Lock()
+		delete(m.current, state.currComp.ID)
+		m.mx.Unlock()
+
+		state.destroy()
+	}
+}
+
+func (m *Manager) unsubscribe(subscription *Subscription) {
+	m.subMx.Lock()
+	defer m.subMx.Unlock()
+	for key, subs := range m.subscriptions {
+		for i, sub := range subs {
+			if subscription == sub {
+				m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...)
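+				// append splices the subscription out of the slice in place,
+				// shifting the tail left one position and preserving order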
+ return + } + } + } +} + +func (m *Manager) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { + var cert *tls.Certificate + + m.mx.RLock() + for _, runtime := range m.current { + if runtime.comm.name == chi.ServerName { + cert = runtime.comm.cert.Certificate + break + } + } + m.mx.RUnlock() + if cert != nil { + return cert, nil + } + + m.waitMx.RLock() + for _, waiter := range m.waitReady { + if waiter.name == chi.ServerName { + cert = waiter.cert.Certificate + break + } + } + m.waitMx.RUnlock() + if cert != nil { + return cert, nil + } + + return nil, errors.New("no supported TLS certificate") +} + +func (m *Manager) getRuntimeFromToken(token string) *componentRuntimeState { + m.mx.RLock() + defer m.mx.RUnlock() + + for _, runtime := range m.current { + if runtime.comm.token == token { + return runtime + } + } + return nil +} + +func (m *Manager) getRuntimeFromUnit(unit component.Unit) *componentRuntimeState { + m.mx.RLock() + defer m.mx.RUnlock() + for _, comp := range m.current { + for _, u := range comp.currComp.Units { + if u.Type == unit.Type && u.ID == unit.ID { + return comp + } + } + } + return nil +} + +func (m *Manager) getListenAddr() string { + addr := strings.SplitN(m.listenAddr, ":", 2) + if len(addr) == 2 && addr[1] == "0" { + m.netMx.RLock() + lis := m.listener + m.netMx.RUnlock() + if lis != nil { + port := lis.Addr().(*net.TCPAddr).Port + return fmt.Sprintf("%s:%d", addr[0], port) + } + } + return m.listenAddr +} + +type waitForReady struct { + name string + cert *authority.Pair +} diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go new file mode 100644 index 00000000000..adeb2b1243a --- /dev/null +++ b/pkg/component/runtime/manager_test.go @@ -0,0 +1,1288 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
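+//
+// The tests below exercise the Manager end to end against a fake component
+// binary (expected at ../fake/fake relative to this package), covering
+// simple errors, start/stop, reconfiguration, unit removal, actions,
+// restarts, missed check-ins, and multiple components.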
+ +package runtime + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "runtime" + "testing" + "time" + + "go.elastic.co/apm/apmtest" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/elastic/elastic-agent/pkg/component" +) + +var ( + fakeInputSpec = component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{ + Timeouts: component.CommandTimeoutSpec{ + Checkin: 30 * time.Second, + Stop: 30 * time.Second, + }, + }, + } +) + +func TestManager_SimpleComponentErr(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + comp := component.Component{ + ID: "error-default", + Err: errors.New("hard-coded error"), + Units: []component.Unit{ + { + ID: "error-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{}, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("error-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateStarting { + // initial is starting + } else if state.State == client.UnitStateFailed { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "error-input"}] + if ok { + if unit.State == client.UnitStateFailed { + // should be failed + subErrCh <- nil + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } + } else { + subErrCh <- fmt.Errorf("component reported unexpected state: %v", state.State) + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_StartStop(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: 
component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + // remove the component which will stop it + err := m.Update([]component.Component{}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateStopped { + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_Configure(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == 
client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + // update config to change the state to degraded + comp.Units[0].Config = map[string]interface{}{ + "type": "fake", + "state": client.UnitStateDegraded, + "message": "Fake Degraded", + } + err := m.Update([]component.Component{comp}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateDegraded { + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_RemoveUnit(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0", + }, + }, + { + ID: "fake-input-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + unit1Stopped := false + + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit0, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input-0"}] + if ok { + if unit0.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit 0 failed: %s", unit0.Message) + } else 
if unit0.State == client.UnitStateStarting || unit0.State == client.UnitStateHealthy { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit 0 reported unexpected state: %v", unit0.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input-0") + } + unit1, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input-1"}] + if ok { + if unit1.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit 1 failed: %s", unit1.Message) + } else if unit1.State == client.UnitStateHealthy { + // unit1 is healthy lets remove it from the component + comp.Units = comp.Units[0:1] + err := m.Update([]component.Component{comp}) + if err != nil { + subErrCh <- err + } + } else if unit1.State == client.UnitStateStarting || unit1.State == client.UnitStateStopping { + // acceptable + } else if unit1.State == client.UnitStateStopped { + // unit should have been reported stopped before being removed + unit1Stopped = true + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit 1 reported unexpected state: %v", unit1.State) + } + } else { + if len(comp.Units) == 1 { + if unit1Stopped { + // unit reported stopped then removed (perfect!) + subErrCh <- nil + } else { + // never reported stopped + subErrCh <- errors.New("unit 1 removed but not reported stop first") + } + } else { + // should not be removed + subErrCh <- errors.New("unit missing: fake-input-1") + } + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_ActionState(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + 
subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + actionCtx, actionCancel := context.WithTimeout(context.Background(), 3*time.Second) + _, err := m.PerformAction(actionCtx, comp.Units[0], "set_state", map[string]interface{}{ + "state": client.UnitStateDegraded, + "message": "Action Set Degraded", + }) + actionCancel() + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateDegraded { + // action set it to degraded + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_Restarts(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + killed := false + + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + if !killed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + // expected to go to failed as it was killed with the action + } + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + if !killed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else { + // expected to go to failed as it was killed with the 
action + } + } else if unit.State == client.UnitStateHealthy { + // force the input to exit and it should be restarted + if !killed { + killed = true + actionCtx, actionCancel := context.WithTimeout(context.Background(), 500*time.Millisecond) + _, err := m.PerformAction(actionCtx, comp.Units[0], "kill", nil) + actionCancel() + if !errors.Is(err, context.DeadlineExceeded) { + // should have got deadline exceeded for this call + if err == nil { + err = fmt.Errorf("should have got deadline exceeded") + } else { + err = fmt.Errorf("should have got deadline exceeded, instead got: %w", err) + } + subErrCh <- err + } + } else { + // got back to healthy after kill + subErrCh <- nil + } + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: component.InputSpec{ + Name: "fake", + Command: &component.CommandSpec{ + Timeouts: component.CommandTimeoutSpec{ + // very low checkin timeout so we can cause missed check-ins + Checkin: 100 * time.Millisecond, + Stop: 30 * time.Second, + }, + }, + }, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + wasDegraded := false + + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateStarting || state.State == client.UnitStateHealthy { + // starting and healthy are allowed + } else if state.State == client.UnitStateDegraded { + // should go to degraded first + wasDegraded = true + } else if state.State == client.UnitStateFailed { + if wasDegraded { + 
subErrCh <- nil + } else { + subErrCh <- errors.New("should have been degraded before failed") + } + } else { + subErrCh <- fmt.Errorf("unknown component state: %v", state.State) + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_InvalidAction(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy", + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe("fake-default") + defer sub.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second) + _, err := m.PerformAction(actionCtx, comp.Units[0], "invalid_missing_action", nil) + actionCancel() + if err == nil { + subErrCh <- fmt.Errorf("should have returned an error") + } else if err.Error() != "action undefined" { + subErrCh <- fmt.Errorf("should have returned error: action undefined") + } else { + subErrCh <- nil + } + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: 
+ t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_MultiComponent(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += ".exe" + } + runtimeSpec := component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + } + components := []component.Component{ + { + ID: "fake-0", + Spec: runtimeSpec, + Units: []component.Unit{ + { + ID: "fake-input-0-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0-0", + }, + }, + { + ID: "fake-input-0-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0-1", + }, + }, + { + ID: "fake-input-0-2", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 0-2", + }, + }, + }, + }, + { + ID: "fake-1", + Spec: runtimeSpec, + Units: []component.Unit{ + { + ID: "fake-input-1-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1-0", + }, + }, + { + ID: "fake-input-1-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1-1", + }, + }, + { + ID: "fake-input-1-2", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 1-2", + }, + }, + }, + }, + { + ID: "fake-2", + Spec: runtimeSpec, + Units: []component.Unit{ + { + ID: "fake-input-2-0", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 2-0", + }, + }, + { + ID: "fake-input-2-1", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 2-1", + }, + }, + { + ID: "fake-input-2-2", + Type: client.UnitTypeInput, + Config: map[string]interface{}{ + "type": "fake", + "state": client.UnitStateHealthy, + "message": "Fake Healthy 2-2", + }, + }, + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh0 := make(chan error) + subErrCh1 := make(chan error) + subErrCh2 := make(chan error) + go func() { + sub0 := m.Subscribe("fake-0") + defer sub0.Unsubscribe() + sub1 := m.Subscribe("fake-1") + defer sub1.Unsubscribe() + sub2 := m.Subscribe("fake-2") + defer sub2.Unsubscribe() + for { + select { + case <-subCtx.Done(): + return + case state 
:= <-sub0.Ch(): + t.Logf("component fake-0 state changed: %+v", state) + signalState(subErrCh0, &state) + case state := <-sub1.Ch(): + t.Logf("component fake-1 state changed: %+v", state) + signalState(subErrCh1, &state) + case state := <-sub2.Ch(): + t.Logf("component fake-2 state changed: %+v", state) + signalState(subErrCh2, &state) + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh0) + defer drainErrChan(subErrCh1) + defer drainErrChan(subErrCh2) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update(components) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + count := 0 + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh0: + require.NoError(t, err) + count++ + if count >= 3 { + break LOOP + } + case err := <-subErrCh1: + require.NoError(t, err) + count++ + if count >= 3 { + break LOOP + } + case err := <-subErrCh2: + require.NoError(t, err) + count++ + if count >= 3 { + break LOOP + } + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func newErrorLogger(t *testing.T) *logger.Logger { + t.Helper() + + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.ErrorLevel + + log, err := logger.NewFromConfig("", loggerCfg, false) + require.NoError(t, err) + return log +} + +func drainErrChan(ch chan error) { + for { + select { + case <-ch: + default: + return + } + } +} + +func signalState(subErrCh chan error, state *ComponentState) { + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + issue := "" + healthy := 0 + for key, unit := range state.Units { + if unit.State == client.UnitStateStarting { + // acceptable + } else if unit.State == client.UnitStateHealthy { + healthy++ + } else if issue == "" { + issue = fmt.Sprintf("unit %s in invalid state %v", key.UnitID, unit.State) + } + } + if issue != "" { + subErrCh <- fmt.Errorf("%s", issue) + } + if healthy == 3 { + subErrCh <- nil + } + } +} diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go new file mode 100644 index 00000000000..ee4800ce36b --- /dev/null +++ b/pkg/component/runtime/runtime.go @@ -0,0 +1,356 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "context" + "encoding/json" + "errors" + "sync" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent-libs/atomic" + "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +// ComponentUnitState is the state for a unit running in a component. +type ComponentUnitState struct { + State client.UnitState + Message string + Payload map[string]interface{} + + // internal + configStateIdx uint64 + config map[string]interface{} + payloadStr string +} + +// ComponentUnitKey is a composite key to identify a unit by its type and ID. +type ComponentUnitKey struct { + UnitType client.UnitType + UnitID string +} + +// ComponentState is the overall state of the component. 
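+// It carries the component-level state and message plus the last known state
+// of every unit, keyed by unit type and ID.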
+type ComponentState struct { + State client.UnitState + Message string + + Units map[ComponentUnitKey]ComponentUnitState +} + +func newComponentState(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) (s ComponentState) { + s.Units = make(map[ComponentUnitKey]ComponentUnitState) + s.syncComponent(comp, initState, initMessage, initCfgIdx) + return s +} + +// Copy returns a copy of the structure. +func (s *ComponentState) Copy() (c ComponentState) { + c = *s + c.Units = make(map[ComponentUnitKey]ComponentUnitState) + for k, v := range s.Units { + c.Units[k] = v + } + return c +} + +func (s *ComponentState) syncComponent(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) { + s.State = initState + s.Message = initMessage + touched := make(map[ComponentUnitKey]bool) + for _, unit := range comp.Units { + key := ComponentUnitKey{ + UnitType: unit.Type, + UnitID: unit.ID, + } + + touched[key] = true + existing, ok := s.Units[key] + existing.State = initState + existing.Message = initMessage + existing.Payload = nil + existing.config = unit.Config + if ok { + existing.configStateIdx++ + } else { + existing.configStateIdx = initCfgIdx + } + s.Units[key] = existing + } + for key, unit := range s.Units { + _, ok := touched[key] + if !ok { + if unit.State != client.UnitStateStopped { + unit.State = client.UnitStateStopped + unit.Message = "Stopped" + + // unit is a copy and must be set back into the map + s.Units[key] = unit + } + } + } +} + +func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { + changed := false + touched := make(map[ComponentUnitKey]bool) + for _, unit := range checkin.Units { + key := ComponentUnitKey{ + UnitType: client.UnitType(unit.Type), + UnitID: unit.Id, + } + + var payloadStr string + var payload map[string]interface{} + if unit.Payload != nil { + payloadStr = string(unit.Payload) + // err is ignored (must be valid JSON for Agent to use it) + _ = json.Unmarshal(unit.Payload, &payload) + } + + touched[key] = true + existing, ok := s.Units[key] + if !ok { + changed = true + existing = ComponentUnitState{ + State: client.UnitState(unit.State), + Message: unit.Message, + Payload: payload, + configStateIdx: unit.ConfigStateIdx, + payloadStr: payloadStr, + } + } else { + existing.configStateIdx = unit.ConfigStateIdx + if existing.State != client.UnitState(unit.State) || existing.Message != unit.Message || existing.payloadStr != payloadStr { + changed = true + existing.State = client.UnitState(unit.State) + existing.Message = unit.Message + existing.Payload = payload + existing.payloadStr = payloadStr + } + } + s.Units[key] = existing + } + for key, unit := range s.Units { + _, ok := touched[key] + if !ok { + unit.configStateIdx = 0 + if unit.State != client.UnitStateStarting { + state := client.UnitStateFailed + msg := "Failed: not reported in check-in" + payloadStr := "" + if unit.State != state || unit.Message != msg || unit.payloadStr != payloadStr { + changed = true + unit.State = state + unit.Message = msg + unit.Payload = nil + unit.payloadStr = payloadStr + + // unit is a copy and must be set back into the map + s.Units[key] = unit + } + } + } + } + return changed +} + +// ComponentRuntime manages runtime lifecycle operations for a component and stores its state. +type ComponentRuntime interface { + // Run starts the runtime for the component. + // + // Called by Manager inside a go-routine. Run should not return until the passed in context is done. 
Run is always
+	// called before any of the other methods in the interface, and once the context is done none of those methods will
+	// ever be called again.
+	Run(ctx context.Context, comm Communicator) error
+	// Watch returns the channel that sends component state.
+	//
+	// The channel should send a new state anytime the state of a unit or of the whole component changes.
+	Watch() <-chan ComponentState
+	// Start starts the component.
+	//
+	// Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit.
+	Start() error
+	// Update updates the runtime with a new revision of the component definition.
+	//
+	// Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit.
+	Update(comp component.Component) error
+	// Stop stops the component.
+	//
+	// Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit.
+	//
+	// Used to stop the running component. This is used when it will be restarted or upgraded. If the component
+	// is being completely removed Teardown will be used instead.
+	Stop() error
+	// Teardown both stops and performs cleanup for the component.
+	//
+	// Must be non-blocking and never return an error unless the whole Elastic Agent needs to exit.
+	//
+	// Used to tell the difference between stopping a component so it can be restarted or upgraded, versus
+	// the component being completely removed.
+	Teardown() error
+}
+
+// NewComponentRuntime creates the proper runtime based on the input specification for the component.
+func NewComponentRuntime(comp component.Component) (ComponentRuntime, error) {
+	if comp.Err != nil {
+		return NewFailedRuntime(comp)
+	} else if comp.Spec.Spec.Command != nil {
+		return NewCommandRuntime(comp)
+	} else if comp.Spec.Spec.Service != nil {
+		return nil, errors.New("service component runtime not implemented")
+	}
+	return nil, errors.New("unknown component runtime")
+}
+
+type componentRuntimeState struct {
+	manager *Manager
+	logger  *logger.Logger
+	comm    *runtimeComm
+
+	currComp component.Component
+	runtime  ComponentRuntime
+
+	shuttingDown atomic.Bool
+
+	latestMx    sync.RWMutex
+	latestState ComponentState
+
+	watchChan      chan bool
+	watchCanceller context.CancelFunc
+
+	runChan      chan bool
+	runCanceller context.CancelFunc
+
+	actionsMx sync.Mutex
+	actions   map[string]func(*proto.ActionResponse)
+}
+
+func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component.Component) (*componentRuntimeState, error) {
+	comm, err := newRuntimeComm(logger, m.getListenAddr(), m.ca)
+	if err != nil {
+		return nil, err
+	}
+	runtime, err := NewComponentRuntime(comp)
+	if err != nil {
+		return nil, err
+	}
+
+	watchChan := make(chan bool)
+	runChan := make(chan bool)
+	state := &componentRuntimeState{
+		manager:  m,
+		logger:   logger,
+		comm:     comm,
+		currComp: comp,
+		runtime:  runtime,
+		latestState: ComponentState{
+			State:   client.UnitStateStarting,
+			Message: "Starting",
+			Units:   nil,
+		},
+		watchChan: watchChan,
+		runChan:   runChan,
+		actions:   make(map[string]func(response *proto.ActionResponse)),
+	}
+
+	// start the go-routine that watches for updates from the component
+	watchCtx, watchCanceller := context.WithCancel(context.Background())
+	state.watchCanceller = watchCanceller
+	go func() {
+		defer close(watchChan)
+		for {
+			select {
+			case <-watchCtx.Done():
+				return
+			case s := <-runtime.Watch():
+				state.latestMx.Lock()
+				state.latestState = s
+				state.latestMx.Unlock()
+				state.manager.stateChanged(state, s)
+			case ar :=
<-comm.actionsResponse: + state.actionsMx.Lock() + callback, ok := state.actions[ar.Id] + if ok { + delete(state.actions, ar.Id) + } + state.actionsMx.Unlock() + callback(ar) + } + } + }() + + // start the go-routine that operates the runtime for the component + runCtx, runCanceller := context.WithCancel(context.Background()) + state.runCanceller = runCanceller + go func() { + defer close(runChan) + defer comm.destroy() + _ = runtime.Run(runCtx, comm) + }() + + return state, nil +} + +func (s *componentRuntimeState) start() error { + return s.runtime.Start() +} + +func (s *componentRuntimeState) stop(teardown bool) error { + s.shuttingDown.Store(true) + if teardown { + return s.runtime.Teardown() + } + return s.runtime.Stop() +} + +func (s *componentRuntimeState) destroy() { + if s.runCanceller != nil { + s.runCanceller() + s.runCanceller = nil + <-s.runChan + } + if s.watchCanceller != nil { + s.watchCanceller() + s.watchCanceller = nil + <-s.watchChan + } +} + +func (s *componentRuntimeState) performAction(ctx context.Context, req *proto.ActionRequest) (*proto.ActionResponse, error) { + ch := make(chan *proto.ActionResponse) + callback := func(response *proto.ActionResponse) { + ch <- response + } + + s.actionsMx.Lock() + s.actions[req.Id] = callback + s.actionsMx.Unlock() + + select { + case <-ctx.Done(): + s.actionsMx.Lock() + delete(s.actions, req.Id) + s.actionsMx.Unlock() + return nil, ctx.Err() + case s.comm.actionsRequest <- req: + } + + var resp *proto.ActionResponse + + select { + case <-ctx.Done(): + s.actionsMx.Lock() + delete(s.actions, req.Id) + s.actionsMx.Unlock() + return nil, ctx.Err() + case resp = <-ch: + } + + return resp, nil +} diff --git a/pkg/component/runtime/runtime_comm.go b/pkg/component/runtime/runtime_comm.go new file mode 100644 index 00000000000..622b514c230 --- /dev/null +++ b/pkg/component/runtime/runtime_comm.go @@ -0,0 +1,318 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "errors" + "fmt" + "io" + "strings" + "sync" + + protobuf "google.golang.org/protobuf/proto" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + + "github.com/gofrs/uuid" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + + "github.com/elastic/elastic-agent/internal/pkg/core/authority" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +// Communicator provides an interface for a runtime to communicate with its running component. +type Communicator interface { + // WriteConnInfo writes the connection information to the writer, informing the component it has access + // to the provided services. + WriteConnInfo(w io.Writer, services ...client.Service) error + // CheckinExpected sends the expected state to the component. + CheckinExpected(expected *proto.CheckinExpected) + // CheckinObserved receives the observed state from the component. 
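+	//
+	// The returned channel is unbuffered, so the runtime must read from it
+	// promptly or the component's check-in stream will block.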
+ CheckinObserved() <-chan *proto.CheckinObserved +} + +type runtimeComm struct { + logger *logger.Logger + listenAddr string + ca *authority.CertificateAuthority + + name string + token string + cert *authority.Pair + + checkinConn bool + checkinDone chan bool + checkinLock sync.RWMutex + checkinExpected chan *proto.CheckinExpected + checkinObserved chan *proto.CheckinObserved + + actionsConn bool + actionsDone chan bool + actionsLock sync.RWMutex + actionsRequest chan *proto.ActionRequest + actionsResponse chan *proto.ActionResponse +} + +func newRuntimeComm(logger *logger.Logger, listenAddr string, ca *authority.CertificateAuthority) (*runtimeComm, error) { + token, err := uuid.NewV4() + if err != nil { + return nil, err + } + name, err := genServerName() + if err != nil { + return nil, err + } + pair, err := ca.GeneratePairWithName(name) + if err != nil { + return nil, err + } + return &runtimeComm{ + logger: logger, + listenAddr: listenAddr, + ca: ca, + name: name, + token: token.String(), + cert: pair, + checkinConn: true, + checkinExpected: make(chan *proto.CheckinExpected), + checkinObserved: make(chan *proto.CheckinObserved), + actionsConn: true, + actionsRequest: make(chan *proto.ActionRequest), + actionsResponse: make(chan *proto.ActionResponse), + }, nil +} + +func (c *runtimeComm) WriteConnInfo(w io.Writer, services ...client.Service) error { + hasV2 := false + srvs := make([]proto.ConnInfoServices, 0, len(services)) + for _, srv := range services { + if srv == client.ServiceCheckin { + return fmt.Errorf("cannot provide access to v1 checkin service") + } + if srv == client.ServiceCheckinV2 { + hasV2 = true + } + srvs = append(srvs, proto.ConnInfoServices(srv)) + } + if !hasV2 { + srvs = append(srvs, proto.ConnInfoServices_CheckinV2) + } + connInfo := &proto.ConnInfo{ + Addr: c.listenAddr, + ServerName: c.name, + Token: c.token, + CaCert: c.ca.Crt(), + PeerCert: c.cert.Crt, + PeerKey: c.cert.Key, + Services: srvs, + } + infoBytes, err := protobuf.Marshal(connInfo) + if err != nil { + return fmt.Errorf("failed to marshal connection information: %w", err) + } + _, err = w.Write(infoBytes) + if err != nil { + return fmt.Errorf("failed to write connection information: %w", err) + } + return nil +} + +func (c *runtimeComm) CheckinExpected(expected *proto.CheckinExpected) { + c.checkinExpected <- expected +} + +func (c *runtimeComm) CheckinObserved() <-chan *proto.CheckinObserved { + return c.checkinObserved +} + +func (c *runtimeComm) checkin(server proto.ElasticAgent_CheckinV2Server, init *proto.CheckinObserved) error { + c.checkinLock.Lock() + if c.checkinDone != nil { + // already connected (cannot have multiple); close connection + c.checkinLock.Unlock() + c.logger.Debug("check-in stream already connected for component; closing connection") + return status.Error(codes.AlreadyExists, "component already connected") + } + if !c.checkinConn { + // being destroyed cannot reconnect; close connection + c.checkinLock.Unlock() + c.logger.Debug("check-in stream being destroyed connection not allowed; closing connection") + return status.Error(codes.Unavailable, "component cannot connect being destroyed") + } + + checkinDone := make(chan bool) + c.checkinDone = checkinDone + c.checkinLock.Unlock() + + defer func() { + c.checkinLock.Lock() + c.checkinDone = nil + c.checkinLock.Unlock() + }() + + recvDone := make(chan bool) + sendDone := make(chan bool) + go func() { + defer func() { + close(sendDone) + }() + for { + var expected *proto.CheckinExpected + select { + case <-checkinDone: + 
return
+			case <-recvDone:
+				return
+			case expected = <-c.checkinExpected:
+			}
+
+			err := server.Send(expected)
+			if err != nil {
+				if reportableErr(err) {
+					c.logger.Debugf("check-in stream failed to send expected state: %s", err)
+				}
+				return
+			}
+		}
+	}()
+
+	c.checkinObserved <- init
+
+	go func() {
+		for {
+			checkin, err := server.Recv()
+			if err != nil {
+				if reportableErr(err) {
+					c.logger.Debugf("check-in stream failed to receive data: %s", err)
+				}
+				close(recvDone)
+				return
+			}
+			c.checkinObserved <- checkin
+		}
+	}()
+
+	<-sendDone
+	return nil
+}
+
+func (c *runtimeComm) actions(server proto.ElasticAgent_ActionsServer) error {
+	c.actionsLock.Lock()
+	if c.actionsDone != nil {
+		// already connected (cannot have multiple); close connection
+		c.actionsLock.Unlock()
+		c.logger.Debug("actions stream already connected for component; closing connection")
+		return status.Error(codes.AlreadyExists, "component already connected")
+	}
+	if !c.actionsConn {
+		// being destroyed cannot reconnect; close connection
+		c.actionsLock.Unlock()
+		c.logger.Debug("actions stream being destroyed; connection not allowed; closing connection")
+		return status.Error(codes.Unavailable, "component cannot connect; being destroyed")
+	}
+
+	actionsDone := make(chan bool)
+	c.actionsDone = actionsDone
+	c.actionsLock.Unlock()
+
+	defer func() {
+		c.actionsLock.Lock()
+		c.actionsDone = nil
+		c.actionsLock.Unlock()
+	}()
+
+	recvDone := make(chan bool)
+	sendDone := make(chan bool)
+	go func() {
+		defer func() {
+			close(sendDone)
+		}()
+		for {
+			var req *proto.ActionRequest
+			select {
+			case <-actionsDone:
+				return
+			case <-recvDone:
+				return
+			case req = <-c.actionsRequest:
+			}
+
+			err := server.Send(req)
+			if err != nil {
+				if reportableErr(err) {
+					c.logger.Debugf("actions stream failed to send action request: %s", err)
+				}
+				return
+			}
+		}
+	}()
+
+	go func() {
+		for {
+			resp, err := server.Recv()
+			if err != nil {
+				if reportableErr(err) {
+					c.logger.Debugf("actions stream failed to receive data: %s", err)
+				}
+				close(recvDone)
+				return
+			}
+			c.actionsResponse <- resp
+		}
+	}()
+
+	<-sendDone
+	return nil
+}
+
+func (c *runtimeComm) destroy() {
+	c.destroyCheckin()
+	c.destroyActions()
+}
+
+func (c *runtimeComm) destroyCheckin() {
+	c.checkinLock.Lock()
+	c.checkinConn = false
+	if c.checkinDone != nil {
+		close(c.checkinDone)
+		c.checkinDone = nil
+	}
+	c.checkinLock.Unlock()
+}
+
+func (c *runtimeComm) destroyActions() {
+	c.actionsLock.Lock()
+	c.actionsConn = false
+	if c.actionsDone != nil {
+		close(c.actionsDone)
+		c.actionsDone = nil
+	}
+	c.actionsLock.Unlock()
+}
+
+func reportableErr(err error) bool {
+	if errors.Is(err, io.EOF) {
+		return false
+	}
+	s, ok := status.FromError(err)
+	if !ok {
+		return true
+	}
+	if s.Code() == codes.Canceled {
+		return false
+	}
+	return true
+}
+
+func genServerName() (string, error) {
+	u, err := uuid.NewV4()
+	if err != nil {
+		return "", err
+	}
+	return strings.Replace(u.String(), "-", "", -1), nil
+}
diff --git a/pkg/component/runtime/subscription.go b/pkg/component/runtime/subscription.go
new file mode 100644
index 00000000000..88f4106d21a
--- /dev/null
+++ b/pkg/component/runtime/subscription.go
@@ -0,0 +1,28 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package runtime
+
+// Subscription provides a channel for notifications on a component's state.
+type Subscription struct {
+	manager *Manager
+	ch      chan ComponentState
+}
+
+func newSubscription(manager *Manager) *Subscription {
+	return &Subscription{
+		manager: manager,
+		ch:      make(chan ComponentState, 1), // buffer of 1 so the initial latestState can be queued
+	}
+}
+
+// Ch provides the channel to get state changes.
+func (s *Subscription) Ch() <-chan ComponentState {
+	return s.ch
+}
+
+// Unsubscribe removes the subscription.
+func (s *Subscription) Unsubscribe() {
+	s.manager.unsubscribe(s)
+}
diff --git a/pkg/component/spec.go b/pkg/component/spec.go
index bfd0efedb86..d4ae56e147b 100644
--- a/pkg/component/spec.go
+++ b/pkg/component/spec.go
@@ -56,8 +56,9 @@ type RuntimePreventionSpec struct {
 
 // CommandSpec is the specification for an input that executes as a subprocess.
 type CommandSpec struct {
-	Args []string         `config:"args,omitempty" yaml:"args,omitempty"`
-	Env  []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"`
+	Args     []string           `config:"args,omitempty" yaml:"args,omitempty"`
+	Env      []CommandEnvSpec   `config:"env,omitempty" yaml:"env,omitempty"`
+	Timeouts CommandTimeoutSpec `config:"timeouts" yaml:"timeouts"`
 }
 
 // CommandEnvSpec is the specification that defines environment variables that will be set to execute the subprocess.
@@ -66,6 +67,18 @@ type CommandEnvSpec struct {
 	Value string `config:"value" yaml:"value" validate:"required"`
 }
 
+// CommandTimeoutSpec is the timeout specification for the subprocess.
+type CommandTimeoutSpec struct {
+	Checkin time.Duration `config:"checkin" yaml:"checkin"`
+	Stop    time.Duration `config:"stop" yaml:"stop"`
+}
+
+// InitDefaults initializes the defaults for the timeouts.
+func (t *CommandTimeoutSpec) InitDefaults() {
+	t.Checkin = 30 * time.Second
+	t.Stop = 30 * time.Second
+}
+
 // ServiceSpec is the specification for an input that executes as a service.
 type ServiceSpec struct {
 	Operations ServiceOperationsSpec `config:"operations" yaml:"operations" validate:"required"`
diff --git a/internal/pkg/core/process/cmd.go b/pkg/core/process/cmd.go
similarity index 78%
rename from internal/pkg/core/process/cmd.go
rename to pkg/core/process/cmd.go
index a43ea7d62a6..898b2f0aab1 100644
--- a/internal/pkg/core/process/cmd.go
+++ b/pkg/core/process/cmd.go
@@ -12,11 +12,9 @@ import (
 	"os"
 	"os/exec"
 	"path/filepath"
-
-	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
-func getCmd(ctx context.Context, logger *logger.Logger, path string, env []string, uid, gid int, arg ...string) *exec.Cmd {
+func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) {
 	var cmd *exec.Cmd
 	if ctx == nil {
 		cmd = exec.Command(path, arg...)
@@ -27,7 +25,11 @@ func getCmd(ctx context.Context, logger *logger.Logger, path string, env []strin
 	cmd.Env = append(cmd.Env, env...)
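+	// run the child from its own directory so any relative paths it uses
+	// resolve next to the binary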
 	cmd.Dir = filepath.Dir(path)
-	return cmd
+	return cmd, nil
+}
+
+func killCmd(proc *os.Process) error {
+	return proc.Kill()
 }
 
 func terminateCmd(proc *os.Process) error {
diff --git a/internal/pkg/core/process/cmd_darwin.go b/pkg/core/process/cmd_darwin.go
similarity index 77%
rename from internal/pkg/core/process/cmd_darwin.go
rename to pkg/core/process/cmd_darwin.go
index aa4b96d827e..4533b351e38 100644
--- a/internal/pkg/core/process/cmd_darwin.go
+++ b/pkg/core/process/cmd_darwin.go
@@ -9,16 +9,15 @@ package process
 
 import (
 	"context"
+	"fmt"
 	"math"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"syscall"
-
-	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
-func getCmd(ctx context.Context, logger *logger.Logger, path string, env []string, uid, gid int, arg ...string) *exec.Cmd {
+func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) {
 	var cmd *exec.Cmd
 	if ctx == nil {
 		cmd = exec.Command(path, arg...)
@@ -37,16 +36,20 @@ func getCmd(ctx context.Context, logger *logger.Logger, path string, env []strin
 			},
 		}
 	} else {
-		logger.Errorf("provided uid or gid for %s is invalid. uid: '%d' gid: '%d'.", path, uid, gid)
+		return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid)
 	}
-	return cmd
+	return cmd, nil
 }
 
 func isInt32(val int) bool {
 	return val >= 0 && val <= math.MaxInt32
 }
 
+func killCmd(proc *os.Process) error {
+	return proc.Kill()
+}
+
 func terminateCmd(proc *os.Process) error {
 	return proc.Signal(syscall.SIGTERM)
 }
diff --git a/internal/pkg/core/process/cmd_linux.go b/pkg/core/process/cmd_linux.go
similarity index 80%
rename from internal/pkg/core/process/cmd_linux.go
rename to pkg/core/process/cmd_linux.go
index ffaa62e577c..88e42d6d9e3 100644
--- a/internal/pkg/core/process/cmd_linux.go
+++ b/pkg/core/process/cmd_linux.go
@@ -9,16 +9,15 @@ package process
 
 import (
 	"context"
+	"fmt"
 	"math"
 	"os"
 	"os/exec"
 	"path/filepath"
 	"syscall"
-
-	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
-func getCmd(ctx context.Context, logger *logger.Logger, path string, env []string, uid, gid int, arg ...string) *exec.Cmd {
+func getCmd(ctx context.Context, path string, env []string, uid, gid int, arg ...string) (*exec.Cmd, error) {
 	var cmd *exec.Cmd
 	if ctx == nil {
 		cmd = exec.Command(path, arg...)
@@ -40,16 +39,20 @@ func getCmd(ctx context.Context, logger *logger.Logger, path string, env []strin
 			},
 		}
 	} else {
-		logger.Errorf("provided uid or gid for %s is invalid. uid: '%d' gid: '%d'.", path, uid, gid)
+		return nil, fmt.Errorf("invalid uid: '%d' or gid: '%d'", uid, gid)
 	}
-	return cmd
+	return cmd, nil
 }
 
 func isInt32(val int) bool {
 	return val >= 0 && val <= math.MaxInt32
 }
 
+func killCmd(proc *os.Process) error {
+	return proc.Kill()
+}
+
 func terminateCmd(proc *os.Process) error {
 	return proc.Signal(syscall.SIGTERM)
 }
diff --git a/internal/pkg/core/process/config.go b/pkg/core/process/config.go
similarity index 100%
rename from internal/pkg/core/process/config.go
rename to pkg/core/process/config.go
diff --git a/pkg/core/process/external_unix.go b/pkg/core/process/external_unix.go
new file mode 100644
index 00000000000..c97b02ed58f
--- /dev/null
+++ b/pkg/core/process/external_unix.go
@@ -0,0 +1,30 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:build !windows
+// +build !windows
+
+package process
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+// externalProcess is a watch mechanism used in cases where the OS requires a process
+// to be a child in order to wait on it. We need to be able to await any process.
+func externalProcess(proc *os.Process) {
+	if proc == nil {
+		return
+	}
+
+	for {
+		<-time.After(1 * time.Second)
+		if proc.Signal(syscall.Signal(0)) != nil {
+			// failed to contact process, return
+			return
+		}
+	}
+}
diff --git a/pkg/core/process/external_windows.go b/pkg/core/process/external_windows.go
new file mode 100644
index 00000000000..d6898588889
--- /dev/null
+++ b/pkg/core/process/external_windows.go
@@ -0,0 +1,53 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:build windows
+// +build windows
+
+package process
+
+import (
+	"os"
+	"syscall"
+	"time"
+)
+
+const (
+	// exitCodeStillActive according to docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess
+	exitCodeStillActive = 259
+)
+
+// externalProcess is a watch mechanism used in cases where the OS requires a process
+// to be a child in order to wait on it. We need to be able to await any process.
+func externalProcess(proc *os.Process) {
+	if proc == nil {
+		return
+	}
+
+	for {
+		<-time.After(1 * time.Second)
+		if isWindowsProcessExited(proc.Pid) {
+			return
+		}
+	}
+}
+
+func isWindowsProcessExited(pid int) bool {
+	const desiredAccess = syscall.STANDARD_RIGHTS_READ | syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE
+	h, err := syscall.OpenProcess(desiredAccess, false, uint32(pid))
+	if err != nil {
+		// failed to open handle, report exited
+		return true
+	}
+
+	// get the exit code; this returns immediately and reports
+	// exitCodeStillActive if the process is still running
+	var ec uint32
+	if err := syscall.GetExitCodeProcess(h, &ec); err != nil {
+		// failed to contact, report exited
+		return true
+	}
+
+	return ec != exitCodeStillActive
+}
diff --git a/internal/pkg/agent/cmd/proc/job_unix.go b/pkg/core/process/job_unix.go
similarity index 97%
rename from internal/pkg/agent/cmd/proc/job_unix.go
rename to pkg/core/process/job_unix.go
index b336575c72c..72d0386cade 100644
--- a/internal/pkg/agent/cmd/proc/job_unix.go
+++ b/pkg/core/process/job_unix.go
@@ -5,7 +5,7 @@
 //go:build !windows
 // +build !windows
 
-package proc
+package process
 
 import (
 	"os"
diff --git a/internal/pkg/agent/cmd/proc/job_windows.go b/pkg/core/process/job_windows.go
similarity index 99%
rename from internal/pkg/agent/cmd/proc/job_windows.go
rename to pkg/core/process/job_windows.go
index bff42183d71..214d75d9b3c 100644
--- a/internal/pkg/agent/cmd/proc/job_windows.go
+++ b/pkg/core/process/job_windows.go
@@ -5,7 +5,7 @@
 //go:build windows
 // +build windows
 
-package proc
+package process
 
 import (
 	"os"
diff --git a/pkg/core/process/process.go b/pkg/core/process/process.go
new file mode 100644
index 00000000000..428469687b6
--- /dev/null
+++ b/pkg/core/process/process.go
@@ -0,0 +1,101 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package process
+
+import (
+	"context"
+	"fmt"
+	"io"
+	"os"
+	"os/exec"
+)
+
+// Info groups information about a freshly started process.
+type Info struct {
+	PID     int
+	Process *os.Process
+	Stdin   io.WriteCloser
+}
+
+// Option is an option func to change the underlying command.
+type Option func(c *exec.Cmd) error
+
+// Start starts a new process.
+func Start(path string, uid, gid int, args []string, env []string, opts ...Option) (proc *Info, err error) {
+	return StartContext(nil, path, uid, gid, args, env, opts...) //nolint:staticcheck // calls a different function if no ctx
+}
+
+// StartContext starts a new process with context.
+func StartContext(ctx context.Context, path string, uid, gid int, args []string, env []string, opts ...Option) (*Info, error) {
+	cmd, err := getCmd(ctx, path, env, uid, gid, args...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to create command for %q: %w", path, err)
+	}
+	for _, o := range opts {
+		if err := o(cmd); err != nil {
+			return nil, fmt.Errorf("failed to set option command for %q: %w", path, err)
+		}
+	}
+	stdin, err := cmd.StdinPipe()
+	if err != nil {
+		return nil, fmt.Errorf("failed to create stdin for %q: %w", path, err)
+	}
+
+	// start process
+	if err := cmd.Start(); err != nil {
+		return nil, fmt.Errorf("failed to start %q: %w", path, err)
+	}
+
+	// Hook to JobObject on windows, noop on other platforms.
+	// This ties the application process's lifespan to the agent's.
+	// It prevents orphaned Beats processes from being left behind
+	// when the agent process gets killed.
+	if err := JobObject.Assign(cmd.Process); err != nil {
+		_ = killCmd(cmd.Process)
+		return nil, fmt.Errorf("failed job assignment %q: %w", path, err)
+	}
+
+	return &Info{
+		PID:     cmd.Process.Pid,
+		Process: cmd.Process,
+		Stdin:   stdin,
+	}, err
+}
+
+// Kill kills the process.
+func (i *Info) Kill() error {
+	return killCmd(i.Process)
+}
+
+// Stop stops the process cleanly.
+func (i *Info) Stop() error {
+	return terminateCmd(i.Process)
+}
+
+// StopWait stops the process and waits for it to exit.
+func (i *Info) StopWait() error {
+	err := i.Stop()
+	if err != nil {
+		return err
+	}
+	_, err = i.Process.Wait()
+	return err
+}
+
+// Wait returns a channel that will send the process state once it exits.
+func (i *Info) Wait() <-chan *os.ProcessState {
+	ch := make(chan *os.ProcessState)
+
+	go func() {
+		procState, err := i.Process.Wait()
+		if err != nil {
+			// process is not a child; some OSes require the process to be a child to wait on it
+			externalProcess(i.Process)
+		}
+		ch <- procState
+	}()
+
+	return ch
+}

From 5acdc400521a9a716470584e5f1a9200b6c06359 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Tue, 26 Jul 2022 13:22:26 -0400
Subject: [PATCH 07/49] [v2] Use the v2 components runtime as the core of the
 Elastic Agent (#753)

* Add runtime for command v2 components.
* Fix imports.
* Add tests for watching checkins.
* Fix lint and move checkin period to a configurable timeout.
* Fix tests now that checkin timeout needs to be defined.
* Fix code review and lint.
* Work on actually running the v2 runtime.
* Work on switching to the v2 runtime.
* More work on switching to v2 runtime.
* Cleanup some imports.
* More import cleanups.
* Add TODO to FleetServerComponentModifier.
* Remove outdated managed_mode_test.go.
* Fixes from code review and lint.
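
A minimal sketch of how the new v2 runtime is driven, for reviewers. NewManager,
Run, Update, and SubscribeAll are taken from this change; the listen address,
the component model `comps`, and the Ch method on SubscriptionAll are
illustrative assumptions, not part of this patch:

    // Illustrative wiring only; error handling mostly elided.
    mgr, err := runtime.NewManager(log, "localhost:6789", tracer)
    if err != nil {
        return err
    }
    go func() { _ = mgr.Run(ctx) }() // serve the runtime until ctx is cancelled
    _ = mgr.Update(comps)            // push the computed []component.Component model
    sub := mgr.SubscribeAll(ctx)     // observe state changes across all components
    for state := range sub.Ch() {    // Ch is assumed to stream ComponentComponentState
        log.Infof("component state changed: %+v", state)
    }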
---
 .gitignore                                    |    2 +
 control.proto                                 |   74 +-
 .../{pipeline => }/actions/action.go          |    4 +-
 .../handlers/handler_action_application.go    |   42 +-
 .../actions/handlers/handler_action_cancel.go |    4 +-
 .../handlers/handler_action_policy_change.go  |   58 +-
 .../handler_action_policy_change_test.go      |  101 +-
 .../handler_action_policy_reassign.go         |    4 +-
 .../handlers/handler_action_settings.go       |   13 +-
 .../handlers/handler_action_unenroll.go       |   42 +-
 .../handlers/handler_action_upgrade.go        |   35 +-
 .../actions/handlers/handler_default.go       |    4 +-
 .../actions/handlers/handler_unknown.go       |    4 +-
 internal/pkg/agent/application/application.go |  193 +++-
 .../application/coordinator/coordinator.go    |  592 ++++++++++
 .../{pipeline => }/dispatcher/dispatcher.go   |   33 +-
 .../dispatcher/dispatcher_test.go             |   56 +-
 .../application/fleet_server_bootstrap.go     |  257 +----
 .../gateway/fleet/fleet_gateway.go            |  208 ++--
 .../gateway/fleet/fleet_gateway_test.go       |  306 +++--
 .../gateway/fleet/noop_status_controller.go   |   27 -
 .../fleetserver/fleet_gateway_local.go        |  116 --
 .../pkg/agent/application/gateway/gateway.go  |   15 +-
 internal/pkg/agent/application/local_mode.go  |  224 ----
 .../pkg/agent/application/managed_mode.go     |  467 ++++----
 .../agent/application/managed_mode_test.go    |  303 -----
 internal/pkg/agent/application/once.go        |   36 +-
 .../pkg/agent/application/paths/common.go     |    3 +-
 internal/pkg/agent/application/periodic.go    |   82 +-
 .../pipeline/emitter/controller.go            |  196 ----
 .../application/pipeline/emitter/emitter.go   |   39 -
 .../pipeline/emitter/emitter_test.go          |    5 -
 .../emitter/modifiers/fleet_decorator.go      |   76 --
 .../emitter/modifiers/monitoring_decorator.go |  105 --
 .../modifiers/monitoring_decorator_test.go    |  686 -----------
 .../agent/application/pipeline/pipeline.go    |   69 --
 .../application/pipeline/router/router.go     |  121 --
 .../pipeline/router/router_test.go            |  233 ----
 .../application/pipeline/stream/factory.go    |   96 --
 .../pipeline/stream/operator_stream.go        |   62 -
 .../application/upgrade/error_checker.go      |   10 +-
 .../agent/application/upgrade/step_mark.go    |    4 +-
 .../pkg/agent/application/upgrade/upgrade.go  |  150 +--
 internal/pkg/agent/cmd/diagnostics.go         |   42 +-
 internal/pkg/agent/cmd/enroll_cmd.go          |   43 +-
 internal/pkg/agent/cmd/inspect.go             |   85 +-
 internal/pkg/agent/cmd/inspect_test.go        |    2 +
 internal/pkg/agent/cmd/run.go                 |   65 +-
 internal/pkg/agent/cmd/status.go              |   36 +-
 internal/pkg/agent/configuration/grpc.go      |   26 +
 internal/pkg/agent/configuration/settings.go  |    5 +-
 internal/pkg/agent/control/client/client.go   |  124 +-
 .../pkg/agent/control/cproto/control.pb.go    |  856 +++++++++-----
 .../agent/control/cproto/control_grpc.pb.go   |   30 +-
 internal/pkg/agent/control/server/server.go   |  351 +++---
 internal/pkg/agent/install/uninstall.go       |   13 +-
 .../pkg/agent/storage/store/state_store.go    |   15 +-
 internal/pkg/capabilities/capabilities.go     |   19 +-
 internal/pkg/capabilities/input.go            |   20 +-
 internal/pkg/capabilities/output.go           |   20 +-
 internal/pkg/capabilities/upgrade.go          |   46 +-
 internal/pkg/composable/context.go            |    8 +
 internal/pkg/composable/controller.go         |  160 ++-
 internal/pkg/composable/controller_test.go    |   43 +-
 internal/pkg/composable/dynamic.go            |    8 +
 .../pkg/composable/providers/agent/agent.go   |    2 +-
 .../pkg/composable/providers/docker/docker.go |   85 +-
 internal/pkg/composable/providers/env/env.go  |    2 +-
 .../pkg/composable/providers/host/host.go     |   50 +-
 .../composable/providers/host/host_test.go    |   17 +-
 .../providers/kubernetes/kubernetes.go        |   36 +-
 .../kubernetes_leaderelection.go              |   44 +-
 .../kubernetessecrets/kubernetes_secrets.go   |   26 +-
 .../kubernetes_secrets_test.go                |   48 +-
 .../pkg/composable/providers/local/local.go   |    2 +-
 .../providers/localdynamic/localdynamic.go    |    2 +-
 .../pkg/composable/providers/path/path.go     |    2 +-
 internal/pkg/core/composable/providers.go     |    7 +-
 .../pkg/fleetapi/acker/noop/noop_acker.go     |   18 +-
 pkg/component/load.go                         |    9 +
 pkg/component/runtime/manager.go              |  151 ++-
 pkg/component/runtime/manager_test.go         |   33 +-
 pkg/component/runtime/runtime.go              |   27 +-
 pkg/component/runtime/subscription.go         |   31 +-
 pkg/core/server/config.go                     |   32 -
 pkg/core/server/config_test.go                |   23 -
 pkg/core/server/server.go                     | 1018 -----------------
 pkg/core/server/server_test.go                |  794 -------------
 88 files changed, 3109 insertions(+), 6524 deletions(-)
 rename internal/pkg/agent/application/{pipeline => }/actions/action.go (79%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_application.go (77%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_cancel.go (92%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_policy_change.go (86%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_policy_change_test.go (56%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_policy_reassign.go (91%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_settings.go (87%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_unenroll.go (67%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_action_upgrade.go (60%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_default.go (88%)
 rename internal/pkg/agent/application/{pipeline => }/actions/handlers/handler_unknown.go (88%)
 create mode 100644 internal/pkg/agent/application/coordinator/coordinator.go
 rename internal/pkg/agent/application/{pipeline => }/dispatcher/dispatcher.go (76%)
 rename internal/pkg/agent/application/{pipeline => }/dispatcher/dispatcher_test.go (64%)
 delete mode 100644 internal/pkg/agent/application/gateway/fleet/noop_status_controller.go
 delete mode 100644 internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go
 delete mode 100644 internal/pkg/agent/application/local_mode.go
 delete mode 100644 internal/pkg/agent/application/managed_mode_test.go
 delete mode 100644 internal/pkg/agent/application/pipeline/emitter/controller.go
 delete mode 100644 internal/pkg/agent/application/pipeline/emitter/emitter.go
 delete mode 100644 internal/pkg/agent/application/pipeline/emitter/emitter_test.go
 delete mode 100644 internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go
 delete mode 100644 internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go
 delete mode 100644 internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go
 delete mode 100644 internal/pkg/agent/application/pipeline/pipeline.go
 delete mode 100644 internal/pkg/agent/application/pipeline/router/router.go
 delete mode 100644 internal/pkg/agent/application/pipeline/router/router_test.go
 delete mode 100644 internal/pkg/agent/application/pipeline/stream/factory.go
 delete mode 100644 internal/pkg/agent/application/pipeline/stream/operator_stream.go
 create mode 100644 internal/pkg/agent/configuration/grpc.go
 delete mode 100644 pkg/core/server/config.go
 delete mode 100644 pkg/core/server/config_test.go
 delete mode 100644 pkg/core/server/server.go
 delete mode 100644 pkg/core/server/server_test.go

diff --git a/.gitignore b/.gitignore
index f0b7911dbef..7bfae9cc392 100644
--- a/.gitignore
+++ b/.gitignore
@@ -21,6 +21,8 @@ go_env.properties
 mage_output_file.go
 elastic_agent
 fleet.yml
+fleet.enc
+fleet.enc.lock
 
 # Editor swap files
 *.swp
diff --git a/control.proto b/control.proto
index efd063822de..a1a7a3f8b82 100644
--- a/control.proto
+++ b/control.proto
@@ -9,16 +9,23 @@ package cproto;
 
 option cc_enable_arenas = true;
 option go_package = "internal/pkg/agent/control/cproto";
 
-// Status codes for the current state.
-enum Status {
+// State codes for the current state.
+enum State {
   STARTING = 0;
   CONFIGURING = 1;
   HEALTHY = 2;
   DEGRADED = 3;
   FAILED = 4;
   STOPPING = 5;
-  UPGRADING = 6;
-  ROLLBACK = 7;
+  STOPPED = 6;
+  UPGRADING = 7;
+  ROLLBACK = 8;
+}
+
+// Unit Type running inside a component.
+enum UnitType {
+  INPUT = 0;
+  OUTPUT = 1;
 }
 
 // Action status codes for restart and upgrade response.
@@ -93,18 +100,43 @@ message UpgradeResponse {
   string error = 3;
 }
 
-// Current status of the application in Elastic Agent.
-message ApplicationStatus {
-  // Unique application ID.
+message ComponentUnitState {
+  // Type of unit in the component.
+  UnitType unit_type = 1;
+  // ID of the unit in the component.
+  string unit_id = 2;
+  // Current state.
+  State state = 3;
+  // Current state message.
+  string message = 4;
+  // Current state payload.
+  string payload = 5;
+}
+
+// Version information reported by the component to Elastic Agent.
+message ComponentVersionInfo {
+  // Name of the component.
+  string name = 1;
+  // Version of the component.
+  string version = 2;
+  // Extra meta information about the version.
+  map<string, string> meta = 3;
+}
+
+// Current state of a running component by Elastic Agent.
+message ComponentState {
+  // Unique component ID.
   string id = 1;
-  // Application name.
+  // Component name.
   string name = 2;
-  // Current status.
-  Status status = 3;
-  // Current status message.
+  // Current state.
+  State state = 3;
+  // Current state message.
   string message = 4;
-  // Current status payload.
-  string payload = 5;
+  // Current units running in the component.
+  repeated ComponentUnitState units = 5;
+  // Current version information for the running component.
+  ComponentVersionInfo version_info = 6;
 }
 
 // Current metadata for a running process.
@@ -126,14 +158,14 @@ message ProcMeta {
   string error = 15;
 }
 
-// Status is the current status of Elastic Agent.
-message StatusResponse {
-  // Overall status of Elastic Agent.
-  Status status = 1;
+// StateResponse is the current state of Elastic Agent.
+message StateResponse {
+  // Overall state of Elastic Agent.
+  State state = 1;
   // Overall status message of Elastic Agent.
   string message = 2;
-  // Status of each application in Elastic Agent.
-  repeated ApplicationStatus applications = 3;
+  // Status of each component in Elastic Agent.
+  repeated ComponentState components = 3;
 }
 
 // ProcMetaResponse is the current running version information for all processes.
@@ -184,8 +216,8 @@ service ElasticAgentControl {
   // Fetches the currently running version of the Elastic Agent.
   rpc Version(Empty) returns (VersionResponse);
 
-  // Fetches the currently status of the Elastic Agent.
-  rpc Status(Empty) returns (StatusResponse);
+  // Fetches the current state of the Elastic Agent.
+  rpc State(Empty) returns (StateResponse);
 
   // Restart restarts the current running Elastic Agent.
   rpc Restart(Empty) returns (RestartResponse);
diff --git a/internal/pkg/agent/application/pipeline/actions/action.go b/internal/pkg/agent/application/actions/action.go
similarity index 79%
rename from internal/pkg/agent/application/pipeline/actions/action.go
rename to internal/pkg/agent/application/actions/action.go
index 794ee5ca3df..120316e1dfb 100644
--- a/internal/pkg/agent/application/pipeline/actions/action.go
+++ b/internal/pkg/agent/application/actions/action.go
@@ -7,14 +7,14 @@ package actions
 import (
 	"context"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/client"
 )
 
 // Handler handles action coming from fleet.
 type Handler interface {
-	Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error
+	Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error
 }
 
 // ClientSetter sets the client for communication.
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go
similarity index 77%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_application.go
index 8d8ce830421..d36f8f1d33a 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_application.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go
@@ -9,11 +9,14 @@ import (
 	"fmt"
 	"time"
 
+	"github.com/elastic/elastic-agent-client/v7/pkg/client"
+
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
+	"github.com/elastic/elastic-agent/pkg/component"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
-	"github.com/elastic/elastic-agent/pkg/core/server"
 )
 
 const (
@@ -25,27 +28,28 @@ var errActionTimeoutInvalid = errors.New("action timeout is invalid")
 
 // AppAction is a handler for application actions.
 type AppAction struct {
-	log *logger.Logger
-	srv *server.Server
+	log   *logger.Logger
+	coord *coordinator.Coordinator
 }
 
 // NewAppAction creates a new AppAction handler.
-func NewAppAction(log *logger.Logger, srv *server.Server) *AppAction {
+func NewAppAction(log *logger.Logger, coord *coordinator.Coordinator) *AppAction {
 	return &AppAction{
-		log: log,
-		srv: srv,
+		log:   log,
+		coord: coord,
 	}
 }
 
 // Handle handles application action.
-func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error {
 	h.log.Debugf("handlerAppAction: action '%+v' received", a)
 	action, ok := a.(*fleetapi.ActionApp)
 	if !ok {
 		return fmt.Errorf("invalid type, expected ActionApp and received %T", a)
 	}
 
-	appState, ok := h.srv.FindByInputType(action.InputType)
+	state := h.coord.State()
+	unit, ok := findUnitFromInputType(state, action.InputType)
 	if !ok {
 		// If the matching action is not found ack the action with the error for action result document
 		action.StartedAt = time.Now().UTC().Format(time.RFC3339Nano)
@@ -71,8 +75,10 @@
 
 	var res map[string]interface{}
 	if err == nil {
-		h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.InputType, timeout)
-		res, err = appState.PerformAction(action.InputType, params, timeout)
+		h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.ActionType, timeout)
+		ctx, cancel := context.WithTimeout(ctx, timeout)
+		defer cancel()
+		res, err = h.coord.PerformAction(ctx, unit, action.ActionType, params)
 	}
 
 	end := time.Now().UTC()
@@ -143,3 +149,17 @@ func readMapString(m map[string]interface{}, key string, def string) string {
 	}
 	return def
 }
+
+func findUnitFromInputType(state coordinator.State, inputType string) (component.Unit, bool) {
+	for _, comp := range state.Components {
+		for _, unit := range comp.Component.Units {
+			if unit.Type == client.UnitTypeInput {
+				it, ok := unit.Config["type"]
+				if ok && it == inputType {
+					return unit, true
+				}
+			}
+		}
+	}
+	return component.Unit{}, false
+}
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go
similarity index 92%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_cancel.go
index a2208c7294d..bb48b2bd753 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_cancel.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_cancel.go
@@ -8,8 +8,8 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -32,7 +32,7 @@ func NewCancel(log *logger.Logger, cancel queueCanceler) *Cancel {
 }
 
 // Handle will cancel any actions in the queue that match target_id.
-func (h *Cancel) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *Cancel) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error {
 	action, ok := a.(*fleetapi.ActionCancel)
 	if !ok {
 		return fmt.Errorf("invalid type, expected ActionCancel and received %T", a)
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go
similarity index 86%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go
index 3775d12b352..a3f4ff0b3ea 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go
@@ -15,15 +15,15 @@ import (
 
 	"gopkg.in/yaml.v2"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/actions"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/config"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/client"
 	"github.com/elastic/elastic-agent/internal/pkg/remote"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
@@ -36,28 +36,28 @@ const (
 // PolicyChange is a handler for POLICY_CHANGE action.
 type PolicyChange struct {
 	log       *logger.Logger
-	emitter   pipeline.EmitterFunc
 	agentInfo *info.AgentInfo
 	config    *configuration.Configuration
 	store     storage.Store
+	ch        chan coordinator.ConfigChange
 	setters   []actions.ClientSetter
 }
 
 // NewPolicyChange creates a new PolicyChange handler.
 func NewPolicyChange(
 	log *logger.Logger,
-	emitter pipeline.EmitterFunc,
 	agentInfo *info.AgentInfo,
 	config *configuration.Configuration,
 	store storage.Store,
+	ch chan coordinator.ConfigChange,
 	setters ...actions.ClientSetter,
 ) *PolicyChange {
 	return &PolicyChange{
 		log:       log,
-		emitter:   emitter,
 		agentInfo: agentInfo,
 		config:    config,
 		store:     store,
+		ch:        ch,
 		setters:   setters,
 	}
 }
@@ -72,7 +72,7 @@ func (h *PolicyChange) AddSetter(cs actions.ClientSetter) {
 }
 
 // Handle handles policy change action.
-func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *PolicyChange) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error {
 	h.log.Debugf("handlerPolicyChange: action '%+v' received", a)
 	action, ok := a.(*fleetapi.ActionPolicyChange)
 	if !ok {
@@ -89,11 +89,19 @@
 	if err != nil {
 		return err
 	}
-	if err := h.emitter(ctx, c); err != nil {
-		return err
+
+	h.ch <- &policyChange{
+		ctx:    ctx,
+		cfg:    c,
+		action: a,
+		acker:  acker,
 	}
+	return nil
+}
 
-	return acker.Ack(ctx, action)
+// Watch returns the channel for configuration change notifications.
+func (h *PolicyChange) Watch() <-chan coordinator.ConfigChange {
+	return h.ch
 }
 
 func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Config) (err error) {
@@ -210,3 +218,33 @@ func fleetToReader(agentInfo *info.AgentInfo, cfg *configuration.Configuration)
 	}
 	return bytes.NewReader(data), nil
 }
+
+type policyChange struct {
+	ctx    context.Context
+	cfg    *config.Config
+	action fleetapi.Action
+	acker  acker.Acker
+	commit bool
+}
+
+func (l *policyChange) Config() *config.Config {
+	return l.cfg
+}
+
+func (l *policyChange) Ack() error {
+	if l.action == nil {
+		return nil
+	}
+	err := l.acker.Ack(l.ctx, l.action)
+	if err != nil {
+		return err
+	}
+	if l.commit {
+		return l.acker.Commit(l.ctx)
+	}
+	return nil
+}
+
+func (l *policyChange) Fail(_ error) {
+	// do nothing
+}
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go
similarity index 56%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go
index d887e755154..34114153875 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_change_test.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change_test.go
@@ -9,38 +9,27 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
-
-	"github.com/pkg/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
 	"github.com/elastic/elastic-agent/internal/pkg/config"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
 	noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
-type mockEmitter struct {
-	err    error
-	policy *config.Config
-}
-
-func (m *mockEmitter) Emitter(_ context.Context, policy *config.Config) error {
-	m.policy = policy
-	return m.err
-}
-
 func TestPolicyChange(t *testing.T) {
 	log, _ := logger.New("", false)
-	ack := noopacker.NewAcker()
+	ack := noopacker.New()
 	agentInfo, _ := info.NewAgentInfo(true)
 	nullStore := &storage.NullStore{}
 
 	t.Run("Receive a config change and successfully emits a raw configuration", func(t *testing.T) {
-		emitter := &mockEmitter{}
+		ch := make(chan coordinator.ConfigChange, 1)
 
 		conf := map[string]interface{}{"hello": "world"}
 		action := &fleetapi.ActionPolicyChange{
@@ -50,41 +39,13 @@
 		}
 
 		cfg := configuration.DefaultConfiguration()
-		handler := &PolicyChange{
-			log:       log,
-			emitter:   emitter.Emitter,
-			agentInfo: agentInfo,
-			config:    cfg,
-			store:     nullStore,
-		}
+		handler := NewPolicyChange(log, agentInfo, cfg, nullStore, ch)
 
 		err := handler.Handle(context.Background(), action, ack)
 		require.NoError(t, err)
-		require.Equal(t, config.MustNewConfigFrom(conf), emitter.policy)
-	})
-
-	t.Run("Receive a config and fail to emits a raw configuration", func(t *testing.T) {
-		mockErr := errors.New("error returned")
-		emitter := &mockEmitter{err: mockErr}
-
-		conf := map[string]interface{}{"hello": "world"}
-		action := &fleetapi.ActionPolicyChange{
-			ActionID:   "abc123",
-			ActionType: "POLICY_CHANGE",
-			Policy:     conf,
-		}
-
-		cfg := configuration.DefaultConfiguration()
-		handler := &PolicyChange{
-			log:       log,
-			emitter:   emitter.Emitter,
-			agentInfo: agentInfo,
-			config:    cfg,
-			store:     nullStore,
-		}
-		err := handler.Handle(context.Background(), action, ack)
-		require.Error(t, err)
+
+		change := <-ch
+		require.Equal(t, config.MustNewConfigFrom(conf), change.Config())
 	})
 }
 
@@ -93,41 +54,10 @@ func TestPolicyAcked(t *testing.T) {
 	agentInfo, _ := info.NewAgentInfo(true)
 	nullStore := &storage.NullStore{}
 
-	t.Run("Config change should not ACK on error", func(t *testing.T) {
-		tacker := &testAcker{}
-
-		mockErr := errors.New("error returned")
-		emitter := &mockEmitter{err: mockErr}
-
-		config := map[string]interface{}{"hello": "world"}
-		actionID := "abc123"
-		action := &fleetapi.ActionPolicyChange{
-			ActionID:   actionID,
-			ActionType: "POLICY_CHANGE",
-			Policy:     config,
-		}
-
-		cfg := configuration.DefaultConfiguration()
-		handler := &PolicyChange{
-			log:       log,
-			emitter:   emitter.Emitter,
-			agentInfo: agentInfo,
-			config:    cfg,
-			store:     nullStore,
-		}
-
-		err := handler.Handle(context.Background(), action, tacker)
-		require.Error(t, err)
-
-		actions := tacker.Items()
-		assert.EqualValues(t, 0, len(actions))
-	})
-
 	t.Run("Config change should ACK", func(t *testing.T) {
+		ch := make(chan coordinator.ConfigChange, 1)
 		tacker := &testAcker{}
 
-		emitter := &mockEmitter{}
-
 		config := map[string]interface{}{"hello": "world"}
 		actionID := "abc123"
 		action := &fleetapi.ActionPolicyChange{
@@ -137,17 +67,14 @@
 		}
 
 		cfg := configuration.DefaultConfiguration()
-		handler := &PolicyChange{
-			log:       log,
-			emitter:   emitter.Emitter,
-			agentInfo: agentInfo,
-			config:    cfg,
-			store:     nullStore,
-		}
+		handler := NewPolicyChange(log, agentInfo, cfg, nullStore, ch)
 
 		err := handler.Handle(context.Background(), action, tacker)
 		require.NoError(t, err)
 
+		change := <-ch
+		require.NoError(t, change.Ack())
+
 		actions := tacker.Items()
 		assert.EqualValues(t, 1, len(actions))
 		assert.Equal(t, actionID, actions[0])
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go
similarity index 91%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go
index 962447b8a35..2044052d48b 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_policy_reassign.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_reassign.go
@@ -7,8 +7,8 @@ package handlers
 import (
 	"context"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -25,7 +25,7 @@ func NewPolicyReassign(log *logger.Logger) *PolicyReassign {
 }
 
 // Handle handles POLICY_REASSIGN action.
-func (h *PolicyReassign) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *PolicyReassign) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error {
 	h.log.Debugf("handlerPolicyReassign: action '%+v' received", a)
 
 	if err := acker.Ack(ctx, a); err != nil {
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go
similarity index 87%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_settings.go
index 5418a0f3eb6..eed67a50682 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_settings.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_settings.go
@@ -8,11 +8,12 @@ import (
 	"context"
 	"fmt"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -23,25 +24,25 @@ type reexecManager interface {
 // Settings handles settings change coming from fleet and updates log level.
 type Settings struct {
 	log       *logger.Logger
-	reexec    reexecManager
 	agentInfo *info.AgentInfo
+	coord     *coordinator.Coordinator
 }
 
 // NewSettings creates a new Settings handler.
 func NewSettings(
 	log *logger.Logger,
-	reexec reexecManager,
 	agentInfo *info.AgentInfo,
+	coord *coordinator.Coordinator,
 ) *Settings {
 	return &Settings{
 		log:       log,
-		reexec:    reexec,
 		agentInfo: agentInfo,
+		coord:     coord,
 	}
 }
 
 // Handle handles SETTINGS action.
-func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *Settings) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error {
 	h.log.Debugf("handlerUpgrade: action '%+v' received", a)
 	action, ok := a.(*fleetapi.ActionSettings)
 	if !ok {
@@ -62,7 +63,7 @@
 		h.log.Errorf("failed to commit acker after acknowledging action with id '%s'", action.ActionID)
 	}
 
-	h.reexec.ReExec(nil)
+	h.coord.ReExec(nil)
 	return nil
 }
 
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go
similarity index 67%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go
index 71fe0f30644..045d52a4fcf 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_unenroll.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_unenroll.go
@@ -8,10 +8,10 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
+	"github.com/elastic/elastic-agent/internal/pkg/config"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -27,8 +27,7 @@ type stateStore interface {
 // For it to be operational again it needs to be either enrolled or reconfigured.
 type Unenroll struct {
 	log        *logger.Logger
-	emitter    pipeline.EmitterFunc
-	dispatcher pipeline.Router
+	ch         chan coordinator.ConfigChange
 	closers    []context.CancelFunc
 	stateStore stateStore
 }
@@ -36,43 +35,40 @@
 // NewUnenroll creates a new Unenroll handler.
 func NewUnenroll(
 	log *logger.Logger,
-	emitter pipeline.EmitterFunc,
-	dispatcher pipeline.Router,
+	ch chan coordinator.ConfigChange,
 	closers []context.CancelFunc,
 	stateStore stateStore,
 ) *Unenroll {
 	return &Unenroll{
 		log:        log,
-		emitter:    emitter,
-		dispatcher: dispatcher,
+		ch:         ch,
 		closers:    closers,
 		stateStore: stateStore,
 	}
 }
 
 // Handle handles UNENROLL action.
-func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *Unenroll) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error {
 	h.log.Debugf("handlerUnenroll: action '%+v' received", a)
 	action, ok := a.(*fleetapi.ActionUnenroll)
 	if !ok {
 		return fmt.Errorf("invalid type, expected ActionUnenroll and received %T", a)
 	}
 
-	// Providing empty map will close all pipelines
-	noPrograms := make(map[pipeline.RoutingKey][]program.Program)
-	_ = h.dispatcher.Route(ctx, a.ID(), noPrograms)
+	if action.IsDetected {
+		// not from Fleet; so we set it to nil so policyChange doesn't ack it
+		a = nil
+	}
 
-	if !action.IsDetected {
-		// ACK only events received from fleet.
-		if err := acker.Ack(ctx, action); err != nil {
-			return err
-		}
+	h.ch <- &policyChange{
+		ctx:    ctx,
+		cfg:    config.New(),
+		action: a,
+		acker:  acker,
+		commit: true,
+	}
 
-		// commit all acks before quitting.
-		if err := acker.Commit(ctx); err != nil {
-			return err
-		}
-	} else if h.stateStore != nil {
+	if h.stateStore != nil {
 		// backup action for future start to avoid starting fleet gateway loop
 		h.stateStore.Add(a)
 		h.stateStore.Save()
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go
similarity index 60%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go
rename to internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go
index cfc7ea83749..1760c96d369 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_action_upgrade.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_upgrade.go
@@ -8,9 +8,9 @@ import (
 	"context"
 	"fmt"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -18,42 +18,25 @@ import (
 // After running Upgrade agent should download its own version specified by action
 // from repository specified by fleet.
 type Upgrade struct {
-	log      *logger.Logger
-	upgrader *upgrade.Upgrader
+	log   *logger.Logger
+	coord *coordinator.Coordinator
 }
 
 // NewUpgrade creates a new Upgrade handler.
-func NewUpgrade(log *logger.Logger, upgrader *upgrade.Upgrader) *Upgrade {
+func NewUpgrade(log *logger.Logger, coord *coordinator.Coordinator) *Upgrade {
 	return &Upgrade{
-		log:      log,
-		upgrader: upgrader,
+		log:   log,
+		coord: coord,
 	}
 }
 
 // Handle handles UPGRADE action.
-func (h *Upgrade) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *Upgrade) Handle(ctx context.Context, a fleetapi.Action, _ acker.Acker) error {
 	h.log.Debugf("handlerUpgrade: action '%+v' received", a)
 	action, ok := a.(*fleetapi.ActionUpgrade)
 	if !ok {
 		return fmt.Errorf("invalid type, expected ActionUpgrade and received %T", a)
 	}
 
-	_, err := h.upgrader.Upgrade(ctx, &upgradeAction{action}, true)
-	return err
-}
-
-type upgradeAction struct {
-	*fleetapi.ActionUpgrade
-}
-
-func (a *upgradeAction) Version() string {
-	return a.ActionUpgrade.Version
-}
-
-func (a *upgradeAction) SourceURI() string {
-	return a.ActionUpgrade.SourceURI
-}
-
-func (a *upgradeAction) FleetAction() *fleetapi.ActionUpgrade {
-	return a.ActionUpgrade
+	return h.coord.Upgrade(ctx, action.Version, action.SourceURI, action)
 }
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go b/internal/pkg/agent/application/actions/handlers/handler_default.go
similarity index 88%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go
rename to internal/pkg/agent/application/actions/handlers/handler_default.go
index 873c3fd7c5a..dd59861f584 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_default.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_default.go
@@ -7,8 +7,8 @@ package handlers
 import (
 	"context"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -25,7 +25,7 @@ func NewDefault(log *logger.Logger) *Default {
 }
 
 // Handle is a default handler, no action is taken.
-func (h *Default) Handle(_ context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *Default) Handle(_ context.Context, a fleetapi.Action, acker acker.Acker) error {
 	h.log.Errorf("HandlerDefault: action '%+v' received", a)
 	return nil
 }
diff --git a/internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go b/internal/pkg/agent/application/actions/handlers/handler_unknown.go
similarity index 88%
rename from internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go
rename to internal/pkg/agent/application/actions/handlers/handler_unknown.go
index 58e0640fe4d..e0fdf4c81ab 100644
--- a/internal/pkg/agent/application/pipeline/actions/handlers/handler_unknown.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_unknown.go
@@ -7,8 +7,8 @@ package handlers
 import (
 	"context"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage/store"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -25,7 +25,7 @@ func NewUnknown(log *logger.Logger) *Unknown {
 }
 
 // Handle handles unknown actions, no action is taken.
-func (h *Unknown) Handle(_ context.Context, a fleetapi.Action, acker store.FleetAcker) error {
+func (h *Unknown) Handle(_ context.Context, a fleetapi.Action, acker acker.Acker) error {
 	h.log.Errorf("HandlerUnknown: action '%+v' received", a)
 	return nil
 }
diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go
index 7bc0089940f..788e189cb60 100644
--- a/internal/pkg/agent/application/application.go
+++ b/internal/pkg/agent/application/application.go
@@ -5,102 +5,158 @@
 package application
 
 import (
-	"context"
 	"fmt"
+	"path/filepath"
+	goruntime "runtime"
+	"strconv"
 
 	"go.elastic.co/apm"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
-	"github.com/elastic/elastic-agent/internal/pkg/core/status"
-	"github.com/elastic/elastic-agent/internal/pkg/sorted"
+	"github.com/elastic/go-sysinfo"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
+	"github.com/elastic/elastic-agent/internal/pkg/capabilities"
+	"github.com/elastic/elastic-agent/internal/pkg/composable"
 	"github.com/elastic/elastic-agent/internal/pkg/config"
+	"github.com/elastic/elastic-agent/internal/pkg/dir"
+	"github.com/elastic/elastic-agent/pkg/component"
+	"github.com/elastic/elastic-agent/pkg/component/runtime"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
-// Application is the application interface implemented by the different running mode.
-type Application interface {
-	Start() error
-	Stop() error
-	AgentInfo() *info.AgentInfo
-	Routes() *sorted.Set
-}
+type discoverFunc func() ([]string, error)
 
-type reexecManager interface {
-	ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string)
-}
+// ErrNoConfiguration is returned when no configuration is found.
+var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig)
 
-type upgraderControl interface {
-	SetUpgrader(upgrader *upgrade.Upgrader)
-}
+// PlatformModifier can modify the platform details before the runtime specifications are loaded.
+type PlatformModifier func(detail component.PlatformDetail) component.PlatformDetail
 
 // New creates a new Agent and bootstrap the required subsystem.
 func New(
 	log *logger.Logger,
-	reexec reexecManager,
-	statusCtrl status.Controller,
-	uc upgraderControl,
 	agentInfo *info.AgentInfo,
+	reexec coordinator.ReExecManager,
 	tracer *apm.Tracer,
-) (Application, error) {
-	// Load configuration from disk to understand in which mode of operation
-	// we must start the elastic-agent, the mode of operation cannot be changed without restarting the
-	// elastic-agent.
+	modifiers ...PlatformModifier,
+) (*coordinator.Coordinator, error) {
+	platform, err := getPlatformDetail(modifiers...)
+	if err != nil {
+		return nil, fmt.Errorf("failed to gather system information: %w", err)
+	}
+	log.Info("Gathered system information")
+
+	specs, err := component.LoadRuntimeSpecs(paths.Components(), platform)
+	if err != nil {
+		return nil, fmt.Errorf("failed to detect inputs and outputs: %w", err)
+	}
+	log.With("inputs", specs.Inputs()).Info("Detected available inputs and outputs")
+
+	caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log)
+	if err != nil {
+		return nil, fmt.Errorf("failed to determine capabilities: %w", err)
+	}
+	log.Info("Determined allowed capabilities")
+
 	pathConfigFile := paths.ConfigFile()
 	rawConfig, err := config.LoadFile(pathConfigFile)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to load configuration: %w", err)
 	}
-
 	if err := info.InjectAgentConfig(rawConfig); err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to load configuration: %w", err)
+	}
+	cfg, err := configuration.NewFromConfig(rawConfig)
+	if err != nil {
+		return nil, fmt.Errorf("failed to load configuration: %w", err)
 	}
 
-	return createApplication(log, pathConfigFile, rawConfig, reexec, statusCtrl, uc, agentInfo, tracer)
-}
+	upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig)
 
-func createApplication(
-	log *logger.Logger,
-	pathConfigFile string,
-	rawConfig *config.Config,
-	reexec reexecManager,
-	statusCtrl status.Controller,
-	uc upgraderControl,
-	agentInfo *info.AgentInfo,
-	tracer *apm.Tracer,
-) (Application, error) {
-	log.Info("Detecting execution mode")
-	ctx := context.Background()
-	cfg, err := configuration.NewFromConfig(rawConfig)
+	runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to initialize runtime manager: %w", err)
 	}
 
+	var configMgr coordinator.ConfigManager
+	var managed *managedConfigManager
+	var compModifiers []coordinator.ComponentsModifier
 	if configuration.IsStandalone(cfg.Fleet) {
-		log.Info("Agent is managed locally")
-		return newLocal(ctx, log, paths.ConfigFile(), rawConfig, reexec, statusCtrl, uc, agentInfo, tracer)
+		log.Info("Parsed configuration and determined agent is managed locally")
+
+		loader := config.NewLoader(log, externalConfigsGlob())
+		discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob())
+		if !cfg.Settings.Reload.Enabled {
+			log.Debug("Reloading of configuration is off")
+			configMgr = newOnce(log, discover, loader)
+		} else {
+			log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period)
+			configMgr = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader)
+		}
+	} else if configuration.IsFleetServerBootstrap(cfg.Fleet) {
+		log.Info("Parsed configuration and determined agent is in Fleet Server bootstrap mode")
+
+		compModifiers = append(compModifiers, FleetServerComponentModifier)
+		configMgr, err = newFleetServerBootstrapManager(log)
+		if err != nil {
+			return nil, err
+		}
+	} else {
+		var store storage.Store
+		store, cfg, err = mergeFleetConfig(rawConfig)
+		if err != nil {
+			return nil, err
+		}
+
+		log.Info("Parsed configuration and determined agent is managed by Fleet")
+
+		compModifiers = append(compModifiers, FleetServerComponentModifier)
+		managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime)
+		if err != nil {
+			return nil, err
+		}
+		configMgr = managed
 	}
 
-	// not in standalone; both modes require reading the fleet.yml configuration file
-	var store storage.Store
-	store, cfg, err = mergeFleetConfig(rawConfig)
+	composable, err := composable.New(log, rawConfig)
 	if err != nil {
-		return nil, err
+		return nil, errors.New(err, "failed to initialize composable controller")
 	}
 
-	if configuration.IsFleetServerBootstrap(cfg.Fleet) {
-		log.Info("Agent is in Fleet Server bootstrap mode")
-		return newFleetServerBootstrap(ctx, log, pathConfigFile, rawConfig, statusCtrl, agentInfo, tracer)
+	coord := coordinator.New(log, specs, reexec, upgrader, runtime, configMgr, composable, caps, compModifiers...)
+	if managed != nil {
+		// The coordinator requires the config manager, and in managed mode the config
+		// manager also requires the coordinator, so it must be set here once the
+		// coordinator is created.
+		managed.coord = coord
 	}
+	return coord, nil
+}
 
-	log.Info("Agent is managed by Fleet")
-	return newManaged(ctx, log, store, cfg, rawConfig, reexec, statusCtrl, agentInfo, tracer)
+func getPlatformDetail(modifiers ...PlatformModifier) (component.PlatformDetail, error) {
+	info, err := sysinfo.Host()
+	if err != nil {
+		return component.PlatformDetail{}, err
+	}
+	os := info.Info().OS
+	detail := component.PlatformDetail{
+		Platform: component.Platform{
+			OS:   goruntime.GOOS,
+			Arch: goruntime.GOARCH,
+			GOOS: goruntime.GOOS,
+		},
+		Family: os.Family,
+		Major:  strconv.Itoa(os.Major),
+		Minor:  strconv.Itoa(os.Minor),
+	}
+	for _, modifier := range modifiers {
+		detail = modifier(detail)
+	}
+	return detail, nil
 }
 
 func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.Configuration, error) {
@@ -146,3 +202,28 @@ func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.C
 	return store, cfg, nil
 }
+
+func externalConfigsGlob() string {
+	return filepath.Join(paths.Config(), configuration.ExternalInputsPattern)
+}
+
+func discoverer(patterns ...string) discoverFunc {
+	var p []string
+	for _, newP := range patterns {
+		if len(newP) == 0 {
+			continue
+		}
+
+		p = append(p, newP)
+	}
+
+	if len(p) == 0 {
+		return func() ([]string, error) {
+			return []string{}, ErrNoConfiguration
+		}
+	}
+
+	return func() ([]string, error) {
+		return dir.DiscoverFiles(p...)
+	}
+}
diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go
new file mode 100644
index 00000000000..dac48400179
--- /dev/null
+++ b/internal/pkg/agent/application/coordinator/coordinator.go
@@ -0,0 +1,592 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package coordinator
+
+import (
+	"context"
+	"errors"
+	"fmt"
+
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
+
+	"go.elastic.co/apm"
+
+	"github.com/elastic/elastic-agent-client/v7/pkg/client"
+
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec"
+	agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
+	"github.com/elastic/elastic-agent/internal/pkg/capabilities"
+	"github.com/elastic/elastic-agent/internal/pkg/config"
+	"github.com/elastic/elastic-agent/pkg/component"
+	"github.com/elastic/elastic-agent/pkg/component/runtime"
+	"github.com/elastic/elastic-agent/pkg/core/logger"
+)
+
+var (
+	// ErrNotUpgradable is returned when the upgrade cannot be performed.
+	ErrNotUpgradable = errors.New(
+		"cannot be upgraded; must be installed with install sub-command and " +
+			"running under control of the system's supervisor")
+)
+
+// ReExecManager provides an interface to perform re-execution of the entire agent.
+type ReExecManager interface {
+	ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string)
+}
+
+// UpgradeManager provides an interface to perform the upgrade action for the agent.
+type UpgradeManager interface {
+	// Upgradeable returns true if the agent can be upgraded.
+	Upgradeable() bool
+
+	// Upgrade upgrades the running agent.
+	Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error)
+}
+
+// Runner provides an interface to run a manager and receive running errors.
+type Runner interface {
+	// Run runs the manager.
+	Run(context.Context) error
+
+	// Errors returns the channel to listen to errors on.
+	//
+	// A manager should send a nil error to clear its previous error when it should no longer report as an error.
+	Errors() <-chan error
+}
+
+// RuntimeManager provides an interface to run and update the runtime.
+type RuntimeManager interface {
+	Runner
+
+	// Update updates the current components model.
+	Update([]component.Component) error
+
+	// State returns the current components model state.
+	State() []runtime.ComponentComponentState
+
+	// PerformAction executes an action on a unit.
+	PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error)
+
+	// SubscribeAll provides an interface to watch for changes in all components.
+	SubscribeAll(context.Context) *runtime.SubscriptionAll
+}
+
+// ConfigChange provides an interface for receiving a new configuration.
+//
+// Ack must be called if the configuration change was accepted, and Fail should be called if it fails to be accepted.
+type ConfigChange interface {
+	// Config returns the configuration for this change.
+	Config() *config.Config
+
+	// Ack marks the configuration change as accepted.
+	Ack() error
+
+	// Fail marks the configuration change as failed.
+	Fail(err error)
+}
+
+// ErrorReporter provides an interface for any manager that is handled by the coordinator to report errors.
+type ErrorReporter interface {
+}
+
+// ConfigManager provides an interface to run and watch for configuration changes.
+type ConfigManager interface {
+	Runner
+
+	// Watch returns the channel to watch for configuration changes.
+	Watch() <-chan ConfigChange
+}
+
+// VarsManager provides an interface to run and watch for variable changes.
+type VarsManager interface {
+	Runner
+
+	// Watch returns the channel to watch for variable changes.
+	Watch() <-chan []*transpiler.Vars
+}
+
+// ComponentsModifier is a function that takes the computed components model and modifies it before
+// passing it into the components runtime manager.
+type ComponentsModifier func(comps []component.Component, policy map[string]interface{}) ([]component.Component, error)
+
+// State provides the current state of the coordinator along with all the current states of components and units.
+type State struct {
+	State      agentclient.State
+	Message    string
+	Components []runtime.ComponentComponentState
+}
+
+// StateFetcher provides an interface to fetch the current state of the coordinator.
+type StateFetcher interface {
+	// State returns the current state of the coordinator.
+	State() State
+}
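The manager contracts above are intentionally small. As a rough sketch (not part of this change), a minimal ConfigManager that emits one fixed configuration and then idles could look like the following; the name staticConfigManager is hypothetical, and it assumes the localConfigChange helper used elsewhere in this patch:

    package application

    import (
    	"context"

    	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
    	"github.com/elastic/elastic-agent/internal/pkg/config"
    )

    // staticConfigManager is a hypothetical ConfigManager that surfaces a single
    // fixed configuration and then idles until cancelled.
    type staticConfigManager struct {
    	ch    chan coordinator.ConfigChange
    	errCh chan error
    }

    func newStaticConfigManager(cfg *config.Config) *staticConfigManager {
    	m := &staticConfigManager{
    		ch:    make(chan coordinator.ConfigChange, 1),
    		errCh: make(chan error),
    	}
    	// buffered send: the single change is queued before the coordinator watches
    	m.ch <- &localConfigChange{cfg} // assumes the localConfigChange helper in this package
    	return m
    }

    // Run blocks until the coordinator cancels the context; a static
    // configuration has no ongoing work.
    func (m *staticConfigManager) Run(ctx context.Context) error {
    	<-ctx.Done()
    	return ctx.Err()
    }

    // Errors implements Runner; per the contract, sending nil on this channel
    // clears a previously reported error.
    func (m *staticConfigManager) Errors() <-chan error { return m.errCh }

    // Watch surfaces the queued configuration change to the coordinator.
    func (m *staticConfigManager) Watch() <-chan coordinator.ConfigChange { return m.ch }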
+// Coordinator manages the entire state of the Elastic Agent.
+//
+// All configuration changes, update variables, and upgrade actions are managed and controlled by the coordinator.
+type Coordinator struct {
+	logger *logger.Logger
+
+	specs component.RuntimeSpecs
+
+	reexecMgr  ReExecManager
+	upgradeMgr UpgradeManager
+
+	runtimeMgr    RuntimeManager
+	runtimeMgrErr error
+	configMgr     ConfigManager
+	configMgrErr  error
+	varsMgr       VarsManager
+	varsMgrErr    error
+
+	caps      capabilities.Capability
+	modifiers []ComponentsModifier
+
+	state coordinatorState
+}
+
+// New creates a new coordinator.
+func New(logger *logger.Logger, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator {
+	return &Coordinator{
+		logger:     logger,
+		specs:      specs,
+		reexecMgr:  reexecMgr,
+		upgradeMgr: upgradeMgr,
+		runtimeMgr: runtimeMgr,
+		configMgr:  configMgr,
+		varsMgr:    varsMgr,
+		caps:       caps,
+		modifiers:  modifiers,
+		state: coordinatorState{
+			state: agentclient.Starting,
+		},
+	}
+}
+
+// State returns the current state for the coordinator.
+func (c *Coordinator) State() (s State) {
+	s.State = c.state.state
+	s.Message = c.state.message
+	s.Components = c.runtimeMgr.State()
+	if c.state.overrideState != nil {
+		// state has been overridden due to an action that is in progress
+		s.State = c.state.overrideState.state
+		s.Message = c.state.overrideState.message
+	} else if s.State == agentclient.Healthy {
+		// the coordinator itself is healthy, but if any manager is reporting an error we report
+		// failed, and if any component or unit is not healthy we report degraded, because the
+		// agent as a whole is not fully healthy
+		if c.runtimeMgrErr != nil {
+			s.State = agentclient.Failed
+			s.Message = c.runtimeMgrErr.Error()
+		} else if c.configMgrErr != nil {
+			s.State = agentclient.Failed
+			s.Message = c.configMgrErr.Error()
+		} else if c.varsMgrErr != nil {
+			s.State = agentclient.Failed
+			s.Message = c.varsMgrErr.Error()
+		} else if hasState(s.Components, client.UnitStateFailed) {
+			s.State = agentclient.Degraded
+			s.Message = "1 or more components/units in a failed state"
+		} else if hasState(s.Components, client.UnitStateDegraded) {
+			s.State = agentclient.Degraded
+			s.Message = "1 or more components/units in a degraded state"
+		}
+	}
+	return s
+}
+
+// ReExec performs the re-execution.
+func (c *Coordinator) ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) {
+	// override the overall state to stopping until the re-execution is complete
+	c.state.overrideState = &coordinatorOverrideState{
+		state:   agentclient.Stopping,
+		message: "Re-executing",
+	}
+	c.reexecMgr.ReExec(callback, argOverrides...)
+}
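For orientation, a control-protocol consumer might render the aggregate state assembled by State() roughly as follows; this is a sketch and the logAgentState helper is hypothetical:

    // logAgentState is a hypothetical helper showing how the aggregate state
    // computed by State() might be consumed by a control-protocol server.
    func logAgentState(log *logger.Logger, fetcher StateFetcher) {
    	s := fetcher.State()
    	log.Infof("agent is %v (%s) with %d component(s)", s.State, s.Message, len(s.Components))
    }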
+// Upgrade runs the upgrade process.
+func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) error {
+	// early check outside of the upgrader before overriding the state
+	if !c.upgradeMgr.Upgradeable() {
+		return ErrNotUpgradable
+	}
+
+	// early check capabilities to ensure this upgrade action is allowed
+	if c.caps != nil {
+		if _, err := c.caps.Apply(map[string]interface{}{
+			"version":   version,
+			"sourceURI": sourceURI,
+		}); errors.Is(err, capabilities.ErrBlocked) {
+			return ErrNotUpgradable
+		}
+	}
+
+	// override the overall state to upgrading until the re-execution is complete
+	c.state.overrideState = &coordinatorOverrideState{
+		state:   agentclient.Upgrading,
+		message: fmt.Sprintf("Upgrading to version %s", version),
+	}
+	cb, err := c.upgradeMgr.Upgrade(ctx, version, sourceURI, action)
+	if err != nil {
+		c.state.overrideState = nil
+		return err
+	}
+	c.ReExec(cb)
+	return nil
+}
+
+// PerformAction executes an action on a unit.
+func (c *Coordinator) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) {
+	return c.runtimeMgr.PerformAction(ctx, unit, name, params)
+}
+
+// Run runs the coordinator.
+//
+// The RuntimeManager, ConfigManager and VarsManager that are passed into New are also run and lifecycle controlled by Run.
+//
+// If any of the above managers fail, they will all be restarted unless the context was explicitly cancelled or timed out.
+func (c *Coordinator) Run(ctx context.Context) error {
+	// log all changes in the state of the runtime
+	go func() {
+		state := make(map[string]coordinatorComponentLogState)
+
+		sub := c.runtimeMgr.SubscribeAll(ctx)
+		for {
+			select {
+			case <-ctx.Done():
+				return
+			case s := <-sub.Ch():
+				logState := newCoordinatorComponentLogState(&s)
+				_, ok := state[s.Component.ID]
+				if !ok {
+					c.logger.With("component", logState).Info("New component created")
+				} else {
+					c.logger.With("component", logState).Info("Existing component state changed")
+				}
+				state[s.Component.ID] = logState
+				if s.State.State == client.UnitStateStopped {
+					delete(state, s.Component.ID)
+				}
+			}
+		}
+	}()
+
+	for {
+		c.state.state = agentclient.Starting
+		c.state.message = "Waiting for initial configuration and composable variables"
+		err := c.runner(ctx)
+		if err != nil {
+			if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+				c.state.state = agentclient.Stopped
+				c.state.message = "Requested to be stopped"
+				// do not restart
+				return err
+			}
+		}
+		c.state.state = agentclient.Failed
+		c.state.message = fmt.Sprintf("Coordinator failed and will be restarted: %s", err)
+		c.logger.Errorf("coordinator failed and will be restarted: %s", err)
+	}
+}
+
+// runner performs the actual work of running all the managers.
+//
+// If one of the managers fails, the others are also stopped and then the whole runner returns.
+func (c *Coordinator) runner(ctx context.Context) error {
+	ctx, cancel := context.WithCancel(ctx)
+	defer cancel()
+
+	runtimeWatcher := c.runtimeMgr
+	runtimeRun := make(chan bool)
+	runtimeErrCh := make(chan error)
+	go func(manager Runner) {
+		err := manager.Run(ctx)
+		close(runtimeRun)
+		runtimeErrCh <- err
+	}(runtimeWatcher)
+
+	configWatcher := c.configMgr
+	configRun := make(chan bool)
+	configErrCh := make(chan error)
+	go func(manager Runner) {
+		err := manager.Run(ctx)
+		close(configRun)
+		configErrCh <- err
+	}(configWatcher)
+
+	varsWatcher := c.varsMgr
+	varsRun := make(chan bool)
+	varsErrCh := make(chan error)
+	go func(manager Runner) {
+		err := manager.Run(ctx)
+		close(varsRun)
+		varsErrCh <- err
+	}(varsWatcher)
+
+	for {
+		select {
+		case <-ctx.Done():
+			runtimeErr := <-runtimeErrCh
+			c.runtimeMgrErr = runtimeErr
+			configErr := <-configErrCh
+			c.configMgrErr = configErr
+			varsErr := <-varsErrCh
+			c.varsMgrErr = varsErr
+			if runtimeErr != nil && !errors.Is(runtimeErr, context.Canceled) {
+				return runtimeErr
+			}
+			if configErr != nil && !errors.Is(configErr, context.Canceled) {
+				return configErr
+			}
+			if varsErr != nil && !errors.Is(varsErr, context.Canceled) {
+				return varsErr
+			}
+			return ctx.Err()
+		case <-runtimeRun:
+			if ctx.Err() == nil {
+				cancel()
+			}
+		case <-configRun:
+			if ctx.Err() == nil {
+				cancel()
+			}
+		case <-varsRun:
+			if ctx.Err() == nil {
+				cancel()
+			}
+		case runtimeErr := <-c.runtimeMgr.Errors():
+			c.runtimeMgrErr = runtimeErr
+		case configErr := <-c.configMgr.Errors():
+			c.configMgrErr = configErr
+		case varsErr := <-c.varsMgr.Errors():
+			c.varsMgrErr = varsErr
+		case change := <-configWatcher.Watch():
+			if ctx.Err() == nil {
+				if err := c.processConfig(ctx, change.Config()); err != nil {
+					c.state.state = agentclient.Failed
+					c.state.message = err.Error()
+					c.logger.Errorf("%s", err)
+					change.Fail(err)
+				} else {
+					if err := change.Ack(); err != nil {
+						err = fmt.Errorf("failed to ack configuration change: %w", err)
+						c.state.state = agentclient.Failed
+						c.state.message = err.Error()
+						c.logger.Errorf("%s", err)
+					}
+				}
+			}
+		case vars := <-varsWatcher.Watch():
+			if ctx.Err() == nil {
+				if err := c.processVars(ctx, vars); err != nil {
+					c.state.state = agentclient.Failed
+					c.state.message = err.Error()
+					c.logger.Errorf("%s", err)
+				}
+			}
+		}
+	}
+}
+
+func (c *Coordinator) processConfig(ctx context.Context, cfg *config.Config) (err error) {
+	span, ctx := apm.StartSpan(ctx, "config", "app.internal")
+	defer func() {
+		apm.CaptureError(ctx, err).Send()
+		span.End()
+	}()
+
+	if err := info.InjectAgentConfig(cfg); err != nil {
+		return err
+	}
+
+	// perform and verify ast translation
+	m, err := cfg.ToMapStr()
+	if err != nil {
+		return fmt.Errorf("could not create the AST from the configuration: %w", err)
+	}
+	rawAst, err := transpiler.NewAST(m)
+	if err != nil {
+		return fmt.Errorf("could not create the AST from the configuration: %w", err)
+	}
+
+	if c.caps != nil {
+		var ok bool
+		updatedAst, err := c.caps.Apply(rawAst)
+		if err != nil {
+			return fmt.Errorf("failed to apply capabilities: %w", err)
+		}
+
+		rawAst, ok = updatedAst.(*transpiler.AST)
+		if !ok {
+			// err is nil at this point; the failure is the type assertion itself
+			return errors.New("failed to transform object returned from capabilities to AST")
+		}
+	}
+
+	c.state.config = cfg
+	c.state.ast = rawAst
+
+	if c.state.vars != nil {
+		return c.process(ctx)
+	}
+	return nil
+}
+
+func (c *Coordinator) processVars(ctx context.Context, vars []*transpiler.Vars) (err error) {
+	span, ctx := apm.StartSpan(ctx, "vars", "app.internal")
+	defer func() {
+		apm.CaptureError(ctx, err).Send()
+		span.End()
+	}()
+
+	c.state.vars = vars
+
+	if c.state.ast != nil {
+		return c.process(ctx)
+	}
+	return nil
+}
+
+func (c *Coordinator) process(ctx context.Context) (err error) {
+	span, ctx := apm.StartSpan(ctx, "process", "app.internal")
+	defer func() {
+		apm.CaptureError(ctx, err).Send()
+		span.End()
+	}()
+
+	ast := c.state.ast.Clone()
+	inputs, ok := transpiler.Lookup(ast, "inputs")
+	if ok {
+		renderedInputs, err := transpiler.RenderInputs(inputs, c.state.vars)
+		if err != nil {
+			return fmt.Errorf("rendering inputs failed: %w", err)
+		}
+		err =
transpiler.Insert(ast, renderedInputs, "inputs") + if err != nil { + return fmt.Errorf("inserting rendered inputs failed: %w", err) + } + } + + cfg, err := ast.Map() + if err != nil { + return fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) + } + comps, err := c.specs.ToComponents(cfg) + if err != nil { + return fmt.Errorf("failed to render components: %w", err) + } + + for _, modifier := range c.modifiers { + comps, err = modifier(comps, cfg) + if err != nil { + return fmt.Errorf("failed to modify components: %w", err) + } + } + + c.logger.Info("Updating running component model") + c.logger.With("components", comps).Debug("Updating running component model") + err = c.runtimeMgr.Update(comps) + if err != nil { + return err + } + c.state.state = agentclient.Healthy + c.state.message = "Running" + return nil +} + +type coordinatorState struct { + state agentclient.State + message string + overrideState *coordinatorOverrideState + + config *config.Config + ast *transpiler.AST + vars []*transpiler.Vars + components []component.Component +} + +type coordinatorOverrideState struct { + state agentclient.State + message string +} + +type coordinatorComponentLogState struct { + ID string `json:"id"` + State string `json:"state"` + Message string `json:"message"` + Inputs []coordinatorComponentUnitLogState `json:"inputs"` + Output coordinatorComponentUnitLogState `json:"output,omitempty"` +} + +type coordinatorComponentUnitLogState struct { + ID string `json:"id"` + State string `json:"state"` + Message string `json:"message"` +} + +func newCoordinatorComponentLogState(state *runtime.ComponentComponentState) coordinatorComponentLogState { + var output coordinatorComponentUnitLogState + inputs := make([]coordinatorComponentUnitLogState, 0, len(state.State.Units)) + for key, unit := range state.State.Units { + if key.UnitType == client.UnitTypeInput { + inputs = append(inputs, coordinatorComponentUnitLogState{ + ID: key.UnitID, + State: newCoordinatorComponentStateStr(unit.State), + Message: unit.Message, + }) + } else { + output = coordinatorComponentUnitLogState{ + ID: key.UnitID, + State: newCoordinatorComponentStateStr(unit.State), + Message: unit.Message, + } + } + } + return coordinatorComponentLogState{ + ID: state.Component.ID, + State: newCoordinatorComponentStateStr(state.State.State), + Message: state.State.Message, + Inputs: inputs, + Output: output, + } +} + +func newCoordinatorComponentStateStr(state client.UnitState) string { + switch state { + case client.UnitStateStarting: + return "Starting" + case client.UnitStateConfiguring: + return "Configuring" + case client.UnitStateDegraded: + return "Degraded" + case client.UnitStateHealthy: + return "Healthy" + case client.UnitStateFailed: + return "Failed" + case client.UnitStateStopping: + return "Stopping" + case client.UnitStateStopped: + return "Stopped" + } + return "Unknown" +} + +func hasState(components []runtime.ComponentComponentState, state client.UnitState) bool { + for _, comp := range components { + if comp.State.State == state { + return true + } + for _, unit := range comp.State.Units { + if unit.State == state { + return true + } + } + } + return false +} diff --git a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go similarity index 76% rename from internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go rename to internal/pkg/agent/application/dispatcher/dispatcher.go index 6f036b57b21..8628cf5a59f 100644 --- 
a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -12,25 +12,29 @@ import ( "go.elastic.co/apm" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) type actionHandlers map[string]actions.Handler +// Dispatcher processes actions coming from fleet api. +type Dispatcher interface { + Dispatch(context.Context, acker.Acker, ...fleetapi.Action) error +} + // ActionDispatcher processes actions coming from fleet using registered set of handlers. type ActionDispatcher struct { - ctx context.Context log *logger.Logger handlers actionHandlers def actions.Handler } // New creates a new action dispatcher. -func New(ctx context.Context, log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) { +func New(log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) { var err error if log == nil { log, err = logger.New("action_dispatcher", false) @@ -44,7 +48,6 @@ func New(ctx context.Context, log *logger.Logger, def actions.Handler) (*ActionD } return &ActionDispatcher{ - ctx: ctx, log: log, handlers: make(actionHandlers), def: def, @@ -76,21 +79,13 @@ func (ad *ActionDispatcher) key(a fleetapi.Action) string { } // Dispatch dispatches an action using pre-registered set of handlers. -// ctx is used here ONLY to carry the span, for cancelation use the cancel -// function of the ActionDispatcher.ctx. -func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker, actions ...fleetapi.Action) (err error) { +func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, actions ...fleetapi.Action) (err error) { span, ctx := apm.StartSpan(ctx, "dispatch", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() span.End() }() - // Creating a child context that carries both the ad.ctx cancelation and - // the span from ctx. 
- ctx, cancel := context.WithCancel(ad.ctx) - defer cancel() - ctx = apm.ContextWithSpan(ctx, span) - if len(actions) == 0 { ad.log.Debug("No action to dispatch") return nil @@ -103,11 +98,11 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker ) for _, action := range actions { - if err := ad.ctx.Err(); err != nil { + if err := ctx.Err(); err != nil { return err } - if err := ad.dispatchAction(action, acker); err != nil { + if err := ad.dispatchAction(ctx, action, acker); err != nil { ad.log.Debugf("Failed to dispatch action '%+v', error: %+v", action, err) return err } @@ -117,13 +112,13 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker store.FleetAcker return acker.Commit(ctx) } -func (ad *ActionDispatcher) dispatchAction(a fleetapi.Action, acker store.FleetAcker) error { +func (ad *ActionDispatcher) dispatchAction(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { handler, found := ad.handlers[(ad.key(a))] if !found { - return ad.def.Handle(ad.ctx, a, acker) + return ad.def.Handle(ctx, a, acker) } - return handler.Handle(ad.ctx, a, acker) + return handler.Handle(ctx, a, acker) } func detectTypes(actions []fleetapi.Action) []string { diff --git a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go b/internal/pkg/agent/application/dispatcher/dispatcher_test.go similarity index 64% rename from internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go rename to internal/pkg/agent/application/dispatcher/dispatcher_test.go index 3c65dd4a2e7..4c19779688a 100644 --- a/internal/pkg/agent/application/pipeline/dispatcher/dispatcher_test.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher_test.go @@ -9,22 +9,19 @@ import ( "testing" "time" - "go.elastic.co/apm" - "go.elastic.co/apm/apmtest" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" ) type mockHandler struct { mock.Mock } -func (h *mockHandler) Handle(ctx context.Context, a fleetapi.Action, acker store.FleetAcker) error { +func (h *mockHandler) Handle(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { args := h.Called(ctx, a, acker) return args.Error(0) } @@ -61,52 +58,13 @@ func (m *mockAction) Expiration() (time.Time, error) { return args.Get(0).(time.Time), args.Error(1) } -type mockAcker struct { - mock.Mock -} - -func (m *mockAcker) Ack(ctx context.Context, action fleetapi.Action) error { - args := m.Called(ctx, action) - return args.Error(0) -} - -func (m *mockAcker) Commit(ctx context.Context) error { - args := m.Called(ctx) - return args.Error(0) -} - func TestActionDispatcher(t *testing.T) { - ack := noopacker.NewAcker() - - t.Run("Merges ActionDispatcher ctx cancel and Dispatch ctx value", func(t *testing.T) { - action1 := &mockAction{} - def := &mockHandler{} - def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - span := apmtest.NewRecordingTracer(). - StartTransaction("ignore", "ignore"). 
- StartSpan("ignore", "ignore", nil) - ctx1, cancel := context.WithCancel(context.Background()) - ack := &mockAcker{} - ack.On("Commit", mock.Anything).Run(func(args mock.Arguments) { - ctx, _ := args.Get(0).(context.Context) - require.NoError(t, ctx.Err()) - got := apm.SpanFromContext(ctx) - require.Equal(t, span.TraceContext().Span, got.ParentID()) - cancel() // cancel function from ctx1 - require.Equal(t, ctx.Err(), context.Canceled) - }).Return(nil) - d, err := New(ctx1, nil, def) - require.NoError(t, err) - ctx2 := apm.ContextWithSpan(context.Background(), span) - err = d.Dispatch(ctx2, ack, action1) - require.NoError(t, err) - ack.AssertExpectations(t) - }) + ack := noop.New() t.Run("Success to dispatch multiples events", func(t *testing.T) { ctx := context.Background() def := &mockHandler{} - d, err := New(ctx, nil, def) + d, err := New(nil, def) require.NoError(t, err) success1 := &mockHandler{} @@ -136,7 +94,7 @@ func TestActionDispatcher(t *testing.T) { def := &mockHandler{} def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() ctx := context.Background() - d, err := New(ctx, nil, def) + d, err := New(nil, def) require.NoError(t, err) action := &mockUnknownAction{} @@ -151,7 +109,7 @@ func TestActionDispatcher(t *testing.T) { success2 := &mockHandler{} def := &mockHandler{} - d, err := New(context.Background(), nil, def) + d, err := New(nil, def) require.NoError(t, err) err = d.Register(&mockAction{}, success1) diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 9b72b177eb9..bfb801b9dde 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -6,230 +6,73 @@ package application import ( "context" + "time" - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/go-sysinfo" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) -// FleetServerBootstrap application, does just enough to get a Fleet Server up and running so enrollment -// can complete. 
-type FleetServerBootstrap struct { - bgContext context.Context - cancelCtxFn context.CancelFunc - log *logger.Logger - Config configuration.FleetAgentConfig - agentInfo *info.AgentInfo - router pipeline.Router - source source - srv *server.Server -} - -func newFleetServerBootstrap( - ctx context.Context, - log *logger.Logger, - pathConfigFile string, - rawConfig *config.Config, - statusCtrl status.Controller, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*FleetServerBootstrap, error) { - cfg, err := configuration.NewFromConfig(rawConfig) - if err != nil { - return nil, err - } - - if log == nil { - log, err = logger.NewFromConfig("", cfg.Settings.LoggingConfig, false) - if err != nil { - return nil, err - } - } - - logR := logreporter.NewReporter(log) - - sysInfo, err := sysinfo.Host() - if err != nil { - return nil, errors.New(err, - "fail to get system information", - errors.TypeUnexpected) - } - - bootstrapApp := &FleetServerBootstrap{ - log: log, - agentInfo: agentInfo, - } - - bootstrapApp.bgContext, bootstrapApp.cancelCtxFn = context.WithCancel(ctx) - bootstrapApp.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener") - } - - reporter := reporting.NewReporter(bootstrapApp.bgContext, log, bootstrapApp.agentInfo, logR) - - if cfg.Settings.MonitoringConfig != nil { - cfg.Settings.MonitoringConfig.Enabled = false - } else { - cfg.Settings.MonitoringConfig = &monitoringCfg.MonitoringConfig{Enabled: false} - } - monitor, err := monitoring.NewMonitor(cfg.Settings) - if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") - } - - router, err := router.New(log, stream.Factory(bootstrapApp.bgContext, agentInfo, cfg.Settings, bootstrapApp.srv, reporter, monitor, statusCtrl)) - if err != nil { - return nil, errors.New(err, "fail to initialize pipeline router") - } - bootstrapApp.router = router - - emit, err := bootstrapEmitter( - bootstrapApp.bgContext, - log, - agentInfo, - router, - &pipeline.ConfigModifiers{ - Filters: []pipeline.FilterFunc{filters.StreamChecker, modifiers.InjectFleet(rawConfig, sysInfo.Info(), agentInfo)}, +// injectFleetServerInput is the base configuration that is used plus the FleetServerComponentModifier that adjusts +// the components before sending them to the runtime manager. +var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "hosts": []string{"localhost:9200"}, }, - ) - if err != nil { - return nil, err - } - - loader := config.NewLoader(log, "") - discover := discoverer(pathConfigFile, cfg.Settings.Path) - bootstrapApp.source = newOnce(log, discover, loader, emit) - return bootstrapApp, nil -} - -// Routes returns a list of routes handled by server. -func (b *FleetServerBootstrap) Routes() *sorted.Set { - return b.router.Routes() -} - -// Start starts a managed elastic-agent. 
-func (b *FleetServerBootstrap) Start() error { - b.log.Info("Agent is starting") - defer b.log.Info("Agent is stopped") - - if err := b.srv.Start(); err != nil { - return err - } - if err := b.source.Start(); err != nil { - return err - } - - return nil + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "fleet-server", + }, + }, +}) + +// FleetServerComponentModifier modifies the comps to inject extra information from the policy into +// the Fleet Server component and units needed to run Fleet Server correctly. +func FleetServerComponentModifier(comps []component.Component, policy map[string]interface{}) ([]component.Component, error) { + // TODO(blakerouse): Need to add logic to update the Fleet Server component with extra information from the policy. + return comps, nil } -// Stop stops a local agent. -func (b *FleetServerBootstrap) Stop() error { - err := b.source.Stop() - b.cancelCtxFn() - b.router.Shutdown() - b.srv.Stop() - return err -} +type fleetServerBootstrapManager struct { + log *logger.Logger -// AgentInfo retrieves elastic-agent information. -func (b *FleetServerBootstrap) AgentInfo() *info.AgentInfo { - return b.agentInfo + ch chan coordinator.ConfigChange + errCh chan error } -func bootstrapEmitter(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInfo, router pipeline.Router, modifiers *pipeline.ConfigModifiers) (pipeline.EmitterFunc, error) { - ch := make(chan *config.Config) - - go func() { - for { - var c *config.Config - select { - case <-ctx.Done(): - return - case c = <-ch: - } - - err := emit(ctx, log, agentInfo, router, modifiers, c) - if err != nil { - log.Error(err) - } - } - }() - - return func(ctx context.Context, c *config.Config) error { - span, _ := apm.StartSpan(ctx, "emit", "app.internal") - defer span.End() - ch <- c - return nil +func newFleetServerBootstrapManager( + log *logger.Logger, +) (*fleetServerBootstrapManager, error) { + return &fleetServerBootstrapManager{ + log: log, + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), }, nil } -func emit(ctx context.Context, log *logger.Logger, agentInfo transpiler.AgentInfo, router pipeline.Router, modifiers *pipeline.ConfigModifiers, c *config.Config) error { - if err := info.InjectAgentConfig(c); err != nil { - return err - } +func (m *fleetServerBootstrapManager) Run(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() - // perform and verify ast translation - m, err := c.ToMapStr() - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - ast, err := transpiler.NewAST(m) - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - for _, filter := range modifiers.Filters { - if err := filter(log, ast); err != nil { - return errors.New(err, "failed to filter configuration", errors.TypeConfig) - } + m.log.Debugf("injecting fleet-server for bootstrap") + select { + case <-ctx.Done(): + return ctx.Err() + case m.ch <- &localConfigChange{injectFleetServerInput}: } - // overwrite the inputs to only have a single fleet-server input - transpiler.Insert(ast, transpiler.NewList([]transpiler.Node{ - transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("type", transpiler.NewStrVal("fleet-server")), - }), - }), "inputs") + <-ctx.Done() + return ctx.Err() +} - spec, ok := program.SupportedMap["fleet-server"] - if !ok { - return errors.New("missing required fleet-server program specification") 
- } - ok, err = program.DetectProgram(spec.Rules, spec.When, spec.Constraints, agentInfo, ast) - if err != nil { - return errors.New(err, "failed parsing the configuration") - } - if !ok { - return errors.New("bootstrap configuration is incorrect causing fleet-server to not be started") - } +func (m *fleetServerBootstrapManager) Errors() <-chan error { + return m.errCh +} - return router.Route(ctx, ast.HashStr(), map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: { - { - Spec: spec, - Config: ast, - }, - }, - }) +func (m *fleetServerBootstrapManager) Watch() <-chan coordinator.ConfigChange { + return m.ch } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 4ff4c34ad42..d8c21a580d3 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -8,20 +8,18 @@ import ( "context" stderr "errors" "fmt" - "sync" "time" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" - + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" + agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/core/backoff" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -54,10 +52,6 @@ type agentInfo interface { AgentID() string } -type fleetReporter interface { - Events() ([]fleetapi.SerializableEvent, func()) -} - type stateStore interface { Add(fleetapi.Action) AckToken() string @@ -75,109 +69,102 @@ type actionQueue interface { } type fleetGateway struct { - bgContext context.Context - log *logger.Logger - dispatcher pipeline.Dispatcher - client client.Sender - scheduler scheduler.Scheduler - backoff backoff.Backoff - settings *fleetGatewaySettings - agentInfo agentInfo - reporter fleetReporter - done chan struct{} - wg sync.WaitGroup - acker store.FleetAcker - unauthCounter int - statusController status.Controller - statusReporter status.Reporter - stateStore stateStore - queue actionQueue + log *logger.Logger + dispatcher dispatcher.Dispatcher + client client.Sender + scheduler scheduler.Scheduler + settings *fleetGatewaySettings + agentInfo agentInfo + acker acker.Acker + unauthCounter int + stateFetcher coordinator.StateFetcher + stateStore stateStore + queue actionQueue + errCh chan error } // New creates a new fleet gateway func New( - ctx context.Context, log *logger.Logger, agentInfo agentInfo, client client.Sender, - d pipeline.Dispatcher, - r fleetReporter, - acker store.FleetAcker, - statusController status.Controller, + d dispatcher.Dispatcher, + acker acker.Acker, + stateFetcher coordinator.StateFetcher, stateStore 
stateStore, queue actionQueue, ) (gateway.FleetGateway, error) { scheduler := scheduler.NewPeriodicJitter(defaultGatewaySettings.Duration, defaultGatewaySettings.Jitter) return newFleetGatewayWithScheduler( - ctx, log, defaultGatewaySettings, agentInfo, client, d, scheduler, - r, acker, - statusController, + stateFetcher, stateStore, queue, ) } func newFleetGatewayWithScheduler( - ctx context.Context, log *logger.Logger, settings *fleetGatewaySettings, agentInfo agentInfo, client client.Sender, - d pipeline.Dispatcher, + d dispatcher.Dispatcher, scheduler scheduler.Scheduler, - r fleetReporter, - acker store.FleetAcker, - statusController status.Controller, + acker acker.Acker, + stateFetcher coordinator.StateFetcher, stateStore stateStore, queue actionQueue, ) (gateway.FleetGateway, error) { - - // Backoff implementation doesn't support the use of a context [cancellation] - // as the shutdown mechanism. - // So we keep a done channel that will be closed when the current context is shutdown. - done := make(chan struct{}) - return &fleetGateway{ - bgContext: ctx, - log: log, - dispatcher: d, - client: client, - settings: settings, - agentInfo: agentInfo, - scheduler: scheduler, - backoff: backoff.NewEqualJitterBackoff( - done, - settings.Backoff.Init, - settings.Backoff.Max, - ), - done: done, - reporter: r, - acker: acker, - statusReporter: statusController.RegisterComponent("gateway"), - statusController: statusController, - stateStore: stateStore, - queue: queue, + log: log, + dispatcher: d, + client: client, + settings: settings, + agentInfo: agentInfo, + scheduler: scheduler, + acker: acker, + stateFetcher: stateFetcher, + stateStore: stateStore, + queue: queue, + errCh: make(chan error), }, nil } -func (f *fleetGateway) worker() { +func (f *fleetGateway) Run(ctx context.Context) error { + // Backoff implementation doesn't support the use of a context [cancellation] as the shutdown mechanism. + // So we keep a done channel that will be closed when the current context is shutdown. + done := make(chan struct{}) + backoff := backoff.NewEqualJitterBackoff( + done, + f.settings.Backoff.Init, + f.settings.Backoff.Max, + ) + go func() { + <-ctx.Done() + close(done) + }() + + f.log.Info("Fleet gateway started") for { select { + case <-ctx.Done(): + f.scheduler.Stop() + f.log.Info("Fleet gateway stopped") + return ctx.Err() case ts := <-f.scheduler.WaitTick(): f.log.Debug("FleetGateway calling Checkin API") // Execute the checkin call and for any errors returned by the fleet-server API // the function will retry to communicate with fleet-server with an exponential delay and some // jitter to help better distribute the load from a fleet of agents. - resp, err := f.doExecute() + resp, err := f.doExecute(ctx, backoff) if err != nil { continue } @@ -194,35 +181,36 @@ func (f *fleetGateway) worker() { actions = append(actions, queued...) 
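+			// Failures while persisting and dispatching below are now reported on the
+			// gateway's error channel instead of a status reporter; a nil send after a
+			// clean iteration clears any previously reported error.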
- var errMsg string // Persist state + hadErr := false f.stateStore.SetQueue(f.queue.Actions()) if err := f.stateStore.Save(); err != nil { - errMsg = fmt.Sprintf("failed to persist action_queue, error: %s", err) - f.log.Error(errMsg) - f.statusReporter.Update(state.Failed, errMsg, nil) + err = fmt.Errorf("failed to persist action_queue, error: %w", err) + f.log.Error(err) + f.errCh <- err + hadErr = true } if err := f.dispatcher.Dispatch(context.Background(), f.acker, actions...); err != nil { - errMsg = fmt.Sprintf("failed to dispatch actions, error: %s", err) - f.log.Error(errMsg) - f.statusReporter.Update(state.Failed, errMsg, nil) + err = fmt.Errorf("failed to dispatch actions, error: %w", err) + f.log.Error(err) + f.errCh <- err + hadErr = true } f.log.Debugf("FleetGateway is sleeping, next update in %s", f.settings.Duration) - if errMsg != "" { - f.statusReporter.Update(state.Failed, errMsg, nil) - } else { - f.statusReporter.Update(state.Healthy, "", nil) + if !hadErr { + f.errCh <- nil } - - case <-f.bgContext.Done(): - f.stop() - return } } } +// Errors returns the channel to watch for reported errors. +func (f *fleetGateway) Errors() <-chan error { + return f.errCh +} + // queueScheduledActions will add any action in actions with a valid start time to the queue and return the rest. // start time to current time comparisons are purposefully not made in case of cancel actions. func (f *fleetGateway) queueScheduledActions(input fleetapi.Actions) []fleetapi.Action { @@ -277,17 +265,17 @@ func (f *fleetGateway) gatherQueuedActions(ts time.Time) (queued, expired []flee return queued, expired } -func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { - f.backoff.Reset() +func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*fleetapi.CheckinResponse, error) { + bo.Reset() // Guard if the context is stopped by a out of bound call, // this mean we are rebooting to change the log level or the system is shutting us down. - for f.bgContext.Err() == nil { + for ctx.Err() == nil { f.log.Debugf("Checking started") - resp, err := f.execute(f.bgContext) + resp, err := f.execute(ctx) if err != nil { f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err) - if !f.backoff.Wait() { + if !bo.Wait() { // Something bad has happened and we log it and we should update our current state. err := errors.New( "execute retry loop was stopped", @@ -296,7 +284,7 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { ) f.log.Error(err) - f.statusReporter.Update(state.Failed, err.Error(), nil) + f.errCh <- err return nil, err } continue @@ -307,13 +295,10 @@ func (f *fleetGateway) doExecute() (*fleetapi.CheckinResponse, error) { // This mean that the next loop was cancelled because of the context, we should return the error // but we should not log it, because we are in the process of shutting down. 
- return nil, f.bgContext.Err() + return nil, ctx.Err() } func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) { - // get events - ee, ack := f.reporter.Events() - ecsMeta, err := info.Metadata() if err != nil { f.log.Error(errors.New("failed to load metadata", err)) @@ -325,13 +310,15 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, f.log.Debugf("using previously saved ack token: %v", ackToken) } + // get current state + state := f.stateFetcher.State() + // checkin cmd := fleetapi.NewCheckinCmd(f.agentInfo, f.client) req := &fleetapi.CheckinRequest{ AckToken: ackToken, - Events: ee, Metadata: ecsMeta, - Status: f.statusController.StatusString(), + Status: agentStateToString(state.State), } resp, err := cmd.Execute(ctx, req) @@ -362,8 +349,6 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } } - // ack events so they are dropped from queue - ack() return resp, nil } @@ -376,25 +361,16 @@ func isUnauth(err error) bool { return errors.Is(err, client.ErrInvalidAPIKey) } -func (f *fleetGateway) Start() error { - f.wg.Add(1) - go func(wg *sync.WaitGroup) { - defer f.log.Info("Fleet gateway is stopped") - defer wg.Done() - - f.worker() - }(&f.wg) - return nil -} - -func (f *fleetGateway) stop() { - f.log.Info("Fleet gateway is stopping") - defer f.scheduler.Stop() - f.statusReporter.Unregister() - close(f.done) - f.wg.Wait() -} - func (f *fleetGateway) SetClient(c client.Sender) { f.client = c } + +func agentStateToString(state agentclient.State) string { + switch state { + case agentclient.Healthy: + return "online" + case agentclient.Failed: + return "error" + } + return "degraded" +} diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index a9b9380519f..deb871192bc 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -8,12 +8,13 @@ package fleet import ( "bytes" "context" - "encoding/json" "fmt" "io" "io/ioutil" "net/http" "net/url" + "os" + "path/filepath" "sync" "testing" "time" @@ -22,15 +23,13 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - repo "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" - fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -45,9 +44,9 @@ type testingClient struct { func (t *testingClient) Send( _ context.Context, - method string, - path string, - params url.Values, + _ string, + _ string, + _ url.Values, headers http.Header, body io.Reader, ) (*http.Response, error) { @@ -80,7 
+79,7 @@ type testingDispatcher struct { received chan struct{} } -func (t *testingDispatcher) Dispatch(_ context.Context, acker store.FleetAcker, actions ...fleetapi.Action) error { +func (t *testingDispatcher) Dispatch(_ context.Context, acker acker.Acker, actions ...fleetapi.Action) error { t.Lock() defer t.Unlock() defer func() { t.received <- struct{}{} }() @@ -135,7 +134,7 @@ func (m *mockQueue) Actions() []fleetapi.Action { return args.Get(0).([]fleetapi.Action) } -type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper, repo.Backend) +type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper) func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGatewayFunc) func(t *testing.T) { return func(t *testing.T) { @@ -144,37 +143,29 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat dispatcher := newTestingDispatcher() log, _ := logger.New("fleet_gateway", false) - rep := getReporter(agentInfo, log, t) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - rep, - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) require.NoError(t, err) - fn(t, gateway, client, dispatcher, scheduler, rep) + fn(t, gateway, client, dispatcher, scheduler) } } @@ -212,8 +203,10 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) @@ -224,12 +217,16 @@ func TestFleetGateway(t *testing.T) { return nil }), ) - err := gateway.Start() - require.NoError(t, err) + + errCh := runFleetGateway(ctx, gateway) // Synchronize scheduler and acking of calls from the worker go routine. scheduler.Next() waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) t.Run("Successfully connects and receives a series of actions", withGateway(agentInfo, settings, func( @@ -238,8 +235,10 @@ func TestFleetGateway(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + waitFn := ackSeq( client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { // TODO: assert no events @@ -269,11 +268,15 @@ func TestFleetGateway(t *testing.T) { return nil }), ) - err := gateway.Start() - require.NoError(t, err) + + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) // Test the normal time based execution. 
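Each converted test drives the gateway through its Run method instead of Start/Stop. The shared shape, sketched roughly (runFleetGateway is the helper added near the end of this file's diff):

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    errCh := runFleetGateway(ctx, gateway) // helper defined later in this diff

    scheduler.Next() // drive one checkin
    waitFn()         // wait for the client and dispatcher to be exercised

    cancel()
    require.NoError(t, <-errCh) // the gateway exits cleanly on cancellation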
@@ -286,30 +289,24 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -323,8 +320,7 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) var count int for { @@ -334,6 +330,10 @@ func TestFleetGateway(t *testing.T) { return } } + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("queue action from checkin", func(t *testing.T) { @@ -345,10 +345,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -357,20 +354,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -395,12 +389,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("run action from queue", func(t *testing.T) { @@ -412,10 +409,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -423,20 +417,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -450,12 +441,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("discard expired action from queue", func(t *testing.T) { @@ -467,10 +461,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := 
time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -478,20 +469,17 @@ func TestFleetGateway(t *testing.T) { queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -505,12 +493,15 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) + + cancel() + err = <-errCh + require.NoError(t, err) }) t.Run("cancel action from checkin", func(t *testing.T) { @@ -522,10 +513,7 @@ func TestFleetGateway(t *testing.T) { defer cancel() log, _ := logger.New("tst", false) - - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + stateStore := newStateStore(t, log) ts := time.Now().UTC().Round(time.Second) queue := &mockQueue{} @@ -535,20 +523,17 @@ func TestFleetGateway(t *testing.T) { // queue.Cancel does not need to be mocked here as it is ran in the cancel action dispatcher. gateway, err := newFleetGatewayWithScheduler( - ctx, log, settings, agentInfo, client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) waitFn := ackSeq( @@ -578,52 +563,16 @@ func TestFleetGateway(t *testing.T) { }), ) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) scheduler.Next() waitFn() queue.AssertExpectations(t) - }) - t.Run("send event and receive no action", withGateway(agentInfo, settings, func( - t *testing.T, - gateway gateway.FleetGateway, - client *testingClient, - dispatcher *testingDispatcher, - scheduler *scheduler.Stepper, - rep repo.Backend, - ) { - _ = rep.Report(context.Background(), &testStateEvent{}) - waitFn := ackSeq( - client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { - cr := &request{} - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal(content, &cr) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, 1, len(cr.Events)) - - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) - return resp, nil - }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), - ) - err := gateway.Start() + cancel() + err = <-errCh require.NoError(t, err) - - // Synchronize scheduler and acking of calls from the worker go routine. - scheduler.Next() - waitFn() - })) + }) t.Run("Test the wait loop is interruptible", func(t *testing.T) { // 20mins is the double of the base timeout values for golang test suites. 
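The interruptibility exercised by this test comes from the way the gateway's Run method (earlier in this diff) bridges context cancellation onto the done channel consumed by the backoff. The core of that pattern, sketched in isolation with a hypothetical helper name:

    // bridgeCancel returns a channel that closes when ctx is cancelled; the
    // equal-jitter backoff takes this channel and aborts any Wait() in progress.
    func bridgeCancel(ctx context.Context) chan struct{} {
    	done := make(chan struct{})
    	go func() {
    		<-ctx.Done() // cancelled or timed out
    		close(done)  // unblocks backoff.Wait()
    	}()
    	return done
    }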
@@ -634,18 +583,15 @@ func TestFleetGateway(t *testing.T) { dispatcher := newTestingDispatcher() ctx, cancel := context.WithCancel(context.Background()) - log, _ := logger.New("tst", false) - diskStore := storage.NewDiskStore(paths.AgentStateStoreFile()) - stateStore, err := store.NewStateStore(log, diskStore) - require.NoError(t, err) + log, _ := logger.New("tst", false) + stateStore := newStateStore(t, log) queue := &mockQueue{} queue.On("DequeueActions").Return([]fleetapi.Action{}) queue.On("Actions").Return([]fleetapi.Action{}) gateway, err := newFleetGatewayWithScheduler( - ctx, log, &fleetGatewaySettings{ Duration: d, @@ -655,13 +601,11 @@ func TestFleetGateway(t *testing.T) { client, dispatcher, scheduler, - getReporter(agentInfo, log, t), - noopacker.NewAcker(), - &noopController{}, + noop.New(), + &emptyStateFetcher{}, stateStore, queue, ) - require.NoError(t, err) ch1 := dispatcher.Answer(func(actions ...fleetapi.Action) error { return nil }) @@ -670,8 +614,7 @@ func TestFleetGateway(t *testing.T) { return resp, nil }) - err = gateway.Start() - require.NoError(t, err) + errCh := runFleetGateway(ctx, gateway) // Silently dispatch action. go func() { @@ -694,6 +637,8 @@ func TestFleetGateway(t *testing.T) { // 2. WaitTick() will block for 20 minutes. // 3. Stop will should unblock the wait. cancel() + err = <-errCh + require.NoError(t, err) }) } @@ -712,16 +657,16 @@ func TestRetriesOnFailures(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil } clientWaitFn := client.Answer(fail) - err := gateway.Start() - require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) + errCh := runFleetGateway(ctx, gateway) // Initial tick is done out of bound so we can block on channels. scheduler.Next() @@ -734,18 +679,6 @@ func TestRetriesOnFailures(t *testing.T) { // API recover waitFn := ackSeq( client.Answer(func(_ http.Header, body io.Reader) (*http.Response, error) { - cr := &request{} - content, err := ioutil.ReadAll(body) - if err != nil { - t.Fatal(err) - } - err = json.Unmarshal(content, &cr) - if err != nil { - t.Fatal(err) - } - - require.Equal(t, 1, len(cr.Events)) - resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), @@ -757,6 +690,10 @@ func TestRetriesOnFailures(t *testing.T) { ) waitFn() + + cancel() + err := <-errCh + require.NoError(t, err) })) t.Run("The retry loop is interruptible", @@ -769,16 +706,16 @@ func TestRetriesOnFailures(t *testing.T) { client *testingClient, dispatcher *testingDispatcher, scheduler *scheduler.Stepper, - rep repo.Backend, ) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + fail := func(_ http.Header, _ io.Reader) (*http.Response, error) { return wrapStrToResp(http.StatusInternalServerError, "something is bad"), nil } waitChan := client.Answer(fail) - err := gateway.Start() - require.NoError(t, err) - _ = rep.Report(context.Background(), &testStateEvent{}) + errCh := runFleetGateway(ctx, gateway) // Initial tick is done out of bound so we can block on channels. scheduler.Next() @@ -787,32 +724,59 @@ func TestRetriesOnFailures(t *testing.T) { // delay. <-waitChan - // non-obvious but withGateway on return will stop the gateway before returning and we should - // exit the retry loop. 
The init value of the backoff is set to exceed the test default timeout. + cancel() + err := <-errCh + require.NoError(t, err) })) } -func getReporter(info agentInfo, log *logger.Logger, t *testing.T) *fleetreporter.Reporter { - fleetR, err := fleetreporter.NewReporter(info, log, fleetreporterConfig.DefaultConfig()) - if err != nil { - t.Fatal(errors.Wrap(err, "fail to create reporters")) - } +type testAgentInfo struct{} + +func (testAgentInfo) AgentID() string { return "agent-secret" } + +type emptyStateFetcher struct{} - return fleetR +func (e *emptyStateFetcher) State() coordinator.State { + return coordinator.State{} } -type testAgentInfo struct{} +func runFleetGateway(ctx context.Context, g gateway.FleetGateway) <-chan error { + done := make(chan bool) + errCh := make(chan error, 1) + go func() { + err := g.Run(ctx) + close(done) + if err != nil && !errors.Is(err, context.Canceled) { + errCh <- err + } else { + errCh <- nil + } + }() + go func() { + for { + select { + case <-done: + return + case <-g.Errors(): + // ignore errors here + } + } + }() + return errCh +} -func (testAgentInfo) AgentID() string { return "agent-secret" } +func newStateStore(t *testing.T, log *logger.Logger) *store.StateStore { + dir, err := ioutil.TempDir("", "fleet-gateway-unit-test") + require.NoError(t, err) -type testStateEvent struct{} + filename := filepath.Join(dir, "state.enc") + diskStore := storage.NewDiskStore(filename) + stateStore, err := store.NewStateStore(log, diskStore) + require.NoError(t, err) -func (testStateEvent) Type() string { return repo.EventTypeState } -func (testStateEvent) SubType() string { return repo.EventSubTypeInProgress } -func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } -func (testStateEvent) Message() string { return "hello" } -func (testStateEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} } + t.Cleanup(func() { + os.RemoveAll(dir) + }) -type request struct { - Events []interface{} `json:"events"` + return stateStore } diff --git a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go b/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go deleted file mode 100644 index d5097655a63..00000000000 --- a/internal/pkg/agent/application/gateway/fleet/noop_status_controller.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package fleet - -import ( - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" -) - -type noopController struct{} - -func (*noopController) RegisterComponent(_ string) status.Reporter { return &noopReporter{} } -func (*noopController) RegisterComponentWithPersistance(_ string, _ bool) status.Reporter { - return &noopReporter{} -} -func (*noopController) RegisterApp(_ string, _ string) status.Reporter { return &noopReporter{} } -func (*noopController) Status() status.AgentStatus { return status.AgentStatus{Status: status.Healthy} } -func (*noopController) StatusCode() status.AgentStatusCode { return status.Healthy } -func (*noopController) UpdateStateID(_ string) {} -func (*noopController) StatusString() string { return "online" } - -type noopReporter struct{} - -func (*noopReporter) Update(_ state.Status, _ string, _ map[string]interface{}) {} -func (*noopReporter) Unregister() {} diff --git a/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go b/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go deleted file mode 100644 index 763f003b25f..00000000000 --- a/internal/pkg/agent/application/gateway/fleetserver/fleet_gateway_local.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleetserver - -import ( - "context" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const gatewayWait = 2 * time.Second - -var injectFleetServerInput = map[string]interface{}{ - // outputs is replaced by the fleet-server.spec - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "type": "elasticsearch", - "hosts": []string{"localhost:9200"}, - }, - }, - "inputs": []interface{}{ - map[string]interface{}{ - "type": "fleet-server", - }, - }, -} - -// fleetServerWrapper wraps the fleetGateway to ensure that a local Fleet Server is running before trying -// to communicate with the gateway, which is local to the Elastic Agent. -type fleetServerWrapper struct { - bgContext context.Context - log *logger.Logger - cfg *configuration.FleetAgentConfig - injectedCfg *config.Config - wrapped gateway.FleetGateway - emitter pipeline.EmitterFunc -} - -// New creates a new fleet server gateway wrapping another fleet gateway. 
-func New(
-	ctx context.Context,
-	log *logger.Logger,
-	cfg *configuration.FleetAgentConfig,
-	rawConfig *config.Config,
-	wrapped gateway.FleetGateway,
-	emitter pipeline.EmitterFunc,
-	injectServer bool) (gateway.FleetGateway, error) {
-	if cfg.Server == nil || !injectServer {
-		// not running a local Fleet Server
-		return wrapped, nil
-	}
-
-	injectedCfg, err := injectFleetServer(rawConfig)
-	if err != nil {
-		return nil, errors.New(err, "failed to inject fleet-server input to start local Fleet Server", errors.TypeConfig)
-	}
-
-	return &fleetServerWrapper{
-		bgContext:   ctx,
-		log:         log,
-		cfg:         cfg,
-		injectedCfg: injectedCfg,
-		wrapped:     wrapped,
-		emitter:     emitter,
-	}, nil
-}
-
-// Start starts the gateway.
-func (w *fleetServerWrapper) Start() error {
-	err := w.emitter(context.Background(), w.injectedCfg)
-	if err != nil {
-		return err
-	}
-	sleep(w.bgContext, gatewayWait)
-	return w.wrapped.Start()
-}
-
-// SetClient sets the client for the wrapped gateway.
-func (w *fleetServerWrapper) SetClient(c client.Sender) {
-	w.wrapped.SetClient(c)
-}
-
-func injectFleetServer(rawConfig *config.Config) (*config.Config, error) {
-	cfg := map[string]interface{}{}
-	err := rawConfig.Unpack(cfg)
-	if err != nil {
-		return nil, err
-	}
-	cloned, err := config.NewConfigFrom(cfg)
-	if err != nil {
-		return nil, err
-	}
-	err = cloned.Merge(injectFleetServerInput)
-	if err != nil {
-		return nil, err
-	}
-	return cloned, nil
-}
-
-func sleep(ctx context.Context, d time.Duration) {
-	t := time.NewTimer(d)
-	defer t.Stop()
-	select {
-	case <-ctx.Done():
-	case <-t.C:
-	}
-}
diff --git a/internal/pkg/agent/application/gateway/gateway.go b/internal/pkg/agent/application/gateway/gateway.go
index 47591a4a04e..d43dd32a0c2 100644
--- a/internal/pkg/agent/application/gateway/gateway.go
+++ b/internal/pkg/agent/application/gateway/gateway.go
@@ -4,16 +4,23 @@
 
 package gateway
 
-import "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client"
+import (
+	"context"
+
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/client"
+)
 
 // FleetGateway is a gateway between the Agent and the Fleet API; it takes care of all the
 // bidirectional communication requirements. The gateway aggregates events and will periodically
 // call the API to send the events and will receive actions to be executed locally.
 // The only supported action for now is an "ActionPolicyChange".
 type FleetGateway interface {
-	// Start starts the gateway.
-	Start() error
+	// Run runs the gateway.
+	Run(ctx context.Context) error
+
+	// Errors returns the channel to watch for reported errors.
+	Errors() <-chan error
 
-	// Set the client for the gateway.
+	// SetClient sets the client for the gateway.
 	SetClient(client.Sender)
 }
diff --git a/internal/pkg/agent/application/local_mode.go b/internal/pkg/agent/application/local_mode.go
deleted file mode 100644
index 29f311fe582..00000000000
--- a/internal/pkg/agent/application/local_mode.go
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
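With the Start()/stop plumbing gone, the FleetGateway contract shown in the gateway.go hunk above is Run(ctx) plus an Errors() channel for non-fatal failures. A self-contained sketch of a type satisfying that shape follows; SetClient is omitted for brevity, and checkinGateway with its always-failing checkin is purely illustrative, not the patch's implementation.

package main

import (
	"context"
	"fmt"
	"time"
)

// checkinGateway illustrates the new shape: Run owns the loop and only
// returns when the context ends, while non-fatal problems are published
// on Errors instead of terminating Run.
type checkinGateway struct {
	period time.Duration
	errCh  chan error
}

func newCheckinGateway(period time.Duration) *checkinGateway {
	return &checkinGateway{period: period, errCh: make(chan error)}
}

func (g *checkinGateway) Run(ctx context.Context) error {
	t := time.NewTicker(g.period)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-t.C:
			if err := g.checkin(ctx); err != nil {
				// Report and keep going; only cancellation stops Run.
				select {
				case g.errCh <- err:
				case <-ctx.Done():
					return ctx.Err()
				}
			}
		}
	}
}

func (g *checkinGateway) Errors() <-chan error { return g.errCh }

// checkin always fails in this sketch, standing in for a real Fleet
// checkin request.
func (g *checkinGateway) checkin(_ context.Context) error {
	return fmt.Errorf("checkin not implemented")
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 120*time.Millisecond)
	defer cancel()

	g := newCheckinGateway(40 * time.Millisecond)
	go func() {
		for err := range g.Errors() {
			fmt.Println("gateway error:", err)
		}
	}()
	fmt.Println("run ended:", g.Run(ctx))
}

The design point is that Run only ever returns because its context ended; transient checkin failures travel over Errors() instead of killing the loop, which is why callers must drain that channel.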
- -package application - -import ( - "context" - "path/filepath" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/dir" - acker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -type discoverFunc func() ([]string, error) - -// ErrNoConfiguration is returned when no configuration are found. -var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) - -// Local represents a standalone agents, that will read his configuration directly from disk. -// Some part of the configuration can be reloaded. -type Local struct { - bgContext context.Context - cancelCtxFn context.CancelFunc - log *logger.Logger - router pipeline.Router - source source - agentInfo *info.AgentInfo - srv *server.Server -} - -type source interface { - Start() error - Stop() error -} - -// newLocal return a agent managed by local configuration. 
-func newLocal( - ctx context.Context, - log *logger.Logger, - pathConfigFile string, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, - uc upgraderControl, - agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*Local, error) { - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) - if err != nil { - return nil, err - } - - cfg, err := configuration.NewFromConfig(rawConfig) - if err != nil { - return nil, err - } - - if log == nil { - log, err = logger.NewFromConfig("", cfg.Settings.LoggingConfig, true) - if err != nil { - return nil, err - } - } - - logR := logreporter.NewReporter(log) - - localApplication := &Local{ - log: log, - agentInfo: agentInfo, - } - - localApplication.bgContext, localApplication.cancelCtxFn = context.WithCancel(ctx) - localApplication.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer) - if err != nil { - return nil, errors.New(err, "initialize GRPC listener") - } - - reporter := reporting.NewReporter(localApplication.bgContext, log, localApplication.agentInfo, logR) - - monitor, err := monitoring.NewMonitor(cfg.Settings) - if err != nil { - return nil, errors.New(err, "failed to initialize monitoring") - } - - router, err := router.New(log, stream.Factory(localApplication.bgContext, agentInfo, cfg.Settings, localApplication.srv, reporter, monitor, statusCtrl)) - if err != nil { - return nil, errors.New(err, "fail to initialize pipeline router") - } - localApplication.router = router - - composableCtrl, err := composable.New(log, rawConfig) - if err != nil { - return nil, errors.New(err, "failed to initialize composable controller") - } - - discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) - emit, err := emitter.New( - localApplication.bgContext, - log, - agentInfo, - composableCtrl, - router, - &pipeline.ConfigModifiers{ - Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}, - Filters: []pipeline.FilterFunc{filters.StreamChecker}, - }, - caps, - monitor, - ) - if err != nil { - return nil, err - } - - loader := config.NewLoader(log, externalConfigsGlob()) - - var cfgSource source - if !cfg.Settings.Reload.Enabled { - log.Debug("Reloading of configuration is off") - cfgSource = newOnce(log, discover, loader, emit) - } else { - log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period) - cfgSource = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader, emit) - } - - localApplication.source = cfgSource - - // create a upgrader to use in local mode - upgrader := upgrade.NewUpgrader( - agentInfo, - cfg.Settings.DownloadConfig, - log, - []context.CancelFunc{localApplication.cancelCtxFn}, - reexec, - acker.NewAcker(), - reporter, - caps) - uc.SetUpgrader(upgrader) - - return localApplication, nil -} - -func externalConfigsGlob() string { - return filepath.Join(paths.Config(), configuration.ExternalInputsPattern) -} - -// Routes returns a list of routes handled by agent. -func (l *Local) Routes() *sorted.Set { - return l.router.Routes() -} - -// Start starts a local agent. -func (l *Local) Start() error { - l.log.Info("Agent is starting") - defer l.log.Info("Agent is stopped") - - if err := l.srv.Start(); err != nil { - return err - } - if err := l.source.Start(); err != nil { - return err - } - - return nil -} - -// Stop stops a local agent. 
-func (l *Local) Stop() error { - err := l.source.Stop() - l.cancelCtxFn() - l.router.Shutdown() - l.srv.Stop() - return err -} - -// AgentInfo retrieves agent information. -func (l *Local) AgentInfo() *info.AgentInfo { - return l.agentInfo -} - -func discoverer(patterns ...string) discoverFunc { - var p []string - for _, newP := range patterns { - if len(newP) == 0 { - continue - } - - p = append(p, newP) - } - - if len(p) == 0 { - return func() ([]string, error) { - return []string{}, ErrNoConfiguration - } - } - - return func() ([]string, error) { - return dir.DiscoverFiles(p...) - } -} diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index d334ae0198c..893b7541606 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -7,90 +7,53 @@ package application import ( "context" "fmt" + "time" - "go.elastic.co/apm" - - "github.com/elastic/go-sysinfo" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/actions/handlers" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" fleetgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleet" - localgateway "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway/fleetserver" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions/handlers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/dispatcher" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/stream" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/fleet" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/lazy" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/retrier" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" + fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/queue" - 
reporting "github.com/elastic/elastic-agent/internal/pkg/reporter" - fleetreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet" - logreporter "github.com/elastic/elastic-agent/internal/pkg/reporter/log" - "github.com/elastic/elastic-agent/internal/pkg/sorted" + "github.com/elastic/elastic-agent/internal/pkg/remote" + "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" ) -type stateStore interface { - Add(fleetapi.Action) - AckToken() string - SetAckToken(ackToken string) - Save() error - Actions() []fleetapi.Action - Queue() []fleetapi.Action -} - -// Managed application, when the application is run in managed mode, most of the configuration are -// coming from the Fleet App. -type Managed struct { - bgContext context.Context - cancelCtxFn context.CancelFunc +type managedConfigManager struct { log *logger.Logger - Config configuration.FleetAgentConfig agentInfo *info.AgentInfo - gateway gateway.FleetGateway - router pipeline.Router - srv *server.Server - stateStore stateStore - upgrader *upgrade.Upgrader + cfg *configuration.Configuration + client *remote.Client + store storage.Store + stateStore *store.StateStore + actionQueue *queue.ActionQueue + runtime *runtime.Manager + coord *coordinator.Coordinator + + ch chan coordinator.ConfigChange + errCh chan error } -func newManaged( - ctx context.Context, +func newManagedConfigManager( log *logger.Logger, - storeSaver storage.Store, - cfg *configuration.Configuration, - rawConfig *config.Config, - reexec reexecManager, - statusCtrl status.Controller, agentInfo *info.AgentInfo, - tracer *apm.Tracer, -) (*Managed, error) { - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, statusCtrl) - if err != nil { - return nil, err - } - - client, err := client.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) + cfg *configuration.Configuration, + storeSaver storage.Store, + runtime *runtime.Manager, +) (*managedConfigManager, error) { + client, err := fleetclient.NewAuthWithConfig(log, cfg.Fleet.AccessAPIKey, cfg.Fleet.Client) if err != nil { return nil, errors.New(err, "fail to create API client", @@ -98,115 +61,219 @@ func newManaged( errors.M(errors.MetaKeyURI, cfg.Fleet.Client.Host)) } - sysInfo, err := sysinfo.Host() + // Create the state store that will persist the last good policy change on disk. 
+	stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile())
 	if err != nil {
-		return nil, errors.New(err,
-			"fail to get system information",
-			errors.TypeUnexpected)
+		return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile()))
 	}
 
-	managedApplication := &Managed{
-		log:       log,
-		agentInfo: agentInfo,
+	actionQueue, err := queue.NewActionQueue(stateStore.Queue())
+	if err != nil {
+		return nil, fmt.Errorf("unable to initialize action queue: %w", err)
 	}
 
-	managedApplication.bgContext, managedApplication.cancelCtxFn = context.WithCancel(ctx)
-	managedApplication.srv, err = server.NewFromConfig(log, cfg.Settings.GRPC, &operation.ApplicationStatusHandler{}, tracer)
-	if err != nil {
-		return nil, errors.New(err, "initialize GRPC listener", errors.TypeNetwork)
+	return &managedConfigManager{
+		log:         log,
+		agentInfo:   agentInfo,
+		cfg:         cfg,
+		client:      client,
+		store:       storeSaver,
+		stateStore:  stateStore,
+		actionQueue: actionQueue,
+		runtime:     runtime,
+		ch:          make(chan coordinator.ConfigChange),
+		errCh:       make(chan error),
+	}, nil
+}
+
+func (m *managedConfigManager) Run(ctx context.Context) error {
+	// Ensure the manager was set up correctly by the application (the actionDispatcher and coord must be set manually).
+	if m.coord == nil {
+		return errors.New("coord must be set before calling Run")
 	}
-	// must start before `Start` is called as Fleet will already try to start applications
-	// before `Start` is even called.
-	err = managedApplication.srv.Start()
-	if err != nil {
-		return nil, errors.New(err, "starting GRPC listener", errors.TypeNetwork)
+
+	// The agent was previously un-enrolled, so do nothing.
+	if m.wasUnenrolled() {
+		m.log.Warnf("Elastic Agent was previously unenrolled. To reactivate please reconfigure or enroll again.")
+		return nil
 	}
 
-	logR := logreporter.NewReporter(log)
-	fleetR, err := fleetreporter.NewReporter(agentInfo, log, cfg.Fleet.Reporting)
-	if err != nil {
-		return nil, errors.New(err, "fail to create reporters")
+	// Reload ID because of win7 sync issue
+	if err := m.agentInfo.ReloadID(); err != nil {
+		return err
 	}
-	combinedReporter := reporting.NewReporter(managedApplication.bgContext, log, agentInfo, logR, fleetR)
 
-	monitor, err := monitoring.NewMonitor(cfg.Settings)
+	// Create context that is cancelled on unenroll.
+	gatewayCtx, gatewayCancel := context.WithCancel(ctx)
+	defer gatewayCancel()
+
+	// Create the actionDispatcher.
+	actionDispatcher, policyChanger, err := newManagedActionDispatcher(m, gatewayCancel)
 	if err != nil {
-		return nil, errors.New(err, "failed to initialize monitoring")
+		return err
 	}
 
-	router, err := router.New(log, stream.Factory(managedApplication.bgContext, agentInfo, cfg.Settings, managedApplication.srv, combinedReporter, monitor, statusCtrl))
+	// Create ackers to enqueue/retry failed acks
+	ack, err := fleet.NewAcker(m.log, m.agentInfo, m.client)
 	if err != nil {
-		return nil, errors.New(err, "fail to initialize pipeline router")
+		return fmt.Errorf("failed to create acker: %w", err)
 	}
-	managedApplication.router = router
+	retrier := retrier.New(ack, m.log)
+	batchedAcker := lazy.NewAcker(ack, m.log, lazy.WithRetrier(retrier))
+	actionAcker := store.NewStateStoreActionAcker(batchedAcker, m.stateStore)
+
+	// Run the retrier.
+ retrierRun := make(chan bool) + retrierCtx, retrierCancel := context.WithCancel(ctx) + defer func() { + retrierCancel() + <-retrierRun + }() + go func() { + retrier.Run(retrierCtx) + close(retrierRun) + }() - composableCtrl, err := composable.New(log, rawConfig) - if err != nil { - return nil, errors.New(err, "failed to initialize composable controller") + actions := m.stateStore.Actions() + stateRestored := false + if len(actions) > 0 && !m.wasUnenrolled() { + // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a + // persisted action on disk we should be able to ask Fleet to get the latest configuration. + // But at the moment this is not possible because the policy change was acked. + if err := store.ReplayActions(ctx, m.log, actionDispatcher, actionAcker, actions...); err != nil { + m.log.Errorf("could not recover state, error %+v, skipping...", err) + } + stateRestored = true + } + + // In the case this is the first start and this Elastic Agent is running a Fleet Server; we need to ensure that + // the Fleet Server is running before the Fleet gateway is started. + if !stateRestored && m.cfg.Fleet.Server != nil { + err = m.initFleetServer(ctx) + if err != nil { + return fmt.Errorf("failed to initialize Fleet Server: %w", err) + } } - emit, err := emitter.New( - managedApplication.bgContext, - log, - agentInfo, - composableCtrl, - router, - &pipeline.ConfigModifiers{ - Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}, - Filters: []pipeline.FilterFunc{filters.StreamChecker, modifiers.InjectFleet(rawConfig, sysInfo.Info(), agentInfo)}, - }, - caps, - monitor, + gateway, err := fleetgateway.New( + m.log, + m.agentInfo, + m.client, + actionDispatcher, + actionAcker, + m.coord, + m.stateStore, + m.actionQueue, ) if err != nil { - return nil, err + return err } - acker, err := fleet.NewAcker(log, agentInfo, client) - if err != nil { - return nil, err + + // Not running a Fleet Server so the gateway and acker can be changed based on the configuration change. + if m.cfg.Fleet.Server == nil { + policyChanger.AddSetter(gateway) + policyChanger.AddSetter(ack) } - // Create ack retrier that is used by lazyAcker to enqueue/retry failed acks - retrier := retrier.New(acker, log) - // Run acking retrier. The lazy acker sends failed actions acks to retrier. - go retrier.Run(ctx) + // Proxy errors from the gateway to our own channel. + go func() { + for { + select { + case <-ctx.Done(): + return + case err := <-gateway.Errors(): + m.errCh <- err + } + } + }() + + // Run the gateway. + gatewayRun := make(chan bool) + gatewayErrCh := make(chan error) + defer func() { + gatewayCancel() + <-gatewayRun + }() + go func() { + err := gateway.Run(gatewayCtx) + close(gatewayRun) + gatewayErrCh <- err + }() + + <-ctx.Done() + return <-gatewayErrCh +} + +func (m *managedConfigManager) Errors() <-chan error { + return m.errCh +} - batchedAcker := lazy.NewAcker(acker, log, lazy.WithRetrier(retrier)) +func (m *managedConfigManager) Watch() <-chan coordinator.ConfigChange { + return m.ch +} - // Create the state store that will persist the last good policy change on disk. 
- stateStore, err := store.NewStateStoreWithMigration(log, paths.AgentActionStoreFile(), paths.AgentStateStoreFile()) - if err != nil { - return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile())) +func (m *managedConfigManager) wasUnenrolled() bool { + actions := m.stateStore.Actions() + for _, a := range actions { + if a.Type() == "UNENROLL" { + return true + } } - managedApplication.stateStore = stateStore - actionAcker := store.NewStateStoreActionAcker(batchedAcker, stateStore) + return false +} - actionQueue, err := queue.NewActionQueue(stateStore.Queue()) - if err != nil { - return nil, fmt.Errorf("unable to initialize action queue: %w", err) +func (m *managedConfigManager) initFleetServer(ctx context.Context) error { + ctx, cancel := context.WithTimeout(ctx, 15*time.Second) + defer cancel() + + m.log.Debugf("injecting basic fleet-server for first start") + select { + case <-ctx.Done(): + return ctx.Err() + case m.ch <- &localConfigChange{injectFleetServerInput}: } - actionDispatcher, err := dispatcher.New(managedApplication.bgContext, log, handlers.NewDefault(log)) - if err != nil { - return nil, err + m.log.Debugf("watching fleet-server-default component state") + sub := m.runtime.Subscribe(ctx, "fleet-server-default") + for { + select { + case <-ctx.Done(): + return ctx.Err() + case state := <-sub.Ch(): + if fleetServerRunning(state) { + m.log.With("state", state).Debugf("fleet-server-default component is running") + return nil + } + m.log.With("state", state).Debugf("fleet-server-default component is not running") + } + } +} + +func fleetServerRunning(state runtime.ComponentState) bool { + if state.State == client.UnitStateHealthy || state.State == client.UnitStateDegraded { + for key, unit := range state.Units { + if key.UnitType == client.UnitTypeInput && key.UnitID == "fleet-server-default-fleet-server" { + if unit.State == client.UnitStateHealthy || unit.State == client.UnitStateDegraded { + return true + } + } + } } + return false +} - managedApplication.upgrader = upgrade.NewUpgrader( - agentInfo, - cfg.Settings.DownloadConfig, - log, - []context.CancelFunc{managedApplication.cancelCtxFn}, - reexec, - acker, - combinedReporter, - caps) +func newManagedActionDispatcher(m *managedConfigManager, canceller context.CancelFunc) (*dispatcher.ActionDispatcher, *handlers.PolicyChange, error) { + actionDispatcher, err := dispatcher.New(m.log, handlers.NewDefault(m.log)) + if err != nil { + return nil, nil, err + } policyChanger := handlers.NewPolicyChange( - log, - emit, - agentInfo, - cfg, - storeSaver, + m.log, + m.agentInfo, + m.cfg, + m.store, + m.ch, ) actionDispatcher.MustRegister( @@ -216,146 +283,50 @@ func newManaged( actionDispatcher.MustRegister( &fleetapi.ActionPolicyReassign{}, - handlers.NewPolicyReassign(log), + handlers.NewPolicyReassign(m.log), ) actionDispatcher.MustRegister( &fleetapi.ActionUnenroll{}, handlers.NewUnenroll( - log, - emit, - router, - []context.CancelFunc{managedApplication.cancelCtxFn}, - stateStore, + m.log, + m.ch, + []context.CancelFunc{canceller}, + m.stateStore, ), ) actionDispatcher.MustRegister( &fleetapi.ActionUpgrade{}, - handlers.NewUpgrade(log, managedApplication.upgrader), + handlers.NewUpgrade(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionSettings{}, handlers.NewSettings( - log, - reexec, - agentInfo, + m.log, + m.agentInfo, + m.coord, ), ) actionDispatcher.MustRegister( &fleetapi.ActionCancel{}, handlers.NewCancel( - log, - actionQueue, + m.log, + m.actionQueue, ), ) 
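The registration block above wires exactly one handler per concrete fleetapi action type, and the handlers only receive the dependencies they need from the manager. A toy version of that type-keyed dispatch is sketched below; the names (Action, Handler, dispatcher) are illustrative, and the duplicate-registration panic is an assumption about what MustRegister's "must" implies, namely that wiring mistakes should fail loudly at startup.

package main

import (
	"context"
	"fmt"
	"reflect"
)

// Action and Handler mirror the fleetapi action dispatch in miniature.
type Action interface{ Type() string }

type Handler interface {
	Handle(ctx context.Context, a Action) error
}

type dispatcher struct {
	handlers map[string]Handler // keyed by concrete action type
}

func newDispatcher() *dispatcher {
	return &dispatcher{handlers: map[string]Handler{}}
}

// MustRegister panics on duplicate registration so a wiring mistake
// becomes an immediate startup failure rather than a silent drop.
func (d *dispatcher) MustRegister(a Action, h Handler) {
	k := reflect.TypeOf(a).String()
	if _, dup := d.handlers[k]; dup {
		panic(fmt.Sprintf("handler already registered for %s", k))
	}
	d.handlers[k] = h
}

func (d *dispatcher) Dispatch(ctx context.Context, actions ...Action) error {
	for _, a := range actions {
		h, ok := d.handlers[reflect.TypeOf(a).String()]
		if !ok {
			return fmt.Errorf("no handler for action %s", a.Type())
		}
		if err := h.Handle(ctx, a); err != nil {
			return err
		}
	}
	return nil
}

type upgradeAction struct{}

func (upgradeAction) Type() string { return "UPGRADE" }

type logHandler struct{}

func (logHandler) Handle(_ context.Context, a Action) error {
	fmt.Println("handling", a.Type())
	return nil
}

func main() {
	d := newDispatcher()
	d.MustRegister(&upgradeAction{}, logHandler{})
	_ = d.Dispatch(context.Background(), &upgradeAction{})
}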
actionDispatcher.MustRegister( &fleetapi.ActionApp{}, - handlers.NewAppAction(log, managedApplication.srv), + handlers.NewAppAction(m.log, m.coord), ) actionDispatcher.MustRegister( &fleetapi.ActionUnknown{}, - handlers.NewUnknown(log), - ) - - actions := stateStore.Actions() - stateRestored := false - if len(actions) > 0 && !managedApplication.wasUnenrolled() { - // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a - // persisted action on disk we should be able to ask Fleet to get the latest configuration. - // But at the moment this is not possible because the policy change was acked. - if err := store.ReplayActions(ctx, log, actionDispatcher, actionAcker, actions...); err != nil { - log.Errorf("could not recover state, error %+v, skipping...", err) - } - stateRestored = true - } - - gateway, err := fleetgateway.New( - managedApplication.bgContext, - log, - agentInfo, - client, - actionDispatcher, - fleetR, - actionAcker, - statusCtrl, - stateStore, - actionQueue, + handlers.NewUnknown(m.log), ) - if err != nil { - return nil, err - } - gateway, err = localgateway.New(managedApplication.bgContext, log, cfg.Fleet, rawConfig, gateway, emit, !stateRestored) - if err != nil { - return nil, err - } - // add the acker and gateway to setters, so the they can be updated - // when the hosts for Fleet Server are updated by the policy. - if cfg.Fleet.Server == nil { - // setters only set when not running a local Fleet Server - policyChanger.AddSetter(gateway) - policyChanger.AddSetter(acker) - } - managedApplication.gateway = gateway - return managedApplication, nil -} - -// Routes returns a list of routes handled by agent. -func (m *Managed) Routes() *sorted.Set { - return m.router.Routes() -} - -// Start starts a managed elastic-agent. -func (m *Managed) Start() error { - m.log.Info("Agent is starting") - if m.wasUnenrolled() { - m.log.Warnf("agent was previously unenrolled. To reactivate please reconfigure or enroll again.") - return nil - } - - // reload ID because of win7 sync issue - if err := m.agentInfo.ReloadID(); err != nil { - return err - } - - err := m.upgrader.Ack(m.bgContext) - if err != nil { - m.log.Warnf("failed to ack update %v", err) - } - - err = m.gateway.Start() - if err != nil { - return err - } - return nil -} - -// Stop stops a managed elastic-agent. -func (m *Managed) Stop() error { - defer m.log.Info("Agent is stopped") - m.cancelCtxFn() - m.router.Shutdown() - m.srv.Stop() - return nil -} - -// AgentInfo retrieves elastic-agent information. -func (m *Managed) AgentInfo() *info.AgentInfo { - return m.agentInfo -} - -func (m *Managed) wasUnenrolled() bool { - actions := m.stateStore.Actions() - for _, a := range actions { - if a.Type() == "UNENROLL" { - return true - } - } - - return false + return actionDispatcher, policyChanger, nil } diff --git a/internal/pkg/agent/application/managed_mode_test.go b/internal/pkg/agent/application/managed_mode_test.go deleted file mode 100644 index 847211dc079..00000000000 --- a/internal/pkg/agent/application/managed_mode_test.go +++ /dev/null @@ -1,303 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
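The initFleetServer/fleetServerRunning logic in the file above bounds the wait with a 15-second timeout and blocks on a runtime state subscription until the fleet-server unit reports healthy or degraded. A compact sketch of that wait-for-healthy pattern is below; componentState is a simplified stand-in for the runtime's component state, and the timeout mirrors the one used above.

package main

import (
	"context"
	"fmt"
	"time"
)

// componentState is a stand-in for the runtime component state used by
// initFleetServer; only the field needed for the wait is kept.
type componentState struct{ Healthy bool }

// waitHealthy blocks until the subscription reports a healthy state or
// the (timeout-bounded) context ends, whichever comes first.
func waitHealthy(ctx context.Context, states <-chan componentState) error {
	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
	defer cancel()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case s := <-states:
			if s.Healthy {
				return nil
			}
			// keep waiting; the component may still be starting
		}
	}
}

func main() {
	states := make(chan componentState)
	go func() {
		states <- componentState{Healthy: false}
		states <- componentState{Healthy: true}
	}()
	if err := waitHealthy(context.Background(), states); err != nil {
		fmt.Println("component never became healthy:", err)
		return
	}
	fmt.Println("component is healthy")
}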
- -package application - -import ( - "context" - "encoding/json" - "testing" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - noopacker "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/actions/handlers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/dispatcher" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/router" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestManagedModeRouting(t *testing.T) { - - streams := make(map[pipeline.RoutingKey]pipeline.Stream) - streamFn := func(l *logger.Logger, r pipeline.RoutingKey) (pipeline.Stream, error) { - m := newMockStreamStore() - streams[r] = m - - return m, nil - } - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - log, _ := logger.New("", false) - router, _ := router.New(log, streamFn) - agentInfo, _ := info.NewAgentInfo(false) - nullStore := &storage.NullStore{} - composableCtrl, _ := composable.New(log, nil) - emit, err := emitter.New(ctx, log, agentInfo, composableCtrl, router, &pipeline.ConfigModifiers{Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}}, nil) - require.NoError(t, err) - - actionDispatcher, err := dispatcher.New(ctx, log, handlers.NewDefault(log)) - require.NoError(t, err) - - cfg := configuration.DefaultConfiguration() - actionDispatcher.MustRegister( - &fleetapi.ActionPolicyChange{}, - handlers.NewPolicyChange( - log, - emit, - agentInfo, - cfg, - nullStore, - ), - ) - - actions, err := testActions() - require.NoError(t, err) - - err = actionDispatcher.Dispatch(context.Background(), noopacker.NewAcker(), actions...) - require.NoError(t, err) - - // has 1 config request for fb, mb and monitoring? 
- assert.Equal(t, 1, len(streams)) - - defaultStreamStore, found := streams["default"] - assert.True(t, found, "default group not found") - assert.Equal(t, 1, len(defaultStreamStore.(*mockStreamStore).store)) - - confReq := defaultStreamStore.(*mockStreamStore).store[0] - assert.Equal(t, 3, len(confReq.ProgramNames())) - assert.Equal(t, modifiers.MonitoringName, confReq.ProgramNames()[2]) -} - -func testActions() ([]fleetapi.Action, error) { - checkinResponse := &fleetapi.CheckinResponse{} - if err := json.Unmarshal([]byte(fleetResponse), &checkinResponse); err != nil { - return nil, err - } - - return checkinResponse.Actions, nil -} - -type mockStreamStore struct { - store []configrequest.Request -} - -func newMockStreamStore() *mockStreamStore { - return &mockStreamStore{ - store: make([]configrequest.Request, 0), - } -} - -func (m *mockStreamStore) Execute(_ context.Context, cr configrequest.Request) error { - m.store = append(m.store, cr) - return nil -} - -func (m *mockStreamStore) Close() error { - return nil -} - -func (m *mockStreamStore) Shutdown() {} - -const fleetResponse = ` -{ - "action": "checkin", - "actions": [{ - "agent_id": "17e93530-7f42-11ea-9330-71e968b29fa4", - "type": "POLICY_CHANGE", - "data": { - "policy": { - "id": "86561d50-7f3b-11ea-9fab-3db3bdb4efa4", - "outputs": { - "default": { - "type": "elasticsearch", - "hosts": [ - "http://localhost:9200" - ], - "api_key": "pNr6fnEBupQ3-5oEEkWJ:FzhrQOzZSG-Vpsq9CGk4oA" - } - }, - - "inputs": [{ - "type": "system/metrics", - "enabled": true, - "streams": [{ - "id": "system/metrics-system.core", - "enabled": true, - "data_stream.dataset": "system.core", - "period": "10s", - "metrics": [ - "percentages" - ] - }, - { - "id": "system/metrics-system.cpu", - "enabled": true, - "data_stream.dataset": "system.cpu", - "period": "10s", - "metrics": [ - "percentages", - "normalized_percentages" - ] - }, - { - "id": "system/metrics-system.diskio", - "enabled": true, - "data_stream.dataset": "system.diskio", - "period": "10s", - "include_devices": [] - }, - { - "id": "system/metrics-system.entropy", - "enabled": true, - "data_stream.dataset": "system.entropy", - "period": "10s", - "include_devices": [] - }, - { - "id": "system/metrics-system.filesystem", - "enabled": true, - "data_stream.dataset": "system.filesystem", - "period": "1m", - "ignore_types": [] - }, - { - "id": "system/metrics-system.fsstat", - "enabled": true, - "data_stream.dataset": "system.fsstat", - "period": "1m", - "ignore_types": [] - }, - { - "id": "system/metrics-system.load", - "enabled": true, - "data_stream.dataset": "system.load", - "period": "10s" - }, - { - "id": "system/metrics-system.memory", - "enabled": true, - "data_stream.dataset": "system.memory", - "period": "10s" - }, - { - "id": "system/metrics-system.network", - "enabled": true, - "data_stream.dataset": "system.network", - "period": "10s" - }, - { - "id": "system/metrics-system.network_summary", - "enabled": true, - "data_stream.dataset": "system.network_summary", - "period": "10s" - }, - { - "id": "system/metrics-system.process", - "enabled": true, - "data_stream.dataset": "system.process", - "period": "10s", - "processes": [ - ".*" - ], - "include_top_n.enabled": true, - "include_top_n.by_cpu": 5, - "include_top_n.by_memory": 5, - "cmdline.cache.enabled": true, - "cgroups.enabled": true, - "env.whitelist": [], - "include_cpu_ticks": false - }, - { - "id": "system/metrics-system.process_summary", - "enabled": true, - "data_stream.dataset": "system.process_summary", - "period": "10s" - }, - { - 
"id": "system/metrics-system.raid", - "enabled": true, - "data_stream.dataset": "system.raid", - "period": "10s", - "mount_point": "/" - }, - { - "id": "system/metrics-system.service", - "enabled": true, - "data_stream.dataset": "system.service", - "period": "10s", - "state_filter": [] - }, - { - "id": "system/metrics-system.socket_summary", - "enabled": true, - "data_stream.dataset": "system.socket_summary", - "period": "10s" - }, - { - "id": "system/metrics-system.uptime", - "enabled": true, - "data_stream.dataset": "system.uptime", - "period": "15m" - }, - { - "id": "system/metrics-system.users", - "enabled": true, - "data_stream.dataset": "system.users", - "period": "10s" - } - ] - }, - { - "type": "logfile", - "enabled": true, - "streams": [{ - "id": "logs-system.auth", - "enabled": true, - "data_stream.dataset": "system.auth", - "paths": [ - "/var/log/auth.log*", - "/var/log/secure*" - ] - }, - { - "id": "logs-system.syslog", - "enabled": true, - "data_stream.dataset": "system.syslog", - "paths": [ - "/var/log/messages*", - "/var/log/syslog*" - ] - } - ] - } - ], - - "revision": 3, - "agent.monitoring": { - "use_output": "default", - "enabled": true, - "logs": true, - "metrics": true - } - } - }, - "id": "1c7e26a0-7f42-11ea-9330-71e968b29fa4", - "created_at": "2020-04-15T17:54:11.081Z" - }] -} - ` diff --git a/internal/pkg/agent/application/once.go b/internal/pkg/agent/application/once.go index 19a17c61df9..7326612950b 100644 --- a/internal/pkg/agent/application/once.go +++ b/internal/pkg/agent/application/once.go @@ -6,8 +6,10 @@ package application import ( "context" + "fmt" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -17,14 +19,15 @@ type once struct { log *logger.Logger discover discoverFunc loader *config.Loader - emitter pipeline.EmitterFunc + ch chan coordinator.ConfigChange + errCh chan error } -func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader, emitter pipeline.EmitterFunc) *once { - return &once{log: log, discover: discover, loader: loader, emitter: emitter} +func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader) *once { + return &once{log: log, discover: discover, loader: loader, ch: make(chan coordinator.ConfigChange), errCh: make(chan error)} } -func (o *once) Start() error { +func (o *once) Run(ctx context.Context) error { files, err := o.discover() if err != nil { return errors.New(err, "could not discover configuration files", errors.TypeConfig) @@ -34,18 +37,27 @@ func (o *once) Start() error { return ErrNoConfiguration } - return readfiles(context.Background(), files, o.loader, o.emitter) + cfg, err := readfiles(files, o.loader) + if err != nil { + return err + } + o.ch <- &localConfigChange{cfg} + <-ctx.Done() + return ctx.Err() +} + +func (o *once) Errors() <-chan error { + return o.errCh } -func (o *once) Stop() error { - return nil +func (o *once) Watch() <-chan coordinator.ConfigChange { + return o.ch } -func readfiles(ctx context.Context, files []string, loader *config.Loader, emitter pipeline.EmitterFunc) error { +func readfiles(files []string, loader *config.Loader) (*config.Config, error) { c, err := loader.Load(files) if err != nil { - return errors.New(err, "could not load or merge configuration", errors.TypeConfig) + return 
nil, fmt.Errorf("failed to load or merge configuration: %w", err)
 	}
-
-	return emitter(ctx, c)
+	return c, nil
 }
diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go
index 315f515a13c..79b114144cc 100644
--- a/internal/pkg/agent/application/paths/common.go
+++ b/internal/pkg/agent/application/paths/common.go
@@ -128,11 +128,12 @@ func Data() string {
 	return filepath.Join(Top(), "data")
 }
 
+// Components returns the component directory for Agent
 func Components() string {
 	return filepath.Join(Home(), "components")
 }
 
-// Logs returns a the log directory for Agent
+// Logs returns the log directory for Agent
 func Logs() string {
 	return logsPath
 }
diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go
index 10a3c26c11d..bb9f717a7af 100644
--- a/internal/pkg/agent/application/periodic.go
+++ b/internal/pkg/agent/application/periodic.go
@@ -9,7 +9,8 @@ import (
 	"strings"
 	"time"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
+
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 	"github.com/elastic/elastic-agent/internal/pkg/config"
 	"github.com/elastic/elastic-agent/internal/pkg/filewatcher"
@@ -19,35 +20,39 @@ import (
 type periodic struct {
 	log      *logger.Logger
 	period   time.Duration
-	done     chan struct{}
 	watcher  *filewatcher.Watch
 	loader   *config.Loader
-	emitter  pipeline.EmitterFunc
 	discover discoverFunc
+	ch       chan coordinator.ConfigChange
+	errCh    chan error
 }
 
-func (p *periodic) Start() error {
-	go func() {
-		if err := p.work(); err != nil {
-			p.log.Debugf("Failed to read configuration, error: %s", err)
+func (p *periodic) Run(ctx context.Context) error {
+	if err := p.work(); err != nil {
+		return err
+	}
+
+	t := time.NewTicker(p.period)
+	defer t.Stop()
+	for {
+		select {
+		case <-ctx.Done():
+			return ctx.Err()
+		case <-t.C:
 		}
-	WORK:
-		for {
-			t := time.NewTimer(p.period)
-			select {
-			case <-p.done:
-				t.Stop()
-				break WORK
-			case <-t.C:
-			}
-
-			if err := p.work(); err != nil {
-				p.log.Debugf("Failed to read configuration, error: %s", err)
-			}
+		if err := p.work(); err != nil {
+			return err
 		}
-	}()
-	return nil
+	}
+}
+
+func (p *periodic) Errors() <-chan error {
+	return p.errCh
+}
+
+func (p *periodic) Watch() <-chan coordinator.ConfigChange {
+	return p.ch
 }
 
 func (p *periodic) work() error {
@@ -92,30 +97,26 @@ func (p *periodic) work() error {
 		p.log.Debugf("Unchanged %d files: %s", len(s.Unchanged), strings.Join(s.Updated, ", "))
 	}
 
-	err := readfiles(context.Background(), files, p.loader, p.emitter)
+	cfg, err := readfiles(files, p.loader)
 	if err != nil {
 		// assume something went really wrong and invalidate any cache
 		// so we get a full new config on next tick.
p.watcher.Invalidate() - return errors.New(err, "could not emit configuration") + return err } + p.ch <- &localConfigChange{cfg} + return nil } p.log.Info("No configuration change") return nil } -func (p *periodic) Stop() error { - close(p.done) - return nil -} - func newPeriodic( log *logger.Logger, period time.Duration, discover discoverFunc, loader *config.Loader, - emitter pipeline.EmitterFunc, ) *periodic { w, err := filewatcher.New(log, filewatcher.DefaultComparer) @@ -127,10 +128,27 @@ func newPeriodic( return &periodic{ log: log, period: period, - done: make(chan struct{}), watcher: w, discover: discover, loader: loader, - emitter: emitter, + ch: make(chan coordinator.ConfigChange), + errCh: make(chan error), } } + +type localConfigChange struct { + cfg *config.Config +} + +func (l *localConfigChange) Config() *config.Config { + return l.cfg +} + +func (l *localConfigChange) Ack() error { + // do nothing + return nil +} + +func (l *localConfigChange) Fail(_ error) { + // do nothing +} diff --git a/internal/pkg/agent/application/pipeline/emitter/controller.go b/internal/pkg/agent/application/pipeline/emitter/controller.go deleted file mode 100644 index 7f83961586c..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/controller.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter - -import ( - "context" - "sync" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type reloadable interface { - Reload(cfg *config.Config) error -} - -// Controller is an emitter controller handling config updates. -type Controller struct { - logger *logger.Logger - agentInfo *info.AgentInfo - controller composable.Controller - router pipeline.Router - modifiers *pipeline.ConfigModifiers - reloadables []reloadable - caps capabilities.Capability - - // state - lock sync.RWMutex - updateLock sync.Mutex - config *config.Config - ast *transpiler.AST - vars []*transpiler.Vars -} - -// NewController creates a new emitter controller. -func NewController( - log *logger.Logger, - agentInfo *info.AgentInfo, - controller composable.Controller, - router pipeline.Router, - modifiers *pipeline.ConfigModifiers, - caps capabilities.Capability, - reloadables ...reloadable, -) *Controller { - init, _ := transpiler.NewVars(map[string]interface{}{}, nil) - - return &Controller{ - logger: log, - agentInfo: agentInfo, - controller: controller, - router: router, - modifiers: modifiers, - reloadables: reloadables, - vars: []*transpiler.Vars{init}, - caps: caps, - } -} - -// Update applies config change and performes all steps necessary to apply it. 
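After this rework, the once and periodic sources share the Run/Watch/Errors shape, and localConfigChange gives locally discovered configs no-op ack semantics. The sketch below shows how a consumer (the coordinator, in this patch) might drive such a source; stubSource and the string-valued config are illustrative stand-ins, since the real types carry a *config.Config.

package main

import (
	"context"
	"fmt"
	"time"
)

// configChange mirrors the coordinator.ConfigChange idea in miniature:
// the consumer applies a change, then acknowledges or fails it.
type configChange interface {
	Config() string
	Ack() error
	Fail(err error)
}

// localChange is the no-op ack/fail variant, like localConfigChange above.
type localChange struct{ cfg string }

func (l *localChange) Config() string { return l.cfg }
func (l *localChange) Ack() error     { return nil }
func (l *localChange) Fail(_ error)   {}

// stubSource pushes one change and then idles until cancelled, standing
// in for the once/periodic sources rewritten in this patch.
type stubSource struct {
	ch    chan configChange
	errCh chan error
}

func (s *stubSource) Run(ctx context.Context) error {
	s.ch <- &localChange{cfg: "inputs: []"}
	<-ctx.Done()
	return ctx.Err()
}

func (s *stubSource) Watch() <-chan configChange { return s.ch }
func (s *stubSource) Errors() <-chan error       { return s.errCh }

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()

	src := &stubSource{ch: make(chan configChange), errCh: make(chan error)}
	go func() { _ = src.Run(ctx) }()

	// The consumer loop: apply each change, then ack or fail it.
	for {
		select {
		case <-ctx.Done():
			return
		case err := <-src.Errors():
			fmt.Println("source error:", err)
		case change := <-src.Watch():
			fmt.Println("applying config:", change.Config())
			_ = change.Ack()
		}
	}
}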
-func (e *Controller) Update(ctx context.Context, c *config.Config) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - - if err := info.InjectAgentConfig(c); err != nil { - return err - } - - // perform and verify ast translation - m, err := c.ToMapStr() - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - - rawAst, err := transpiler.NewAST(m) - if err != nil { - return errors.New(err, "could not create the AST from the configuration", errors.TypeConfig) - } - - if e.caps != nil { - var ok bool - updatedAst, err := e.caps.Apply(rawAst) - if err != nil { - return errors.New(err, "failed to apply capabilities") - } - - rawAst, ok = updatedAst.(*transpiler.AST) - if !ok { - return errors.New("failed to transform object returned from capabilities to AST", errors.TypeConfig) - } - } - - for _, filter := range e.modifiers.Filters { - if err := filter(e.logger, rawAst); err != nil { - return errors.New(err, "failed to filter configuration", errors.TypeConfig) - } - } - - e.lock.Lock() - e.config = c - e.ast = rawAst - e.lock.Unlock() - - return e.update(ctx) -} - -// Set sets the transpiler vars for dynamic inputs resolution. -func (e *Controller) Set(ctx context.Context, vars []*transpiler.Vars) { - if err := e.set(ctx, vars); err != nil { - e.logger.Errorf("Failed to render configuration with latest context from composable controller: %s", err) - } -} - -func (e *Controller) set(ctx context.Context, vars []*transpiler.Vars) (err error) { - span, ctx := apm.StartSpan(ctx, "set", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - e.lock.Lock() - ast := e.ast - e.vars = vars - e.lock.Unlock() - - if ast != nil { - return e.update(ctx) - } - return nil -} - -func (e *Controller) update(ctx context.Context) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - // locking whole update because it can be called concurrently via Set and Update method - e.updateLock.Lock() - defer e.updateLock.Unlock() - - e.lock.RLock() - cfg := e.config - rawAst := e.ast - varsArray := e.vars - e.lock.RUnlock() - - ast := rawAst.Clone() - inputs, ok := transpiler.Lookup(ast, "inputs") - if ok { - renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) - if err != nil { - return err - } - err = transpiler.Insert(ast, renderedInputs, "inputs") - if err != nil { - return errors.New(err, "inserting rendered inputs failed") - } - } - - e.logger.Debug("Converting single configuration into specific programs configuration") - - programsToRun, err := program.Programs(e.agentInfo, ast) - if err != nil { - return err - } - - for _, decorator := range e.modifiers.Decorators { - for outputType, ptr := range programsToRun { - programsToRun[outputType], err = decorator(e.agentInfo, outputType, ast, ptr) - if err != nil { - return err - } - } - } - - for _, r := range e.reloadables { - if err := r.Reload(cfg); err != nil { - return err - } - } - - return e.router.Route(ctx, ast.HashStr(), programsToRun) -} diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter.go b/internal/pkg/agent/application/pipeline/emitter/emitter.go deleted file mode 100644 index ac94d48d8b4..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/emitter.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter - -import ( - "context" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// New creates a new emitter function. -func New(ctx context.Context, log *logger.Logger, agentInfo *info.AgentInfo, controller composable.Controller, router pipeline.Router, modifiers *pipeline.ConfigModifiers, caps capabilities.Capability, reloadables ...reloadable) (pipeline.EmitterFunc, error) { - ctrl := NewController(log, agentInfo, controller, router, modifiers, caps, reloadables...) - err := controller.Run(ctx, func(vars []*transpiler.Vars) { - ctrl.Set(ctx, vars) - }) - if err != nil { - return nil, errors.New(err, "failed to start composable controller") - } - return func(ctx context.Context, c *config.Config) (err error) { - span, ctx := apm.StartSpan(ctx, "update", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - return ctrl.Update(ctx, c) - }, nil -} diff --git a/internal/pkg/agent/application/pipeline/emitter/emitter_test.go b/internal/pkg/agent/application/pipeline/emitter/emitter_test.go deleted file mode 100644 index a38b1bb1ded..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/emitter_test.go +++ /dev/null @@ -1,5 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package emitter diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go deleted file mode 100644 index e1555393b84..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/fleet_decorator.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package modifiers - -import ( - "github.com/elastic/go-sysinfo/types" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// InjectFleet injects fleet metadata into a configuration. 
-func InjectFleet(cfg *config.Config, hostInfo types.HostInfo, agentInfo *info.AgentInfo) func(*logger.Logger, *transpiler.AST) error { - return func(logger *logger.Logger, rootAst *transpiler.AST) error { - config, err := cfg.ToMapStr() - if err != nil { - return err - } - ast, err := transpiler.NewAST(config) - if err != nil { - return err - } - fleet, ok := transpiler.Lookup(ast, "fleet") - if !ok { - // no fleet from configuration; skip - return nil - } - - // copy top-level agent.* into fleet.agent.* (this gets sent to Applications in this structure) - if agent, ok := transpiler.Lookup(ast, "agent"); ok { - if err := transpiler.Insert(ast, agent, "fleet"); err != nil { - return errors.New(err, "inserting agent info failed") - } - } - - // ensure that the agent.logging.level is present - if _, found := transpiler.Lookup(ast, "agent.logging.level"); !found { - transpiler.Insert(ast, transpiler.NewKey("level", transpiler.NewStrVal(agentInfo.LogLevel())), "agent.logging") - } - - // fleet.host to Agent can be the host to connect to Fleet Server, but to Applications it should - // be the fleet.host.id. move fleet.host to fleet.hosts if fleet.hosts doesn't exist - if _, ok := transpiler.Lookup(ast, "fleet.hosts"); !ok { - if host, ok := transpiler.Lookup(ast, "fleet.host"); ok { - if key, ok := host.(*transpiler.Key); ok { - if value, ok := key.Value().(*transpiler.StrVal); ok { - hosts := transpiler.NewList([]transpiler.Node{transpiler.NewStrVal(value.String())}) - if err := transpiler.Insert(ast, hosts, "fleet.hosts"); err != nil { - return errors.New(err, "inserting fleet hosts failed") - } - } - } - } - } - - // inject host.* into fleet.host.* (this gets sent to Applications in this structure) - host := transpiler.NewKey("host", transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("id", transpiler.NewStrVal(hostInfo.UniqueID)), - })) - if err := transpiler.Insert(ast, host, "fleet"); err != nil { - return errors.New(err, "inserting list of hosts failed") - } - - // inject fleet.* from local AST to the rootAST so its present when sending to Applications. - err = transpiler.Insert(rootAst, fleet.Value().(transpiler.Node), "fleet") - if err != nil { - return errors.New(err, "inserting fleet info failed") - } - return nil - } -} diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go deleted file mode 100644 index d9377aa9e61..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator.go +++ /dev/null @@ -1,105 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package modifiers - -import ( - "crypto/md5" - "fmt" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" -) - -const ( - // MonitoringName is a name used for artificial program generated when monitoring is needed. 
- MonitoringName = "FLEET_MONITORING" - programsKey = "programs" - monitoringChecksumKey = "monitoring_checksum" - monitoringKey = "agent.monitoring" - monitoringUseOutputKey = "agent.monitoring.use_output" - monitoringOutputFormatKey = "outputs.%s" - outputKey = "output" - - enabledKey = "agent.monitoring.enabled" - logsKey = "agent.monitoring.logs" - metricsKey = "agent.monitoring.metrics" - outputsKey = "outputs" - elasticsearchKey = "elasticsearch" - typeKey = "type" - defaultOutputName = "default" -) - -// InjectMonitoring injects a monitoring configuration into a group of programs if needed. -func InjectMonitoring(agentInfo *info.AgentInfo, outputGroup string, rootAst *transpiler.AST, programsToRun []program.Program) ([]program.Program, error) { - var err error - monitoringProgram := program.Program{ - Spec: program.Spec{ - Name: MonitoringName, - Cmd: MonitoringName, - }, - } - - // if monitoring is not specified use default one where everything is enabled - if _, found := transpiler.Lookup(rootAst, monitoringKey); !found { - monitoringNode := transpiler.NewDict([]transpiler.Node{ - transpiler.NewKey("enabled", transpiler.NewBoolVal(true)), - transpiler.NewKey("logs", transpiler.NewBoolVal(true)), - transpiler.NewKey("metrics", transpiler.NewBoolVal(true)), - transpiler.NewKey("use_output", transpiler.NewStrVal("default")), - transpiler.NewKey("namespace", transpiler.NewStrVal("default")), - }) - - transpiler.Insert(rootAst, transpiler.NewKey("monitoring", monitoringNode), "settings") - } - - // get monitoring output name to be used - monitoringOutputName, found := transpiler.LookupString(rootAst, monitoringUseOutputKey) - if !found { - monitoringOutputName = defaultOutputName - } - - typeValue, found := transpiler.LookupString(rootAst, fmt.Sprintf("%s.%s.type", outputsKey, monitoringOutputName)) - if !found { - typeValue = elasticsearchKey - } - - ast := rootAst.Clone() - if err := getMonitoringRule(monitoringOutputName, typeValue).Apply(agentInfo, ast); err != nil { - return programsToRun, err - } - - config, err := ast.Map() - if err != nil { - return programsToRun, err - } - - programList := make([]string, 0, len(programsToRun)) - cfgHash := md5.New() - for _, p := range programsToRun { - programList = append(programList, p.Spec.CommandName()) - cfgHash.Write(p.Config.Hash()) - } - // making program list and their hashes part of the config - // so it will get regenerated with every change - config[programsKey] = programList - config[monitoringChecksumKey] = fmt.Sprintf("%x", cfgHash.Sum(nil)) - - monitoringProgram.Config, err = transpiler.NewAST(config) - if err != nil { - return programsToRun, err - } - - return append(programsToRun, monitoringProgram), nil -} - -func getMonitoringRule(outputName string, t string) *transpiler.RuleList { - monitoringOutputSelector := fmt.Sprintf(monitoringOutputFormatKey, outputName) - return transpiler.NewRuleList( - transpiler.Copy(monitoringOutputSelector, outputKey), - transpiler.Rename(fmt.Sprintf("%s.%s", outputsKey, outputName), t), - transpiler.Filter(monitoringKey, programsKey, outputKey), - ) -} diff --git a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go b/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go deleted file mode 100644 index 735e27cd725..00000000000 --- a/internal/pkg/agent/application/pipeline/emitter/modifiers/monitoring_decorator_test.go +++ /dev/null @@ -1,686 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package modifiers - -import ( - "fmt" - "testing" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/testutils" -) - -func TestMonitoringInjection(t *testing.T) { - tests := []struct { - name string - inputConfig map[string]interface{} - uname string - }{ - { - name: "testMonitoringInjection", - inputConfig: inputConfigMap, - uname: "monitoring-uname", - }, - { - name: "testMonitoringInjectionDefaults", - inputConfig: inputConfigMapDefaults, - uname: "xxx", - }, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - testMonitoringInjection(t, tc.inputConfig, tc.uname) - }) - } -} - -func testMonitoringInjection(t *testing.T, inputConfig map[string]interface{}, testUname string) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfig) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - outputCfg, found := cm[outputKey] - if !found { - t.Errorf("output not found for '%s'", group) - continue GROUPLOOP - } - - outputMap, ok := outputCfg.(map[string]interface{}) - if !ok { - t.Errorf("output is not a map for '%s'", group) - continue GROUPLOOP - } - - esCfg, found := outputMap["elasticsearch"] - if !found { - t.Errorf("elasticsearch output not found for '%s'", group) - continue GROUPLOOP - } - - esMap, ok := esCfg.(map[string]interface{}) - if !ok { - t.Errorf("output.elasticsearch is not a map for '%s'", group) - continue GROUPLOOP - } - - if uname, found := esMap["username"]; !found { - t.Errorf("output.elasticsearch.username output not found for '%s'", group) - continue GROUPLOOP - } else if uname != testUname { - t.Errorf("output.elasticsearch.username has incorrect value expected '%s', got '%s for %s", "monitoring-uname", uname, group) - continue GROUPLOOP - } - } - } -} - -func TestMonitoringToLogstashInjection(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfigLS) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - 
t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - outputCfg, found := cm[outputKey] - if !found { - t.Errorf("output not found for '%s'", group) - continue GROUPLOOP - } - - outputMap, ok := outputCfg.(map[string]interface{}) - if !ok { - t.Errorf("output is not a map for '%s'", group) - continue GROUPLOOP - } - - esCfg, found := outputMap["logstash"] - if !found { - t.Errorf("logstash output not found for '%s' %v", group, outputMap) - continue GROUPLOOP - } - - esMap, ok := esCfg.(map[string]interface{}) - if !ok { - t.Errorf("output.logstash is not a map for '%s'", group) - continue GROUPLOOP - } - - if uname, found := esMap["hosts"]; !found { - t.Errorf("output.logstash.hosts output not found for '%s'", group) - continue GROUPLOOP - } else if uname != "192.168.1.2" { - t.Errorf("output.logstash.hosts has incorrect value expected '%s', got '%s for %s", "monitoring-uname", uname, group) - continue GROUPLOOP - } - } - } -} - -func TestMonitoringInjectionDisabled(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - ast, err := transpiler.NewAST(inputConfigMapDisabled) - if err != nil { - t.Fatal(err) - } - - programsToRun, err := program.Programs(agentInfo, ast) - if err != nil { - t.Fatal(err) - } - - if len(programsToRun) != 2 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 2)) - } - -GROUPLOOP: - for group, ptr := range programsToRun { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, ast, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOP - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - cm, err := p.Config.Map() - if err != nil { - t.Error(err) - continue GROUPLOOP - } - - // is enabled set - agentObj, found := cm["agent"] - if !found { - t.Errorf("settings not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - agentMap, ok := agentObj.(map[string]interface{}) - if !ok { - t.Errorf("settings not a map for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringObj, found := agentMap["monitoring"] - if !found { - t.Errorf("agent.monitoring not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringMap, ok := monitoringObj.(map[string]interface{}) - if !ok { - t.Errorf("agent.monitoring not a map for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - enabledVal, found := monitoringMap["enabled"] - if !found { - t.Errorf("monitoring.enabled not found for '%s(%s)': %v", group, p.Spec.Name, cm) - continue GROUPLOOP - } - - monitoringEnabled, ok := enabledVal.(bool) - if !ok { - t.Errorf("agent.monitoring.enabled is not a bool for '%s'", group) - continue GROUPLOOP - } - - if monitoringEnabled { - t.Errorf("agent.monitoring.enabled is enabled, should be disabled for '%s'", group) - continue GROUPLOOP - } - } - } -} - -func TestChangeInMonitoringWithChangeInInput(t *testing.T) { - testutils.InitStorage(t) - - agentInfo, err := 
info.NewAgentInfo(true) - if err != nil { - t.Fatal(err) - } - - astBefore, err := transpiler.NewAST(inputChange1) - if err != nil { - t.Fatal(err) - } - - programsToRunBefore, err := program.Programs(agentInfo, astBefore) - if err != nil { - t.Fatal(err) - } - - if len(programsToRunBefore) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - - astAfter, err := transpiler.NewAST(inputChange2) - if err != nil { - t.Fatal(err) - } - - programsToRunAfter, err := program.Programs(agentInfo, astAfter) - if err != nil { - t.Fatal(err) - } - - if len(programsToRunAfter) != 1 { - t.Fatal(fmt.Errorf("programsToRun expected to have %d entries", 1)) - } - - // inject to both - var hashConfigBefore, hashConfigAfter string -GROUPLOOPBEFORE: - for group, ptr := range programsToRunBefore { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, astBefore, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOPBEFORE - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOPBEFORE - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - hashConfigBefore = p.Config.HashStr() - } - } - -GROUPLOOPAFTER: - for group, ptr := range programsToRunAfter { - programsCount := len(ptr) - newPtr, err := InjectMonitoring(agentInfo, group, astAfter, ptr) - if err != nil { - t.Error(err) - continue GROUPLOOPAFTER - } - - if programsCount+1 != len(newPtr) { - t.Errorf("incorrect programs to run count, expected: %d, got %d", programsCount+1, len(newPtr)) - continue GROUPLOOPAFTER - } - - for _, p := range newPtr { - if p.Spec.Name != MonitoringName { - continue - } - - hashConfigAfter = p.Config.HashStr() - } - } - - if hashConfigAfter == "" || hashConfigBefore == "" { - t.Fatal("hash configs uninitialized") - } - - if hashConfigAfter == hashConfigBefore { - t.Fatal("hash config equal, expected to be different") - } -} - -var inputConfigMap = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputConfigMapDefaults = map[string]interface{}{ - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": 
"xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputConfigMapDisabled = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": false, - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "infosec1": map[string]interface{}{ - "pass": "xxx", - "spool": map[string]interface{}{ - "file": "${path.data}/spool.dat", - }, - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - { - "type": "system/metrics", - "use_output": "infosec1", - "streams": []map[string]interface{}{ - { - "id": "system/metrics-system.core", - "enabled": true, - "dataset": "system.core", - "period": "10s", - "metrics": []string{"percentages"}, - }, - }, - }, - }, -} - -var inputChange1 = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} - -var inputChange2 = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "elasticsearch", - "index_name": "general", - "pass": "xxx", - "url": "xxxxx", - "username": "monitoring-uname", - }, - }, - "inputs": 
[]map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - {"paths": "/yyyy"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} - -var inputConfigLS = map[string]interface{}{ - "agent.monitoring": map[string]interface{}{ - "enabled": true, - "logs": true, - "metrics": true, - "use_output": "monitoring", - }, - "outputs": map[string]interface{}{ - "default": map[string]interface{}{ - "index_name": "general", - "pass": "xxx", - "type": "elasticsearch", - "url": "xxxxx", - "username": "xxx", - }, - "monitoring": map[string]interface{}{ - "type": "logstash", - "hosts": "192.168.1.2", - "ssl.certificate_authorities": []string{"/etc/pki.key"}, - }, - }, - "inputs": []map[string]interface{}{ - { - "type": "log", - "streams": []map[string]interface{}{ - {"paths": "/xxxx"}, - }, - "processors": []interface{}{ - map[string]interface{}{ - "dissect": map[string]interface{}{ - "tokenizer": "---", - }, - }, - }, - }, - }, -} diff --git a/internal/pkg/agent/application/pipeline/pipeline.go b/internal/pkg/agent/application/pipeline/pipeline.go deleted file mode 100644 index 764d920cff9..00000000000 --- a/internal/pkg/agent/application/pipeline/pipeline.go +++ /dev/null @@ -1,69 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package pipeline - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// ConfigHandler is capable of handling configrequest. -type ConfigHandler interface { - HandleConfig(context.Context, configrequest.Request) error - Close() error - Shutdown() -} - -// DefaultRK default routing keys until we implement the routing key / config matrix. -var DefaultRK = "default" - -// RoutingKey is used for routing as pipeline id. -type RoutingKey = string - -// Router is an interface routing programs to the corresponding stream. -type Router interface { - Routes() *sorted.Set - Route(ctx context.Context, id string, grpProg map[RoutingKey][]program.Program) error - Shutdown() -} - -// StreamFunc creates a stream out of routing key. -type StreamFunc func(*logger.Logger, RoutingKey) (Stream, error) - -// Stream is capable of executing configrequest change. -type Stream interface { - Execute(context.Context, configrequest.Request) error - Close() error - Shutdown() -} - -// EmitterFunc emits configuration for processing. -type EmitterFunc func(context.Context, *config.Config) error - -// DecoratorFunc is a func for decorating a retrieved configuration before processing. -type DecoratorFunc = func(*info.AgentInfo, string, *transpiler.AST, []program.Program) ([]program.Program, error) - -// FilterFunc is a func for filtering a retrieved configuration before processing. 
-type FilterFunc = func(*logger.Logger, *transpiler.AST) error - -// ConfigModifiers is a collections of filters and decorators applied while processing configuration. -type ConfigModifiers struct { - Filters []FilterFunc - Decorators []DecoratorFunc -} - -// Dispatcher processes actions coming from fleet api. -type Dispatcher interface { - Dispatch(context.Context, store.FleetAcker, ...fleetapi.Action) error -} diff --git a/internal/pkg/agent/application/pipeline/router/router.go b/internal/pkg/agent/application/pipeline/router/router.go deleted file mode 100644 index e1f1d63c8b5..00000000000 --- a/internal/pkg/agent/application/pipeline/router/router.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package router - -import ( - "context" - "fmt" - "strings" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type router struct { - log *logger.Logger - routes *sorted.Set - streamFactory pipeline.StreamFunc -} - -// New creates a new router. -func New(log *logger.Logger, factory pipeline.StreamFunc) (pipeline.Router, error) { - var err error - if log == nil { - log, err = logger.New("router", false) - if err != nil { - return nil, err - } - } - return &router{log: log, streamFactory: factory, routes: sorted.NewSet()}, nil -} - -func (r *router) Routes() *sorted.Set { - return r.routes -} - -func (r *router) Route(ctx context.Context, id string, grpProg map[pipeline.RoutingKey][]program.Program) error { - s := sorted.NewSet() - - // Make sure that starting and updating is always done in the same order. - for rk, programs := range grpProg { - s.Add(rk, programs) - } - - active := make(map[string]bool, len(grpProg)) - for _, rk := range s.Keys() { - active[rk] = true - - // Are we already runnings this streams? - // When it doesn't exist we just create it, if it already exist we forward the configuration. - p, ok := r.routes.Get(rk) - var err error - if !ok { - r.log.Debugf("Creating stream: %s", rk) - p, err = r.streamFactory(r.log, rk) - if err != nil { - return err - } - r.routes.Add(rk, p) - } - - programs, ok := s.Get(rk) - if !ok { - return fmt.Errorf("could not find programs for routing key %s", rk) - } - - req := configrequest.New(id, time.Now(), programs.([]program.Program)) - - r.log.Debugf( - "Streams %s need to run config with ID %s and programs: %s", - rk, - req.ShortID(), - strings.Join(req.ProgramNames(), ", "), - ) - - err = p.(pipeline.Stream).Execute(ctx, req) - if err != nil { - return err - } - } - - // cleanup inactive streams. - // streams are shutdown down in alphabetical order. - keys := r.routes.Keys() - for _, k := range keys { - _, ok := active[k] - if ok { - continue - } - - p, ok := r.routes.Get(k) - if !ok { - continue - } - - r.log.Debugf("Removing routing key %s", k) - - p.(pipeline.Stream).Close() - r.routes.Remove(k) - } - - return nil -} - -// Shutdown shutdowns the router because Agent is stopping. 
-func (r *router) Shutdown() { - keys := r.routes.Keys() - for _, k := range keys { - p, ok := r.routes.Get(k) - if !ok { - continue - } - p.(pipeline.Stream).Shutdown() - r.routes.Remove(k) - } -} diff --git a/internal/pkg/agent/application/pipeline/router/router_test.go b/internal/pkg/agent/application/pipeline/router/router_test.go deleted file mode 100644 index 75f33231b1b..00000000000 --- a/internal/pkg/agent/application/pipeline/router/router_test.go +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package router - -import ( - "context" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type rOp int - -const ( - createOp rOp = iota + 1 - executeOp - closeOp -) - -func (r *rOp) String() string { - m := map[rOp]string{ - 1: "create", - 2: "execute", - 3: "close", - } - v, ok := m[*r] - if !ok { - return "unknown operation" - } - return v -} - -type event struct { - rk pipeline.RoutingKey - op rOp -} - -type notifyFunc func(pipeline.RoutingKey, rOp, ...interface{}) - -func TestRouter(t *testing.T) { - programs := []program.Program{{Spec: getRandomSpec()}} - ctx := context.Background() - - t.Run("create new and destroy unused stream", func(t *testing.T) { - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - nk := "NEW_KEY" - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - nk: programs, - }) - - assertOps(t, []event{ - e(nk, createOp), - e(nk, executeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) - - t.Run("multiples create new and destroy unused stream", func(t *testing.T) { - k1 := "KEY_1" - k2 := "KEY_2" - - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - k1: programs, - k2: programs, - }) - - assertOps(t, []event{ - e(k1, createOp), - e(k1, executeOp), - - e(k2, createOp), - e(k2, executeOp), - - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - nk := "SECOND_DISPATCH" - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{ - nk: programs, - }) - - assertOps(t, []event{ - e(nk, createOp), - e(nk, executeOp), - - e(k1, closeOp), - e(k2, closeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) - - t.Run("create new and delegate program to existing stream", func(t *testing.T) { - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - _ = r.Route(ctx, 
"hello-2", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - }) - - assertOps(t, []event{ - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - }) - - t.Run("when no stream are detected we shutdown all the running streams", func(t *testing.T) { - k1 := "KEY_1" - k2 := "KEY_2" - - recorder := &recorder{} - r, err := New(nil, recorder.factory) - require.NoError(t, err) - _ = r.Route(ctx, "hello", map[pipeline.RoutingKey][]program.Program{ - pipeline.DefaultRK: programs, - k1: programs, - k2: programs, - }) - - assertOps(t, []event{ - e(k1, createOp), - e(k1, executeOp), - e(k2, createOp), - e(k2, executeOp), - e(pipeline.DefaultRK, createOp), - e(pipeline.DefaultRK, executeOp), - }, recorder.events) - - recorder.reset() - - _ = r.Route(ctx, "hello-2", map[pipeline.RoutingKey][]program.Program{}) - - assertOps(t, []event{ - e(k1, closeOp), - e(k2, closeOp), - e(pipeline.DefaultRK, closeOp), - }, recorder.events) - }) -} - -type recorder struct { - events []event -} - -func (r *recorder) factory(_ *logger.Logger, rk pipeline.RoutingKey) (pipeline.Stream, error) { - return newMockStream(rk, r.notify), nil -} - -func (r *recorder) notify(rk pipeline.RoutingKey, op rOp, args ...interface{}) { - r.events = append(r.events, e(rk, op)) -} - -func (r *recorder) reset() { - r.events = nil -} - -type mockStream struct { - rk pipeline.RoutingKey - notify notifyFunc -} - -func newMockStream(rk pipeline.RoutingKey, notify notifyFunc) *mockStream { - notify(rk, createOp) - return &mockStream{ - rk: rk, - notify: notify, - } -} - -func (m *mockStream) Execute(_ context.Context, req configrequest.Request) error { - m.event(executeOp, req) - return nil -} - -func (m *mockStream) Close() error { - m.event(closeOp) - return nil -} - -func (m *mockStream) Shutdown() {} - -func (m *mockStream) event(op rOp, args ...interface{}) { - m.notify(m.rk, op, args...) -} - -func assertOps(t *testing.T, expected []event, received []event) { - require.Equal(t, len(expected), len(received), "Received number of operation doesn't match") - require.Equal(t, expected, received) -} - -func e(rk pipeline.RoutingKey, op rOp) event { - return event{rk: rk, op: op} -} - -func getRandomSpec() program.Spec { - return program.Supported[1] -} diff --git a/internal/pkg/agent/application/pipeline/stream/factory.go b/internal/pkg/agent/application/pipeline/stream/factory.go deleted file mode 100644 index b7701e70e99..00000000000 --- a/internal/pkg/agent/application/pipeline/stream/factory.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package stream - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/operation" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - downloader "github.com/elastic/elastic-agent/internal/pkg/artifact/download/localremote" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// Factory creates a new stream factory. -func Factory(ctx context.Context, agentInfo *info.AgentInfo, cfg *configuration.SettingsConfig, srv *server.Server, r state.Reporter, m monitoring.Monitor, statusController status.Controller) func(*logger.Logger, pipeline.RoutingKey) (pipeline.Stream, error) { - return func(log *logger.Logger, id pipeline.RoutingKey) (pipeline.Stream, error) { - // new operator per stream to isolate processes without using tags - operator, err := newOperator(ctx, log, agentInfo, id, cfg, srv, r, m, statusController) - if err != nil { - return nil, err - } - - return &operatorStream{ - log: log, - configHandler: operator, - }, nil - } -} - -func newOperator( - ctx context.Context, - log *logger.Logger, - agentInfo *info.AgentInfo, - id pipeline.RoutingKey, - config *configuration.SettingsConfig, - srv *server.Server, - r state.Reporter, - m monitoring.Monitor, - statusController status.Controller, -) (*operation.Operator, error) { - fetcher, err := downloader.NewDownloader(log, config.DownloadConfig) - if err != nil { - return nil, err - } - - allowEmptyPgp, pgp := release.PGP() - verifier, err := downloader.NewVerifier(log, config.DownloadConfig, allowEmptyPgp, pgp) - if err != nil { - return nil, errors.New(err, "initiating verifier") - } - - installer, err := install.NewInstaller(config.DownloadConfig) - if err != nil { - return nil, errors.New(err, "initiating installer") - } - - uninstaller, err := uninstall.NewUninstaller() - if err != nil { - return nil, errors.New(err, "initiating uninstaller") - } - - stateResolver, err := stateresolver.NewStateResolver(log) - if err != nil { - return nil, err - } - - return operation.NewOperator( - ctx, - log, - agentInfo, - id, - config, - fetcher, - verifier, - installer, - uninstaller, - stateResolver, - srv, - r, - m, - statusController, - ) -} diff --git a/internal/pkg/agent/application/pipeline/stream/operator_stream.go b/internal/pkg/agent/application/pipeline/stream/operator_stream.go deleted file mode 100644 index ee4ee44079e..00000000000 --- a/internal/pkg/agent/application/pipeline/stream/operator_stream.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package stream - -import ( - "context" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type operatorStream struct { - configHandler pipeline.ConfigHandler - log *logger.Logger -} - -type stater interface { - State() map[string]state.State -} - -type specer interface { - Specs() map[string]program.Spec -} - -func (b *operatorStream) Close() error { - return b.configHandler.Close() -} - -func (b *operatorStream) State() map[string]state.State { - if s, ok := b.configHandler.(stater); ok { - return s.State() - } - - return nil -} - -func (b *operatorStream) Specs() map[string]program.Spec { - if s, ok := b.configHandler.(specer); ok { - return s.Specs() - } - return nil -} - -func (b *operatorStream) Execute(ctx context.Context, cfg configrequest.Request) (err error) { - span, ctx := apm.StartSpan(ctx, "route", "app.internal") - defer func() { - apm.CaptureError(ctx, err).Send() - span.End() - }() - return b.configHandler.HandleConfig(ctx, cfg) -} - -func (b *operatorStream) Shutdown() { - b.configHandler.Shutdown() -} diff --git a/internal/pkg/agent/application/upgrade/error_checker.go b/internal/pkg/agent/application/upgrade/error_checker.go index 099526b990b..8e308c4e080 100644 --- a/internal/pkg/agent/application/upgrade/error_checker.go +++ b/internal/pkg/agent/application/upgrade/error_checker.go @@ -64,7 +64,7 @@ func (ch *ErrorChecker) Run(ctx context.Context) { continue } - status, err := ch.agentClient.Status(ctx) + state, err := ch.agentClient.State(ctx) ch.agentClient.Disconnect() if err != nil { ch.log.Error("failed retrieving agent status", err) @@ -78,14 +78,14 @@ func (ch *ErrorChecker) Run(ctx context.Context) { // call was successful, reset counter ch.failuresCounter = 0 - if status.Status == client.Failed { + if state.State == client.Failed { ch.log.Error("error checker notifying failure of agent") ch.notifyChan <- ErrAgentStatusFailed } - for _, app := range status.Applications { - if app.Status == client.Failed { - err = multierror.Append(err, errors.New(fmt.Sprintf("application %s[%v] failed: %s", app.Name, app.ID, app.Message))) + for _, comp := range state.Components { + if comp.State == client.Failed { + err = multierror.Append(err, errors.New(fmt.Sprintf("component %s[%v] failed: %s", comp.Name, comp.ID, comp.Message))) } } diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index e176e4c5b96..51b0adbb184 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -39,7 +39,7 @@ type UpdateMarker struct { } // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) error { +func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi.ActionUpgrade) error { prevVersion := release.Version() prevHash := release.Commit() if len(prevHash) > hashLen { @@ -51,7 +51,7 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action Action) er UpdatedOn: time.Now(), PrevVersion: prevVersion, PrevHash: prevHash, - Action: action.FleetAction(), + Action: action, } markerBytes, err := yaml.Marshal(marker) diff --git 
a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index e2fe530ff77..cb6f827d8e2 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -14,6 +14,8 @@ import ( "runtime" "strings" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" + "github.com/otiai10/copy" "go.elastic.co/apm" @@ -25,8 +27,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/core/state" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -47,40 +47,16 @@ var ( } ) +var ( + // ErrSameVersion is returned when the upgrade results in the same installed version. + ErrSameVersion = errors.New("upgrade did not occur because it's the same version") +) + // Upgrader performs an upgrade type Upgrader struct { - agentInfo *info.AgentInfo - settings *artifact.Config log *logger.Logger - closers []context.CancelFunc - reexec reexecManager - acker acker - reporter stateReporter + settings *artifact.Config upgradeable bool - caps capabilities.Capability -} - -// Action is the upgrade action state. -type Action interface { - // Version to upgrade to. - Version() string - // SourceURI for download. - SourceURI() string - // FleetAction is the action from fleet that started the action (optional). - FleetAction() *fleetapi.ActionUpgrade -} - -type reexecManager interface { - ReExec(callback reexec.ShutdownCallbackFn, argOverrides ...string) -} - -type acker interface { - Ack(ctx context.Context, action fleetapi.Action) error - Commit(ctx context.Context) error -} - -type stateReporter interface { - OnStateChange(id string, name string, s state.State) } // IsUpgradeable when agent is installed and running as a service or flag was provided. @@ -91,17 +67,11 @@ func IsUpgradeable() bool { } // NewUpgrader creates an upgrader which is capable of performing upgrade operation -func NewUpgrader(agentInfo *info.AgentInfo, settings *artifact.Config, log *logger.Logger, closers []context.CancelFunc, reexec reexecManager, a acker, r stateReporter, caps capabilities.Capability) *Upgrader { +func NewUpgrader(log *logger.Logger, settings *artifact.Config) *Upgrader { return &Upgrader{ - agentInfo: agentInfo, - settings: settings, log: log, - closers: closers, - reexec: reexec, - acker: a, - reporter: r, + settings: settings, upgradeable: IsUpgradeable(), - caps: caps, } } @@ -112,40 +82,17 @@ func (u *Upgrader) Upgradeable() bool { // Upgrade upgrades running agent, function returns shutdown callback if some needs to be executed for cases when // reexec is called by caller.
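+//
+// The removed Action interface is replaced by explicit arguments: the target
+// version, an optional source URI override, and the fleet action (if any)
+// that requested the upgrade. As an illustrative sketch only (the version
+// string and nil action here are assumptions, not part of this patch), a
+// caller might do:
+//
+//	cb, err := u.Upgrade(ctx, "8.3.0", "", nil)
+//	if err == ErrSameVersion {
+//		// already running the requested version; no re-exec needed
+//	}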
-func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ reexec.ShutdownCallbackFn, err error) { +func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) { span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() - // report failed - defer func() { - if err != nil { - if action := a.FleetAction(); action != nil { - u.reportFailure(ctx, action, err) - } - apm.CaptureError(ctx, err).Send() - } - }() - - if !u.upgradeable { - return nil, fmt.Errorf( - "cannot be upgraded; must be installed with install sub-command and " + - "running under control of the systems supervisor") - } - if u.caps != nil { - if _, err := u.caps.Apply(a); errors.Is(err, capabilities.ErrBlocked) { - return nil, nil - } - } - - u.reportUpdating(a.Version()) - - sourceURI := u.sourceURI(a.SourceURI()) - archivePath, err := u.downloadArtifact(ctx, a.Version(), sourceURI) + sourceURI = u.sourceURI(sourceURI) + archivePath, err := u.downloadArtifact(ctx, version, sourceURI) if err != nil { return nil, err } - newHash, err := u.unpack(ctx, a.Version(), archivePath) + newHash, err := u.unpack(ctx, version, archivePath) if err != nil { return nil, err } @@ -155,13 +102,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree } if strings.HasPrefix(release.Commit(), newHash) { - // not an error - if action := a.FleetAction(); action != nil { - //nolint:errcheck // keeping the same behavior, and making linter happy - u.ackAction(ctx, action) - } - u.log.Warn("upgrading to same version") - return nil, nil + return nil, ErrSameVersion } // Copy vault directory for linux/windows only @@ -182,7 +123,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, err } - if err := u.markUpgrade(ctx, newHash, a); err != nil { + if err := u.markUpgrade(ctx, newHash, action); err != nil { rollbackInstall(ctx, newHash) return nil, err } @@ -192,17 +133,12 @@ func (u *Upgrader) Upgrade(ctx context.Context, a Action, reexecNow bool) (_ ree return nil, errors.New("failed to invoke rollback watcher", err) } - cb := shutdownCallback(u.log, paths.Home(), release.Version(), a.Version(), release.TrimCommit(newHash)) - if reexecNow { - u.reexec.ReExec(cb) - return nil, nil - } - + cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) return cb, nil } // Ack acks last upgrade action -func (u *Upgrader) Ack(ctx context.Context) error { +func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { // get upgrade action marker, err := LoadMarker() if err != nil { @@ -216,7 +152,11 @@ func (u *Upgrader) Ack(ctx context.Context) error { return nil } - if err := u.ackAction(ctx, marker.Action); err != nil { + if err := acker.Ack(ctx, marker.Action); err != nil { + return err + } + + if err := acker.Commit(ctx); err != nil { return err } @@ -231,50 +171,6 @@ func (u *Upgrader) sourceURI(retrievedURI string) string { return u.settings.SourceURI } -// ackAction is used for successful updates, it was either updated successfully or to the same version -// so we need to remove updating state and get prevent from receiving same update action again. 
-func (u *Upgrader) ackAction(ctx context.Context, action fleetapi.Action) error { - if err := u.acker.Ack(ctx, action); err != nil { - return err - } - - if err := u.acker.Commit(ctx); err != nil { - return err - } - - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Healthy}, - ) - - return nil -} - -// report failure is used when update process fails. action is acked so it won't be received again -// and state is changed to FAILED -func (u *Upgrader) reportFailure(ctx context.Context, action fleetapi.Action, err error) { - // ack action - _ = u.acker.Ack(ctx, action) - - // report failure - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Failed, Message: err.Error()}, - ) -} - -// reportUpdating sets state of agent to updating. -func (u *Upgrader) reportUpdating(version string) { - // report failure - u.reporter.OnStateChange( - "", - agentName, - state.State{Status: state.Updating, Message: fmt.Sprintf("Update to version '%s' started", version)}, - ) -} - func rollbackInstall(ctx context.Context, hash string) { os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) _ = ChangeSymlink(ctx, release.ShortCommit()) diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index ad3a77e1a4f..718e1c4596f 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -428,29 +428,31 @@ func gatherConfig() (AgentConfig, error) { } cfg.ConfigRendered = mapCFG - // Gather vars to render process config - isStandalone, err := isStandalone(renderedCFG) - if err != nil { - return AgentConfig{}, err - } + /* + // Gather vars to render process config + isStandalone, err := isStandalone(renderedCFG) + if err != nil { + return AgentConfig{}, err + } - log, err := newErrorLogger() - if err != nil { - return AgentConfig{}, err - } + log, err := newErrorLogger() + if err != nil { + return AgentConfig{}, err + } - // Get process config - uses same approach as inspect output command. - // Does not contact server process to request configs. - pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone) - if err != nil { - return AgentConfig{}, err - } - cfg.AppConfig = make(map[string]interface{}, 0) - for rk, programs := range pMap { - for _, p := range programs { - cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration() + // Get process config - uses same approach as inspect output command. + // Does not contact server process to request configs. 
+ pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone) + if err != nil { + return AgentConfig{}, err } - } + cfg.AppConfig = make(map[string]interface{}, 0) + for rk, programs := range pMap { + for _, p := range programs { + cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration() + } + } + */ return cfg, nil } diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index a886ca5bafb..77519772fe7 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -28,7 +28,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" @@ -303,7 +302,7 @@ func (c *enrollCmd) writeDelayEnroll(streams *cli.IOStreams) error { func (c *enrollCmd) fleetServerBootstrap(ctx context.Context, persistentConfig map[string]interface{}) (string, error) { c.log.Debug("verifying communication with running Elastic Agent daemon") agentRunning := true - _, err := getDaemonStatus(ctx) + _, err := getDaemonState(ctx) if err != nil { if !c.options.FleetServer.SpawnAgent { // wait longer to try and communicate with the Elastic Agent @@ -641,7 +640,7 @@ func delay(ctx context.Context, d time.Duration) { } } -func getDaemonStatus(ctx context.Context) (*client.AgentStatus, error) { +func getDaemonState(ctx context.Context) (*client.AgentState, error) { ctx, cancel := context.WithTimeout(ctx, daemonTimeout) defer cancel() daemon := client.New() @@ -650,7 +649,7 @@ func getDaemonStatus(ctx context.Context) (*client.AgentStatus, error) { return nil, err } defer daemon.Disconnect() - return daemon.Status(ctx) + return daemon.State(ctx) } type waitResult struct { @@ -680,7 +679,7 @@ func waitForAgent(ctx context.Context, timeout time.Duration) error { backOff := expBackoffWithContext(innerCtx, 1*time.Second, maxBackoff) for { backOff.Wait() - _, err := getDaemonStatus(innerCtx) + _, err := getDaemonState(innerCtx) if errors.Is(err, context.Canceled) { resChan <- waitResult{err: err} return @@ -730,7 +729,7 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat backExp := expBackoffWithContext(innerCtx, 1*time.Second, maxBackoff) for { backExp.Wait() - status, err := getDaemonStatus(innerCtx) + state, err := getDaemonState(innerCtx) if errors.Is(err, context.Canceled) { resChan <- waitResult{err: err} return @@ -750,8 +749,8 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat } continue } - app := getAppFromStatus(status, "fleet-server") - if app == nil { + unit := getCompUnitFromStatus(state, "fleet-server") + if unit == nil { err = errors.New("no fleet-server application running") log.Debugf("%s: %s", waitingForFleetServer, err) if msg != waitingForFleetServer { @@ -767,16 +766,16 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat } continue } - log.Debugf("%s: %s - %s", waitingForFleetServer, app.Status, app.Message) - if app.Status == cproto.Status_DEGRADED || app.Status == cproto.Status_HEALTHY { + log.Debugf("%s: %s - %s", waitingForFleetServer, unit.State, unit.Message) + if unit.State == client.Degraded || unit.State == 
client.Healthy { // app has started and is running - if app.Message != "" { - log.Infof("Fleet Server - %s", app.Message) + if unit.Message != "" { + log.Infof("Fleet Server - %s", unit.Message) } // extract the enrollment token from the status payload token := "" - if app.Payload != nil { - if enrollToken, ok := app.Payload["enrollment_token"]; ok { + if unit.Payload != nil { + if enrollToken, ok := unit.Payload["enrollment_token"]; ok { if tokenStr, ok := enrollToken.(string); ok { token = tokenStr } @@ -785,8 +784,8 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat resChan <- waitResult{enrollmentToken: token} break } - if app.Message != "" { - appMsg := fmt.Sprintf("Fleet Server - %s", app.Message) + if unit.Message != "" { + appMsg := fmt.Sprintf("Fleet Server - %s", unit.Message) if msg != appMsg { msg = appMsg msgCount = 0 @@ -827,10 +826,14 @@ func waitForFleetServer(ctx context.Context, agentSubproc <-chan *os.ProcessStat return res.enrollmentToken, nil } -func getAppFromStatus(status *client.AgentStatus, name string) *client.ApplicationStatus { - for _, app := range status.Applications { - if app.Name == name { - return app +func getCompUnitFromStatus(state *client.AgentState, name string) *client.ComponentUnitState { + for _, comp := range state.Components { + if comp.Name == name { + for _, unit := range comp.Units { + if unit.UnitType == client.UnitTypeInput { + return &unit + } + } } } return nil diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index d7832f48772..03ea093cab6 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -5,34 +5,11 @@ package cmd import ( - "context" - "fmt" - "os" - "github.com/spf13/cobra" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/filters" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/pipeline/emitter/modifiers" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/cli" - "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/config/operations" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/go-sysinfo" ) func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Command { @@ -42,10 +19,12 @@ func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Comman Long: "Shows current configuration of the agent", Args: cobra.ExactArgs(0), Run: func(c *cobra.Command, args []string) { - if err := inspectConfig(paths.ConfigFile()); err != nil { - fmt.Fprintf(streams.Err, "Error: 
%v\n%s\n", err, troubleshootMessage()) - os.Exit(1) - } + /* + if err := inspectConfig(paths.ConfigFile()); err != nil { + fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) + os.Exit(1) + } + */ }, } @@ -61,19 +40,22 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { Long: "Displays configuration generated for output.\nIf no output is specified list of output is displayed", Args: cobra.MaximumNArgs(2), RunE: func(c *cobra.Command, args []string) error { - outName, _ := c.Flags().GetString("output") - program, _ := c.Flags().GetString("program") - cfgPath := paths.ConfigFile() - agentInfo, err := info.NewAgentInfo(false) - if err != nil { - return err - } - - if outName == "" { - return inspectOutputs(cfgPath, agentInfo) - } - - return inspectOutput(cfgPath, outName, program, agentInfo) + /* + outName, _ := c.Flags().GetString("output") + program, _ := c.Flags().GetString("program") + cfgPath := paths.ConfigFile() + agentInfo, err := info.NewAgentInfo(false) + if err != nil { + return err + } + + if outName == "" { + return inspectOutputs(cfgPath, agentInfo) + } + + return inspectOutput(cfgPath, outName, program, agentInfo) + */ + return nil }, } @@ -83,6 +65,7 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { return cmd } +/* func inspectConfig(cfgPath string) error { err := tryContainerLoadPaths() if err != nil { @@ -102,7 +85,7 @@ func printMapStringConfig(mapStr map[string]interface{}) error { if err != nil { return err } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l, status.NewController(l)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) if err != nil { return err } @@ -279,7 +262,7 @@ func getProgramsFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, cfg *c configModifiers.Filters = append(configModifiers.Filters, modifiers.InjectFleet(cfg, sysInfo.Info(), agentInfo)) } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) if err != nil { return nil, err } @@ -374,17 +357,23 @@ func newWaitForCompose(wrapped composable.Controller) *waitForCompose { } } -func (w *waitForCompose) Run(ctx context.Context, cb composable.VarsCallback) error { - err := w.controller.Run(ctx, func(vars []*transpiler.Vars) { - cb(vars) - w.done <- true - }) +func (w *waitForCompose) Run(ctx context.Context) error { + err := w.controller.Run(ctx) return err } +func (w *waitForCompose) Errors() <-chan error { + return nil +} + +func (w *waitForCompose) Watch() <-chan []*transpiler.Vars { + return nil +} + func (w *waitForCompose) Wait() { <-w.done } +*/ func isStandalone(cfg *config.Config) (bool, error) { c, err := configuration.NewFromConfig(cfg) diff --git a/internal/pkg/agent/cmd/inspect_test.go b/internal/pkg/agent/cmd/inspect_test.go index 361f77d5904..3a5ffb35380 100644 --- a/internal/pkg/agent/cmd/inspect_test.go +++ b/internal/pkg/agent/cmd/inspect_test.go @@ -4,6 +4,7 @@ package cmd +/* import ( "testing" ) @@ -49,3 +50,4 @@ func TestGetFleetInput(t *testing.T) { }) } } +*/ diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index b584baf2f09..e2d3fc0e751 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -19,11 +19,7 @@ import ( apmtransport "go.elastic.co/apm/transport" "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent-libs/api" - "github.com/elastic/elastic-agent-libs/monitoring" "github.com/elastic/elastic-agent-libs/service" - 
"github.com/elastic/elastic-agent-system-metrics/report" - "github.com/elastic/elastic-agent/internal/pkg/agent/application" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" @@ -37,13 +33,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - monitoringServer "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/server" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/version" ) const ( @@ -65,7 +57,7 @@ func newRunCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { } } -func run(override cfgOverrider) error { +func run(override cfgOverrider, modifiers ...application.PlatformModifier) error { // Windows: Mark service as stopped. // After this is run, the service is considered by the OS to be stopped. // This must be the first deferred cleanup task (last to execute). @@ -123,7 +115,7 @@ func run(override cfgOverrider) error { // that writes the agentID into fleet.enc (encrypted fleet.yml) before even loading the configuration. err = secret.CreateAgentSecret() if err != nil { - return err + return fmt.Errorf("failed to read/write secrets: %w", err) } agentInfo, err := info.NewAgentInfoWithLog(defaultLogLevel(cfg), createAgentID) @@ -151,8 +143,6 @@ func run(override cfgOverrider) error { rexLogger := logger.Named("reexec") rex := reexec.NewManager(rexLogger, execPath) - statusCtrl := status.NewController(logger) - tracer, err := initTracer(agentName, release.Version(), cfg.Settings.MonitoringConfig) if err != nil { return fmt.Errorf("could not initiate APM tracer: %w", err) @@ -167,32 +157,37 @@ func run(override cfgOverrider) error { logger.Info("APM instrumentation disabled") } - control := server.New(logger.Named("control"), rex, statusCtrl, nil, tracer) - // start the control listener - if err := control.Start(); err != nil { - return err - } - defer control.Stop() - - app, err := application.New(logger, rex, statusCtrl, control, agentInfo, tracer) + app, err := application.New(logger, agentInfo, rex, tracer, modifiers...) 
 
 	// listen for signals
 	signals := make(chan os.Signal, 1)
@@ -203,6 +198,8 @@
 	select {
 	case <-stop:
 		breakout = true
+	case <-appDone:
+		breakout = true
 	case <-rex.ShutdownChan():
 		reexecing = true
 		breakout = true
@@ -222,7 +219,9 @@
 		}
 	}
-	err = app.Stop()
+	cancel()
+	err = <-appErrCh
+
 	if !reexecing {
 		logger.Info("Shutting down completed.")
 		return err
 	}
@@ -330,6 +329,7 @@ func defaultLogLevel(cfg *configuration.Configuration) string {
 	return defaultLogLevel
 }
 
+/*
 func setupMetrics(
	_ *info.AgentInfo,
	logger *logger.Logger,
@@ -366,6 +366,7 @@ func isProcessStatsEnabled(cfg *monitoringCfg.MonitoringHTTPConfig) bool {
 	return cfg != nil && cfg.Enabled
 }
+*/
 
 func tryDelayEnroll(ctx context.Context, logger *logger.Logger, cfg *configuration.Configuration, override cfgOverrider) (*configuration.Configuration, error) {
 	enrollPath := paths.AgentEnrollFile()
diff --git a/internal/pkg/agent/cmd/status.go b/internal/pkg/agent/cmd/status.go
index f3649bafdc9..2f748e6dc89 100644
--- a/internal/pkg/agent/cmd/status.go
+++ b/internal/pkg/agent/cmd/status.go
@@ -25,7 +25,7 @@ import (
 type outputter func(io.Writer, interface{}) error
 
 var statusOutputs = map[string]outputter{
-	"human": humanStatusOutput,
+	"human": humanStateOutput,
 	"json":  jsonOutput,
 	"yaml":  yamlOutput,
 }
@@ -64,7 +64,7 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error
 	innerCtx, cancel := context.WithTimeout(ctx, 30*time.Second)
 	defer cancel()
 
-	status, err := getDaemonStatus(innerCtx)
+	state, err := getDaemonState(innerCtx)
 	if errors.Is(err, context.DeadlineExceeded) {
 		return errors.New("timed out after 30 seconds trying to connect to Elastic Agent daemon")
 	} else if errors.Is(err, context.Canceled) {
@@ -73,12 +73,12 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error
 		return fmt.Errorf("failed to communicate with Elastic Agent daemon: %w", err)
 	}
 
-	err = outputFunc(streams.Out, status)
+	err = outputFunc(streams.Out, state)
 	if err != nil {
 		return err
 	}
 	// exit 0 only if the Elastic Agent daemon is healthy
-	if status.Status == client.Healthy {
+	if state.State == client.Healthy {
 		os.Exit(0)
 	} else {
 		os.Exit(1)
@@ -86,32 +86,32 @@ func statusCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error
 	return nil
 }
 
-func humanStatusOutput(w io.Writer, obj interface{}) error {
-	status, ok := obj.(*client.AgentStatus)
+func humanStateOutput(w io.Writer, obj interface{}) error {
+	status, ok := obj.(*client.AgentState)
 	if !ok {
-		return fmt.Errorf("unable to cast %T as *client.AgentStatus", obj)
+		return fmt.Errorf("unable to cast %T as *client.AgentState", obj)
 	}
-	return outputStatus(w, status)
+	return outputState(w, status)
 }
 
-func outputStatus(w io.Writer, status *client.AgentStatus) error {
-	fmt.Fprintf(w, "Status: %s\n", status.Status)
-	if status.Message == "" {
+func outputState(w io.Writer, state *client.AgentState) error {
+	fmt.Fprintf(w, "State: %s\n", state.State)
+	if state.Message == "" {
 		fmt.Fprint(w, "Message: (no message)\n")
 	} else {
-		fmt.Fprintf(w, "Message: %s\n", status.Message)
+		fmt.Fprintf(w, "Message: %s\n", state.Message)
 	}
-	if len(status.Applications) == 0 {
-		fmt.Fprint(w, "Applications: (none)\n")
+	if len(state.Components) == 0 {
+		fmt.Fprint(w, "Components: (none)\n")
 	} else {
-		fmt.Fprint(w, "Applications:\n")
+		fmt.Fprint(w, "Components:\n")
 		tw := tabwriter.NewWriter(w, 4, 1, 2, ' ', 0)
-		for _, app := range status.Applications {
-			fmt.Fprintf(tw, "  * %s\t(%s)\n", app.Name, app.Status)
-			if app.Message == "" {
+		for _, comp := range state.Components {
+			fmt.Fprintf(tw, "  * %s\t(%s)\n", comp.Name, comp.State)
+			if comp.Message == "" {
 				fmt.Fprint(tw, "\t(no message)\n")
 			} else {
-				fmt.Fprintf(tw, "\t%s\n", app.Message)
+				fmt.Fprintf(tw, "\t%s\n", comp.Message)
 			}
 		}
 		tw.Flush()
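Note: with the renames above, `elastic-agent status` reports components and units instead of applications. A standalone sketch of the human-readable layout produced by outputState's tabwriter settings; the component values are illustrative, not real agent output:

package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	fmt.Fprint(os.Stdout, "State: HEALTHY\nMessage: (no message)\nComponents:\n")
	// Same tabwriter parameters as outputState above: minwidth 4, tabwidth 1,
	// padding 2, padded with spaces.
	tw := tabwriter.NewWriter(os.Stdout, 4, 1, 2, ' ', 0)
	fmt.Fprintf(tw, "  * %s\t(%s)\n", "filebeat-default", "HEALTHY") // hypothetical component
	fmt.Fprintf(tw, "\t%s\n", "Healthy")
	tw.Flush()
}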
diff --git a/internal/pkg/agent/configuration/grpc.go b/internal/pkg/agent/configuration/grpc.go
new file mode 100644
index 00000000000..6624e6a0c08
--- /dev/null
+++ b/internal/pkg/agent/configuration/grpc.go
@@ -0,0 +1,26 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package configuration
+
+import "fmt"
+
+// GRPCConfig is the configuration of the GRPC server.
+type GRPCConfig struct {
+	Address string `config:"address"`
+	Port    uint16 `config:"port"`
+}
+
+// DefaultGRPCConfig creates a default server configuration.
+func DefaultGRPCConfig() *GRPCConfig {
+	return &GRPCConfig{
+		Address: "localhost",
+		Port:    6789,
+	}
+}
+
+// String returns the composed listen address for the GRPC server.
+func (cfg *GRPCConfig) String() string {
+	return fmt.Sprintf("%s:%d", cfg.Address, cfg.Port)
+}
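Note: the relocated GRPCConfig keeps `config:` struct tags, so it unpacks with go-ucfg like the rest of the settings tree. A minimal self-contained sketch; the YAML keys and default values are taken from the struct above, and GRPCConfig is mirrored locally so the example compiles on its own:

package main

import (
	"fmt"

	"github.com/elastic/go-ucfg/yaml"
)

// Local mirror of the GRPCConfig added above.
type GRPCConfig struct {
	Address string `config:"address"`
	Port    uint16 `config:"port"`
}

func main() {
	raw := []byte("address: localhost\nport: 6789\n") // the shipped defaults
	cfg, err := yaml.NewConfig(raw)
	if err != nil {
		panic(err)
	}
	grpc := GRPCConfig{}
	if err := cfg.Unpack(&grpc); err != nil {
		panic(err)
	}
	// Composes the listen address the same way GRPCConfig.String() does.
	fmt.Printf("%s:%d\n", grpc.Address, grpc.Port) // prints: localhost:6789
}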
diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go
index 7445f02a462..7c2c422a65b 100644
--- a/internal/pkg/agent/configuration/settings.go
+++ b/internal/pkg/agent/configuration/settings.go
@@ -12,7 +12,6 @@ import (
 	"github.com/elastic/elastic-agent/internal/pkg/core/retry"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 	"github.com/elastic/elastic-agent/pkg/core/process"
-	"github.com/elastic/elastic-agent/pkg/core/server"
 )
 
 // ExternalInputsPattern is a glob that matches the paths of external configuration files.
@@ -22,7 +21,7 @@ var ExternalInputsPattern = filepath.Join("inputs.d", "*.yml")
 type SettingsConfig struct {
 	DownloadConfig   *artifact.Config                `yaml:"download" config:"download" json:"download"`
 	ProcessConfig    *process.Config                 `yaml:"process" config:"process" json:"process"`
-	GRPC             *server.Config                  `yaml:"grpc" config:"grpc" json:"grpc"`
+	GRPC             *GRPCConfig                     `yaml:"grpc" config:"grpc" json:"grpc"`
 	RetryConfig      *retry.Config                   `yaml:"retry" config:"retry" json:"retry"`
 	MonitoringConfig *monitoringCfg.MonitoringConfig `yaml:"monitoring" config:"monitoring" json:"monitoring"`
 	LoggingConfig    *logger.Config                  `yaml:"logging,omitempty" config:"logging,omitempty" json:"logging,omitempty"`
@@ -40,7 +39,7 @@ func DefaultSettingsConfig() *SettingsConfig {
 		DownloadConfig:   artifact.DefaultConfig(),
 		LoggingConfig:    logger.DefaultLoggingConfig(),
 		MonitoringConfig: monitoringCfg.DefaultConfig(),
-		GRPC:             server.DefaultGRPCConfig(),
+		GRPC:             DefaultGRPCConfig(),
 		Reload:           DefaultReloadConfig(),
 	}
 }
diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/client/client.go
index 728e830b462..634cc25a5af 100644
--- a/internal/pkg/agent/control/client/client.go
+++ b/internal/pkg/agent/control/client/client.go
@@ -15,24 +15,38 @@ import (
 	"github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto"
 )
 
-// Status is the status of the Elastic Agent
-type Status = cproto.Status
+// UnitType is the type of the unit
+type UnitType = cproto.UnitType
+
+// State is the state of the Elastic Agent
+type State = cproto.State
+
+const (
+	// UnitTypeInput is an input unit.
+	UnitTypeInput UnitType = cproto.UnitType_INPUT
+	// UnitTypeOutput is an output unit.
+	UnitTypeOutput UnitType = cproto.UnitType_OUTPUT
+)
 
 const (
 	// Starting is when the it is still starting.
-	Starting Status = cproto.Status_STARTING
+	Starting State = cproto.State_STARTING
 	// Configuring is when it is configuring.
-	Configuring Status = cproto.Status_CONFIGURING
+	Configuring State = cproto.State_CONFIGURING
 	// Healthy is when it is healthy.
-	Healthy Status = cproto.Status_HEALTHY
+	Healthy State = cproto.State_HEALTHY
 	// Degraded is when it is degraded.
-	Degraded Status = cproto.Status_DEGRADED
+	Degraded State = cproto.State_DEGRADED
 	// Failed is when it is failed.
-	Failed Status = cproto.Status_FAILED
+	Failed State = cproto.State_FAILED
 	// Stopping is when it is stopping.
-	Stopping Status = cproto.Status_STOPPING
+	Stopping State = cproto.State_STOPPING
+	// Stopped is when it is stopped.
+	Stopped State = cproto.State_STOPPED
 	// Upgrading is when it is upgrading.
-	Upgrading Status = cproto.Status_UPGRADING
+	Upgrading State = cproto.State_UPGRADING
+	// Rollback is when the upgrade is rolling back.
+	Rollback State = cproto.State_ROLLBACK
 )
 
 // Version is the current running version of the daemon.
@@ -43,14 +57,29 @@ type Version struct {
 	Snapshot bool
 }
 
-// ApplicationStatus is a status of an application managed by the Elastic Agent.
-// TODO(Anderson): Implement sort.Interface and sort it.
-type ApplicationStatus struct {
-	ID      string
-	Name    string
-	Status  Status
-	Message string
-	Payload map[string]interface{}
+// ComponentUnitState is a state of a unit running inside a component.
+type ComponentUnitState struct {
+	UnitID   string                 `json:"unit_id" yaml:"unit_id"`
+	UnitType UnitType               `json:"unit_type" yaml:"unit_type"`
+	State    State                  `json:"state" yaml:"state"`
+	Message  string                 `json:"message" yaml:"message"`
+	Payload  map[string]interface{} `json:"payload,omitempty" yaml:"payload,omitempty"`
+}
+
+// ComponentState is a state of a component managed by the Elastic Agent.
+type ComponentState struct {
+	ID      string               `json:"id" yaml:"id"`
+	Name    string               `json:"name" yaml:"name"`
+	State   State                `json:"state" yaml:"state"`
+	Message string               `json:"message" yaml:"message"`
+	Units   []ComponentUnitState `json:"units" yaml:"units"`
+}
+
+// AgentState is the current state of the Elastic Agent.
+type AgentState struct {
+	State      State            `json:"state" yaml:"state"`
+	Message    string           `json:"message" yaml:"message"`
+	Components []ComponentState `json:"components" yaml:"components"`
 }
 
 // ProcMeta is the running version and ID information for a running process.
@@ -80,13 +109,6 @@ type ProcPProf struct {
 	Error string
 }
 
-// AgentStatus is the current status of the Elastic Agent.
-type AgentStatus struct {
-	Status       Status
-	Message      string
-	Applications []*ApplicationStatus
-}
-
 // Client communicates to Elastic Agent through the control protocol.
 type Client interface {
 	// Connect connects to the running Elastic Agent.
@@ -95,8 +117,8 @@ type Client interface {
 	Disconnect()
 	// Version returns the current version of the running agent.
 	Version(ctx context.Context) (Version, error)
-	// Status returns the current status of the running agent.
-	Status(ctx context.Context) (*AgentStatus, error)
+	// State returns the current state of the running agent.
+	State(ctx context.Context) (*AgentState, error)
 	// Restart triggers restarting the current running daemon.
 	Restart(ctx context.Context) error
 	// Upgrade triggers upgrade of the current running daemon.
@@ -161,32 +183,42 @@ func (c *client) Version(ctx context.Context) (Version, error) {
 	}, nil
 }
 
-// Status returns the current status of the running agent.
-func (c *client) Status(ctx context.Context) (*AgentStatus, error) {
-	res, err := c.client.Status(ctx, &cproto.Empty{})
+// State returns the current state of the running agent.
+func (c *client) State(ctx context.Context) (*AgentState, error) {
+	res, err := c.client.State(ctx, &cproto.Empty{})
 	if err != nil {
 		return nil, err
 	}
 
-	s := &AgentStatus{
-		Status:       res.Status,
-		Message:      res.Message,
-		Applications: make([]*ApplicationStatus, len(res.Applications)),
+	s := &AgentState{
+		State:      res.State,
+		Message:    res.Message,
+		Components: make([]ComponentState, 0, len(res.Components)),
 	}
-	for i, appRes := range res.Applications {
-		var payload map[string]interface{}
-		if appRes.Payload != "" {
-			err := json.Unmarshal([]byte(appRes.Payload), &payload)
-			if err != nil {
-				return nil, err
+	for _, comp := range res.Components {
+		units := make([]ComponentUnitState, 0, len(comp.Units))
+		for _, unit := range comp.Units {
+			var payload map[string]interface{}
+			if unit.Payload != "" {
+				err := json.Unmarshal([]byte(unit.Payload), &payload)
+				if err != nil {
+					return nil, err
+				}
 			}
+			units = append(units, ComponentUnitState{
+				UnitID:   unit.UnitId,
+				UnitType: unit.UnitType,
+				State:    unit.State,
+				Message:  unit.Message,
+				Payload:  payload,
+			})
 		}
-		s.Applications[i] = &ApplicationStatus{
-			ID:      appRes.Id,
-			Name:    appRes.Name,
-			Status:  appRes.Status,
-			Message: appRes.Message,
-			Payload: payload,
-		}
+		s.Components = append(s.Components, ComponentState{
+			ID:      comp.Id,
+			Name:    comp.Name,
+			State:   comp.State,
+			Message: comp.Message,
+			Units:   units,
+		})
 	}
 	return s, nil
 }
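Note: a hedged sketch of walking the new state tree from the control client. client.New() is assumed to be the package constructor (it is not shown in this diff); everything else uses the Client interface and types defined above, and the exit convention mirrors statusCmd:

package main

import (
	"context"
	"fmt"
	"os"
	"time"

	"github.com/elastic/elastic-agent/internal/pkg/agent/control/client"
)

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	c := client.New() // assumed constructor, not shown in this diff
	if err := c.Connect(ctx); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	defer c.Disconnect()

	state, err := c.State(ctx)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// Components replace the old flat application list; each carries units.
	for _, comp := range state.Components {
		fmt.Printf("%s: %s (%s)\n", comp.Name, comp.State, comp.Message)
		for _, unit := range comp.Units {
			fmt.Printf("  %s [%s]: %s\n", unit.UnitID, unit.UnitType, unit.State)
		}
	}
	// Same convention as statusCmd: only a healthy agent exits 0.
	if state.State != client.Healthy {
		os.Exit(1)
	}
}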
diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go
index 43609b68f0a..7ada35a4fe0 100644
--- a/internal/pkg/agent/control/cproto/control.pb.go
+++ b/internal/pkg/agent/control/cproto/control.pb.go
@@ -25,71 +25,121 @@ const (
 	_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
 )
 
-// Status codes for the current state.
-type Status int32
+// State codes for the current state.
+type State int32
 
 const (
-	Status_STARTING    Status = 0
-	Status_CONFIGURING Status = 1
-	Status_HEALTHY     Status = 2
-	Status_DEGRADED    Status = 3
-	Status_FAILED      Status = 4
-	Status_STOPPING    Status = 5
-	Status_UPGRADING   Status = 6
-	Status_ROLLBACK    Status = 7
+	State_STARTING    State = 0
+	State_CONFIGURING State = 1
+	State_HEALTHY     State = 2
+	State_DEGRADED    State = 3
+	State_FAILED      State = 4
+	State_STOPPING    State = 5
+	State_STOPPED     State = 6
+	State_UPGRADING   State = 7
+	State_ROLLBACK    State = 8
 )
 
-// Enum value maps for Status.
+// Enum value maps for State.
 var (
-	Status_name = map[int32]string{
+	State_name = map[int32]string{
 		0: "STARTING",
 		1: "CONFIGURING",
 		2: "HEALTHY",
 		3: "DEGRADED",
 		4: "FAILED",
 		5: "STOPPING",
-		6: "UPGRADING",
-		7: "ROLLBACK",
+		6: "STOPPED",
+		7: "UPGRADING",
+		8: "ROLLBACK",
 	}
-	Status_value = map[string]int32{
+	State_value = map[string]int32{
 		"STARTING":    0,
 		"CONFIGURING": 1,
 		"HEALTHY":     2,
 		"DEGRADED":    3,
 		"FAILED":      4,
 		"STOPPING":    5,
-		"UPGRADING":   6,
-		"ROLLBACK":    7,
+		"STOPPED":     6,
+		"UPGRADING":   7,
+		"ROLLBACK":    8,
 	}
 )
 
-func (x Status) Enum() *Status {
-	p := new(Status)
+func (x State) Enum() *State {
+	p := new(State)
 	*p = x
 	return p
 }
 
-func (x Status) String() string {
+func (x State) String() string {
 	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
 }
 
-func (Status) Descriptor() protoreflect.EnumDescriptor {
+func (State) Descriptor() protoreflect.EnumDescriptor {
 	return file_control_proto_enumTypes[0].Descriptor()
 }
 
-func (Status) Type() protoreflect.EnumType {
+func (State) Type() protoreflect.EnumType {
 	return &file_control_proto_enumTypes[0]
 }
 
-func (x Status) Number() protoreflect.EnumNumber {
+func (x State) Number() protoreflect.EnumNumber {
 	return protoreflect.EnumNumber(x)
 }
 
-// Deprecated: Use Status.Descriptor instead.
-func (Status) EnumDescriptor() ([]byte, []int) {
+// Deprecated: Use State.Descriptor instead.
+func (State) EnumDescriptor() ([]byte, []int) {
 	return file_control_proto_rawDescGZIP(), []int{0}
 }
 
+// Unit Type running inside a component.
+type UnitType int32
+
+const (
+	UnitType_INPUT  UnitType = 0
+	UnitType_OUTPUT UnitType = 1
+)
+
+// Enum value maps for UnitType.
+var (
+	UnitType_name = map[int32]string{
+		0: "INPUT",
+		1: "OUTPUT",
+	}
+	UnitType_value = map[string]int32{
+		"INPUT":  0,
+		"OUTPUT": 1,
+	}
+)
+
+func (x UnitType) Enum() *UnitType {
+	p := new(UnitType)
+	*p = x
+	return p
+}
+
+func (x UnitType) String() string {
+	return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x))
+}
+
+func (UnitType) Descriptor() protoreflect.EnumDescriptor {
+	return file_control_proto_enumTypes[1].Descriptor()
+}
+
+func (UnitType) Type() protoreflect.EnumType {
+	return &file_control_proto_enumTypes[1]
+}
+
+func (x UnitType) Number() protoreflect.EnumNumber {
+	return protoreflect.EnumNumber(x)
+}
+
+// Deprecated: Use UnitType.Descriptor instead.
+func (UnitType) EnumDescriptor() ([]byte, []int) {
+	return file_control_proto_rawDescGZIP(), []int{1}
+}
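+
+// NOTE: STOPPED was inserted at value 6, shifting UPGRADING and ROLLBACK to
+// 7 and 8. Wire values rather than names cross the control socket, so a CLI
+// and daemon must share this proto revision to agree on state codes.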
+
 // Action status codes for restart and upgrade response.
 type ActionStatus int32
 
@@ -123,11 +173,11 @@ func (x ActionStatus) String() string {
 }
 
 func (ActionStatus) Descriptor() protoreflect.EnumDescriptor {
-	return file_control_proto_enumTypes[1].Descriptor()
+	return file_control_proto_enumTypes[2].Descriptor()
 }
 
 func (ActionStatus) Type() protoreflect.EnumType {
-	return &file_control_proto_enumTypes[1]
+	return &file_control_proto_enumTypes[2]
 }
 
 func (x ActionStatus) Number() protoreflect.EnumNumber {
@@ -136,7 +186,7 @@ func (x ActionStatus) Number() protoreflect.EnumNumber {
 
 // Deprecated: Use ActionStatus.Descriptor instead.
 func (ActionStatus) EnumDescriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{1}
+	return file_control_proto_rawDescGZIP(), []int{2}
 }
 
 // pprof endpoint that can be requested.
@@ -191,11 +241,11 @@ func (x PprofOption) String() string {
 }
 
 func (PprofOption) Descriptor() protoreflect.EnumDescriptor {
-	return file_control_proto_enumTypes[2].Descriptor()
+	return file_control_proto_enumTypes[3].Descriptor()
 }
 
 func (PprofOption) Type() protoreflect.EnumType {
-	return &file_control_proto_enumTypes[2]
+	return &file_control_proto_enumTypes[3]
 }
 
 func (x PprofOption) Number() protoreflect.EnumNumber {
@@ -204,7 +254,7 @@ func (x PprofOption) Number() protoreflect.EnumNumber {
 
 // Deprecated: Use PprofOption.Descriptor instead.
 func (PprofOption) EnumDescriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{2}
+	return file_control_proto_rawDescGZIP(), []int{3}
 }
 
 // Empty message.
@@ -511,26 +561,25 @@ func (x *UpgradeResponse) GetError() string {
 	return ""
 }
 
-// Current status of the application in Elastic Agent.
-type ApplicationStatus struct {
+type ComponentUnitState struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// Unique application ID.
-	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
-	// Application name.
-	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
-	// Current status.
-	Status Status `protobuf:"varint,3,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"`
-	// Current status message.
+	// Type of unit in the component.
+	UnitType UnitType `protobuf:"varint,1,opt,name=unit_type,json=unitType,proto3,enum=cproto.UnitType" json:"unit_type,omitempty"`
+	// ID of the unit in the component.
+	UnitId string `protobuf:"bytes,2,opt,name=unit_id,json=unitId,proto3" json:"unit_id,omitempty"`
+	// Current state.
+	State State `protobuf:"varint,3,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"`
+	// Current state message.
 	Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"`
-	// Current status payload.
+	// Current state payload.
 	Payload string `protobuf:"bytes,5,opt,name=payload,proto3" json:"payload,omitempty"`
 }
 
-func (x *ApplicationStatus) Reset() {
-	*x = ApplicationStatus{}
+func (x *ComponentUnitState) Reset() {
+	*x = ComponentUnitState{}
 	if protoimpl.UnsafeEnabled {
 		mi := &file_control_proto_msgTypes[5]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -538,13 +587,13 @@ func (x *ApplicationStatus) Reset() {
 	}
 }
 
-func (x *ApplicationStatus) String() string {
+func (x *ComponentUnitState) String() string {
 	return protoimpl.X.MessageStringOf(x)
 }
 
-func (*ApplicationStatus) ProtoMessage() {}
+func (*ComponentUnitState) ProtoMessage() {}
 
-func (x *ApplicationStatus) ProtoReflect() protoreflect.Message {
+func (x *ComponentUnitState) ProtoReflect() protoreflect.Message {
 	mi := &file_control_proto_msgTypes[5]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
@@ -556,44 +605,205 @@ func (x *ApplicationStatus) ProtoReflect() protoreflect.Message {
 	return mi.MessageOf(x)
 }
 
-// Deprecated: Use ApplicationStatus.ProtoReflect.Descriptor instead.
-func (*ApplicationStatus) Descriptor() ([]byte, []int) {
+// Deprecated: Use ComponentUnitState.ProtoReflect.Descriptor instead.
+func (*ComponentUnitState) Descriptor() ([]byte, []int) {
 	return file_control_proto_rawDescGZIP(), []int{5}
 }
 
-func (x *ApplicationStatus) GetId() string {
+func (x *ComponentUnitState) GetUnitType() UnitType {
+	if x != nil {
+		return x.UnitType
+	}
+	return UnitType_INPUT
+}
+
+func (x *ComponentUnitState) GetUnitId() string {
+	if x != nil {
+		return x.UnitId
+	}
+	return ""
+}
+
+func (x *ComponentUnitState) GetState() State {
+	if x != nil {
+		return x.State
+	}
+	return State_STARTING
+}
+
+func (x *ComponentUnitState) GetMessage() string {
+	if x != nil {
+		return x.Message
+	}
+	return ""
+}
+
+func (x *ComponentUnitState) GetPayload() string {
+	if x != nil {
+		return x.Payload
+	}
+	return ""
+}
+
+// Version information reported by the component to Elastic Agent.
+type ComponentVersionInfo struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Name of the component.
+	Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"`
+	// Version of the component.
+	Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"`
+	// Extra meta information about the version.
+	Meta map[string]string `protobuf:"bytes,3,rep,name=meta,proto3" json:"meta,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
+}
+
+func (x *ComponentVersionInfo) Reset() {
+	*x = ComponentVersionInfo{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_control_proto_msgTypes[6]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ComponentVersionInfo) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ComponentVersionInfo) ProtoMessage() {}
+
+func (x *ComponentVersionInfo) ProtoReflect() protoreflect.Message {
+	mi := &file_control_proto_msgTypes[6]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ComponentVersionInfo.ProtoReflect.Descriptor instead.
+func (*ComponentVersionInfo) Descriptor() ([]byte, []int) {
+	return file_control_proto_rawDescGZIP(), []int{6}
+}
+
+func (x *ComponentVersionInfo) GetName() string {
+	if x != nil {
+		return x.Name
+	}
+	return ""
+}
+
+func (x *ComponentVersionInfo) GetVersion() string {
+	if x != nil {
+		return x.Version
+	}
+	return ""
+}
+
+func (x *ComponentVersionInfo) GetMeta() map[string]string {
+	if x != nil {
+		return x.Meta
+	}
+	return nil
+}
+
+// Current state of a running component by Elastic Agent.
+type ComponentState struct {
+	state         protoimpl.MessageState
+	sizeCache     protoimpl.SizeCache
+	unknownFields protoimpl.UnknownFields
+
+	// Unique component ID.
+	Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"`
+	// Component name.
+	Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"`
+	// Current state.
+	State State `protobuf:"varint,3,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"`
+	// Current state message.
+	Message string `protobuf:"bytes,4,opt,name=message,proto3" json:"message,omitempty"`
+	// Current units running in the component.
+	Units []*ComponentUnitState `protobuf:"bytes,5,rep,name=units,proto3" json:"units,omitempty"`
+	// Current version information for the running component.
+	VersionInfo *ComponentVersionInfo `protobuf:"bytes,6,opt,name=version_info,json=versionInfo,proto3" json:"version_info,omitempty"`
+}
+
+func (x *ComponentState) Reset() {
+	*x = ComponentState{}
+	if protoimpl.UnsafeEnabled {
+		mi := &file_control_proto_msgTypes[7]
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		ms.StoreMessageInfo(mi)
+	}
+}
+
+func (x *ComponentState) String() string {
+	return protoimpl.X.MessageStringOf(x)
+}
+
+func (*ComponentState) ProtoMessage() {}
+
+func (x *ComponentState) ProtoReflect() protoreflect.Message {
+	mi := &file_control_proto_msgTypes[7]
+	if protoimpl.UnsafeEnabled && x != nil {
+		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
+		if ms.LoadMessageInfo() == nil {
+			ms.StoreMessageInfo(mi)
+		}
+		return ms
+	}
+	return mi.MessageOf(x)
+}
+
+// Deprecated: Use ComponentState.ProtoReflect.Descriptor instead.
+func (*ComponentState) Descriptor() ([]byte, []int) {
+	return file_control_proto_rawDescGZIP(), []int{7}
+}
+
+func (x *ComponentState) GetId() string {
 	if x != nil {
 		return x.Id
 	}
 	return ""
 }
 
-func (x *ApplicationStatus) GetName() string {
+func (x *ComponentState) GetName() string {
 	if x != nil {
 		return x.Name
 	}
 	return ""
 }
 
-func (x *ApplicationStatus) GetStatus() Status {
+func (x *ComponentState) GetState() State {
 	if x != nil {
-		return x.Status
+		return x.State
 	}
-	return Status_STARTING
+	return State_STARTING
 }
 
-func (x *ApplicationStatus) GetMessage() string {
+func (x *ComponentState) GetMessage() string {
 	if x != nil {
 		return x.Message
 	}
 	return ""
 }
 
-func (x *ApplicationStatus) GetPayload() string {
+func (x *ComponentState) GetUnits() []*ComponentUnitState {
 	if x != nil {
-		return x.Payload
+		return x.Units
 	}
-	return ""
+	return nil
+}
+
+func (x *ComponentState) GetVersionInfo() *ComponentVersionInfo {
+	if x != nil {
+		return x.VersionInfo
+	}
+	return nil
 }
 
 // Current metadata for a running process.
@@ -622,7 +832,7 @@ type ProcMeta struct {
 func (x *ProcMeta) Reset() {
 	*x = ProcMeta{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[6]
+		mi := &file_control_proto_msgTypes[8]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -635,7 +845,7 @@ func (x *ProcMeta) String() string {
 	return protoimpl.X.MessageStringOf(x)
 }
 
 func (*ProcMeta) ProtoMessage() {}
 
 func (x *ProcMeta) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[6]
+	mi := &file_control_proto_msgTypes[8]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -648,7 +858,7 @@ func (x *ProcMeta) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use ProcMeta.ProtoReflect.Descriptor instead.
 func (*ProcMeta) Descriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{6}
+	return file_control_proto_rawDescGZIP(), []int{8}
 }
 
 func (x *ProcMeta) GetProcess() string {
@@ -756,37 +966,37 @@ func (x *ProcMeta) GetError() string {
 	return ""
 }
 
-// Status is the current status of Elastic Agent.
-type StatusResponse struct {
+// StateResponse is the current state of Elastic Agent.
+type StateResponse struct {
 	state         protoimpl.MessageState
 	sizeCache     protoimpl.SizeCache
 	unknownFields protoimpl.UnknownFields
 
-	// Overall status of Elastic Agent.
-	Status Status `protobuf:"varint,1,opt,name=status,proto3,enum=cproto.Status" json:"status,omitempty"`
+	// Overall state of Elastic Agent.
+	State State `protobuf:"varint,1,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"`
 	// Overall status message of Elastic Agent.
 	Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"`
-	// Status of each application in Elastic Agent.
-	Applications []*ApplicationStatus `protobuf:"bytes,3,rep,name=applications,proto3" json:"applications,omitempty"`
+	// Status of each component in Elastic Agent.
+	Components []*ComponentState `protobuf:"bytes,3,rep,name=components,proto3" json:"components,omitempty"`
 }
 
-func (x *StatusResponse) Reset() {
-	*x = StatusResponse{}
+func (x *StateResponse) Reset() {
+	*x = StateResponse{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[7]
+		mi := &file_control_proto_msgTypes[9]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
 }
 
-func (x *StatusResponse) String() string {
+func (x *StateResponse) String() string {
 	return protoimpl.X.MessageStringOf(x)
 }
 
-func (*StatusResponse) ProtoMessage() {}
+func (*StateResponse) ProtoMessage() {}
 
-func (x *StatusResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[7]
+func (x *StateResponse) ProtoReflect() protoreflect.Message {
+	mi := &file_control_proto_msgTypes[9]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -797,28 +1007,28 @@ func (x *StatusResponse) ProtoReflect() protoreflect.Message {
 	return mi.MessageOf(x)
 }
 
-// Deprecated: Use StatusResponse.ProtoReflect.Descriptor instead.
-func (*StatusResponse) Descriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{7}
+// Deprecated: Use StateResponse.ProtoReflect.Descriptor instead.
+func (*StateResponse) Descriptor() ([]byte, []int) {
+	return file_control_proto_rawDescGZIP(), []int{9}
 }
 
-func (x *StatusResponse) GetStatus() Status {
+func (x *StateResponse) GetState() State {
 	if x != nil {
-		return x.Status
+		return x.State
 	}
-	return Status_STARTING
+	return State_STARTING
 }
 
-func (x *StatusResponse) GetMessage() string {
+func (x *StateResponse) GetMessage() string {
 	if x != nil {
 		return x.Message
 	}
 	return ""
 }
 
-func (x *StatusResponse) GetApplications() []*ApplicationStatus {
+func (x *StateResponse) GetComponents() []*ComponentState {
 	if x != nil {
-		return x.Applications
+		return x.Components
 	}
 	return nil
 }
 
@@ -835,7 +1045,7 @@ type ProcMetaResponse struct {
 func (x *ProcMetaResponse) Reset() {
 	*x = ProcMetaResponse{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[8]
+		mi := &file_control_proto_msgTypes[10]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -848,7 +1058,7 @@ func (x *ProcMetaResponse) String() string {
 
 func (*ProcMetaResponse) ProtoMessage() {}
 
 func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[8]
+	mi := &file_control_proto_msgTypes[10]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -861,7 +1071,7 @@ func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use ProcMetaResponse.ProtoReflect.Descriptor instead.
 func (*ProcMetaResponse) Descriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{8}
+	return file_control_proto_rawDescGZIP(), []int{10}
 }
 
 func (x *ProcMetaResponse) GetProcs() []*ProcMeta {
@@ -890,7 +1100,7 @@ type PprofRequest struct {
 func (x *PprofRequest) Reset() {
 	*x = PprofRequest{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[9]
+		mi := &file_control_proto_msgTypes[11]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -903,7 +1113,7 @@ func (x *PprofRequest) String() string {
 
 func (*PprofRequest) ProtoMessage() {}
 
 func (x *PprofRequest) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[9]
+	mi := &file_control_proto_msgTypes[11]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -916,7 +1126,7 @@ func (x *PprofRequest) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use PprofRequest.ProtoReflect.Descriptor instead.
 func (*PprofRequest) Descriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{9}
+	return file_control_proto_rawDescGZIP(), []int{11}
 }
 
 func (x *PprofRequest) GetPprofType() []PprofOption {
@@ -963,7 +1173,7 @@ type PprofResult struct {
 func (x *PprofResult) Reset() {
 	*x = PprofResult{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[10]
+		mi := &file_control_proto_msgTypes[12]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -976,7 +1186,7 @@ func (x *PprofResult) String() string {
 
 func (*PprofResult) ProtoMessage() {}
 
 func (x *PprofResult) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[10]
+	mi := &file_control_proto_msgTypes[12]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -989,7 +1199,7 @@ func (x *PprofResult) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use PprofResult.ProtoReflect.Descriptor instead.
 func (*PprofResult) Descriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{10}
+	return file_control_proto_rawDescGZIP(), []int{12}
 }
 
 func (x *PprofResult) GetAppName() string {
@@ -1039,7 +1249,7 @@ type PprofResponse struct {
 func (x *PprofResponse) Reset() {
 	*x = PprofResponse{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[11]
+		mi := &file_control_proto_msgTypes[13]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1052,7 +1262,7 @@ func (x *PprofResponse) String() string {
 
 func (*PprofResponse) ProtoMessage() {}
 
 func (x *PprofResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[11]
+	mi := &file_control_proto_msgTypes[13]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1065,7 +1275,7 @@ func (x *PprofResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use PprofResponse.ProtoReflect.Descriptor instead.
 func (*PprofResponse) Descriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{11}
+	return file_control_proto_rawDescGZIP(), []int{13}
 }
 
 func (x *PprofResponse) GetResults() []*PprofResult {
@@ -1090,7 +1300,7 @@ type MetricsResponse struct {
 func (x *MetricsResponse) Reset() {
 	*x = MetricsResponse{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[12]
+		mi := &file_control_proto_msgTypes[14]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1103,7 +1313,7 @@ func (x *MetricsResponse) String() string {
 
 func (*MetricsResponse) ProtoMessage() {}
 
 func (x *MetricsResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[12]
+	mi := &file_control_proto_msgTypes[14]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1116,7 +1326,7 @@ func (x *MetricsResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead.
 func (*MetricsResponse) Descriptor() ([]byte, []int) {
-	return file_control_proto_rawDescGZIP(), []int{12}
+	return file_control_proto_rawDescGZIP(), []int{14}
 }
 
 func (x *MetricsResponse) GetAppName() string {
@@ -1159,7 +1369,7 @@ type ProcMetricsResponse struct {
 func (x *ProcMetricsResponse) Reset() {
 	*x = ProcMetricsResponse{}
 	if protoimpl.UnsafeEnabled {
-		mi := &file_control_proto_msgTypes[13]
+		mi := &file_control_proto_msgTypes[15]
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		ms.StoreMessageInfo(mi)
 	}
@@ -1172,7 +1382,7 @@ func (x *ProcMetricsResponse) String() string {
 
 func (*ProcMetricsResponse) ProtoMessage() {}
 
 func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message {
-	mi := &file_control_proto_msgTypes[13]
+	mi := &file_control_proto_msgTypes[15]
 	if protoimpl.UnsafeEnabled && x != nil {
 		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
 		if ms.LoadMessageInfo() == nil {
@@ -1185,7 +1395,7 @@ func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message {
 
 // Deprecated: Use ProcMetricsResponse.ProtoReflect.Descriptor instead.
func (*ProcMetricsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{13} + return file_control_proto_rawDescGZIP(), []int{15} } func (x *ProcMetricsResponse) GetResult() []*MetricsResponse { @@ -1225,139 +1435,170 @@ var file_control_proto_rawDesc = []byte{ 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0x93, 0x01, 0x0a, 0x11, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, - 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, - 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb5, 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, - 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, - 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, - 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, - 0x0a, 0x0c, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, - 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, - 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, - 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, - 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, - 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x09, 0x52, 
0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, - 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, - 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, - 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, - 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, - 0x91, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x26, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x0e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, - 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, - 0x73, 0x61, 0x67, 0x65, 0x12, 0x3d, 0x0a, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x0c, 0x61, 0x70, 0x70, 0x6c, 0x69, 0x63, 0x61, 0x74, 0x69, - 0x6f, 0x6e, 0x73, 0x22, 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, - 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, - 0x9d, 0x01, 0x0a, 0x0c, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, - 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, - 0x79, 0x70, 0x65, 0x12, 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, - 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, - 0xa4, 0x01, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, - 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, - 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 
0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, - 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, - 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, - 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, - 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, - 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, - 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, - 0x13, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, - 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x2a, 0x79, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, - 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, - 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, - 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, - 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, - 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, - 0x47, 0x10, 0x05, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, - 0x10, 0x06, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x07, - 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, - 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, - 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, - 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, - 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, - 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, - 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 
0x10, 0x06, 0x12, 0x10, - 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, - 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8e, 0x03, 0x0a, 0x13, - 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, - 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, - 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, - 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, - 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, - 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, - 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, + 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, + 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, + 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, + 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, + 0x12, 0x23, 0x0a, 0x05, 
0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, + 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, + 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x14, 0x43, 0x6f, + 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, + 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, + 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, + 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, + 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, + 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, + 0x6e, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x69, + 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x3f, 0x0a, + 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, + 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, + 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb5, + 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, + 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, + 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 
0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, + 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, + 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, + 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, + 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, + 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, + 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, + 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, + 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, + 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, + 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 0x72, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, + 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, + 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, + 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, + 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, + 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, + 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, + 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, + 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, + 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, + 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x0c, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 
0x12, 0x31, 0x0a, 0x09, + 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, + 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, + 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0b, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, + 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, + 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, + 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, + 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, + 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, + 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x13, 0x50, 0x72, 0x6f, + 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 
0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, + 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, + 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, + 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, + 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, + 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, + 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, + 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, + 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, + 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, + 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, + 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, + 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, + 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, + 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, + 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, + 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, + 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, + 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, + 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8c, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, + 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, + 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, + 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, + 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 
0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, + 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, + 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, + 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, + 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1372,57 +1613,66 @@ func file_control_proto_rawDescGZIP() []byte { return file_control_proto_rawDescData } -var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 3) -var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 14) +var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 4) +var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 17) var file_control_proto_goTypes = []interface{}{ - (Status)(0), // 0: cproto.Status - (ActionStatus)(0), // 1: cproto.ActionStatus - (PprofOption)(0), // 2: cproto.PprofOption - (*Empty)(nil), // 3: cproto.Empty - (*VersionResponse)(nil), // 4: cproto.VersionResponse - (*RestartResponse)(nil), // 5: cproto.RestartResponse - (*UpgradeRequest)(nil), // 6: cproto.UpgradeRequest - (*UpgradeResponse)(nil), // 7: cproto.UpgradeResponse - (*ApplicationStatus)(nil), // 8: cproto.ApplicationStatus - (*ProcMeta)(nil), // 9: cproto.ProcMeta - (*StatusResponse)(nil), // 10: cproto.StatusResponse - (*ProcMetaResponse)(nil), // 11: cproto.ProcMetaResponse - (*PprofRequest)(nil), // 12: cproto.PprofRequest - (*PprofResult)(nil), // 13: cproto.PprofResult - (*PprofResponse)(nil), // 14: cproto.PprofResponse - (*MetricsResponse)(nil), // 15: cproto.MetricsResponse - (*ProcMetricsResponse)(nil), // 16: cproto.ProcMetricsResponse + (State)(0), // 0: cproto.State + (UnitType)(0), // 1: cproto.UnitType + (ActionStatus)(0), // 2: cproto.ActionStatus + (PprofOption)(0), // 3: cproto.PprofOption + (*Empty)(nil), // 4: cproto.Empty + (*VersionResponse)(nil), // 5: cproto.VersionResponse + (*RestartResponse)(nil), // 6: cproto.RestartResponse + (*UpgradeRequest)(nil), // 7: cproto.UpgradeRequest + (*UpgradeResponse)(nil), // 8: cproto.UpgradeResponse + (*ComponentUnitState)(nil), // 9: cproto.ComponentUnitState + (*ComponentVersionInfo)(nil), // 10: cproto.ComponentVersionInfo + (*ComponentState)(nil), // 11: cproto.ComponentState + (*ProcMeta)(nil), // 12: cproto.ProcMeta + (*StateResponse)(nil), // 13: cproto.StateResponse + (*ProcMetaResponse)(nil), // 14: cproto.ProcMetaResponse + (*PprofRequest)(nil), // 15: cproto.PprofRequest + (*PprofResult)(nil), // 16: cproto.PprofResult + (*PprofResponse)(nil), // 17: cproto.PprofResponse + (*MetricsResponse)(nil), // 18: cproto.MetricsResponse + (*ProcMetricsResponse)(nil), // 19: cproto.ProcMetricsResponse + nil, // 20: 
cproto.ComponentVersionInfo.MetaEntry } var file_control_proto_depIdxs = []int32{ - 1, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus - 1, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus - 0, // 2: cproto.ApplicationStatus.status:type_name -> cproto.Status - 0, // 3: cproto.StatusResponse.status:type_name -> cproto.Status - 8, // 4: cproto.StatusResponse.applications:type_name -> cproto.ApplicationStatus - 9, // 5: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta - 2, // 6: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption - 2, // 7: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption - 13, // 8: cproto.PprofResponse.results:type_name -> cproto.PprofResult - 15, // 9: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse - 3, // 10: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty - 3, // 11: cproto.ElasticAgentControl.Status:input_type -> cproto.Empty - 3, // 12: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty - 6, // 13: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest - 3, // 14: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty - 12, // 15: cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest - 3, // 16: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty - 4, // 17: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse - 10, // 18: cproto.ElasticAgentControl.Status:output_type -> cproto.StatusResponse - 5, // 19: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse - 7, // 20: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse - 11, // 21: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse - 14, // 22: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse - 16, // 23: cproto.ElasticAgentControl.ProcMetrics:output_type -> cproto.ProcMetricsResponse - 17, // [17:24] is the sub-list for method output_type - 10, // [10:17] is the sub-list for method input_type - 10, // [10:10] is the sub-list for extension type_name - 10, // [10:10] is the sub-list for extension extendee - 0, // [0:10] is the sub-list for field type_name + 2, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus + 2, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus + 1, // 2: cproto.ComponentUnitState.unit_type:type_name -> cproto.UnitType + 0, // 3: cproto.ComponentUnitState.state:type_name -> cproto.State + 20, // 4: cproto.ComponentVersionInfo.meta:type_name -> cproto.ComponentVersionInfo.MetaEntry + 0, // 5: cproto.ComponentState.state:type_name -> cproto.State + 9, // 6: cproto.ComponentState.units:type_name -> cproto.ComponentUnitState + 10, // 7: cproto.ComponentState.version_info:type_name -> cproto.ComponentVersionInfo + 0, // 8: cproto.StateResponse.state:type_name -> cproto.State + 11, // 9: cproto.StateResponse.components:type_name -> cproto.ComponentState + 12, // 10: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta + 3, // 11: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption + 3, // 12: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption + 16, // 13: cproto.PprofResponse.results:type_name -> cproto.PprofResult + 18, // 14: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse + 4, // 15: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty + 4, // 16: cproto.ElasticAgentControl.State:input_type -> cproto.Empty + 4, // 17: 
cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty + 7, // 18: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest + 4, // 19: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty + 15, // 20: cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest + 4, // 21: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty + 5, // 22: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse + 13, // 23: cproto.ElasticAgentControl.State:output_type -> cproto.StateResponse + 6, // 24: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse + 8, // 25: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse + 14, // 26: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse + 17, // 27: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse + 19, // 28: cproto.ElasticAgentControl.ProcMetrics:output_type -> cproto.ProcMetricsResponse + 22, // [22:29] is the sub-list for method output_type + 15, // [15:22] is the sub-list for method input_type + 15, // [15:15] is the sub-list for extension type_name + 15, // [15:15] is the sub-list for extension extendee + 0, // [0:15] is the sub-list for field type_name } func init() { file_control_proto_init() } @@ -1492,7 +1742,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ApplicationStatus); i { + switch v := v.(*ComponentUnitState); i { case 0: return &v.state case 1: @@ -1504,7 +1754,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMeta); i { + switch v := v.(*ComponentVersionInfo); i { case 0: return &v.state case 1: @@ -1516,7 +1766,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*StatusResponse); i { + switch v := v.(*ComponentState); i { case 0: return &v.state case 1: @@ -1528,7 +1778,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMetaResponse); i { + switch v := v.(*ProcMeta); i { case 0: return &v.state case 1: @@ -1540,7 +1790,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofRequest); i { + switch v := v.(*StateResponse); i { case 0: return &v.state case 1: @@ -1552,7 +1802,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResult); i { + switch v := v.(*ProcMetaResponse); i { case 0: return &v.state case 1: @@ -1564,7 +1814,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResponse); i { + switch v := v.(*PprofRequest); i { case 0: return &v.state case 1: @@ -1576,7 +1826,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricsResponse); i { + switch v := v.(*PprofResult); i { case 0: return &v.state case 1: @@ -1588,6 +1838,30 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*PprofResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + 
case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*MetricsResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ProcMetricsResponse); i { case 0: return &v.state @@ -1605,8 +1879,8 @@ func file_control_proto_init() { File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_control_proto_rawDesc, - NumEnums: 3, - NumMessages: 14, + NumEnums: 4, + NumMessages: 17, NumExtensions: 0, NumServices: 1, }, diff --git a/internal/pkg/agent/control/cproto/control_grpc.pb.go b/internal/pkg/agent/control/cproto/control_grpc.pb.go index 3365f1a6496..c9e97f7047a 100644 --- a/internal/pkg/agent/control/cproto/control_grpc.pb.go +++ b/internal/pkg/agent/control/cproto/control_grpc.pb.go @@ -29,8 +29,8 @@ const _ = grpc.SupportPackageIsVersion7 type ElasticAgentControlClient interface { // Fetches the currently running version of the Elastic Agent. Version(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*VersionResponse, error) - // Fetches the currently status of the Elastic Agent. - Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) + // Fetches the current state of the Elastic Agent. + State(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StateResponse, error) // Restart restarts the current running Elastic Agent. Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) // Upgrade starts the upgrade process of Elastic Agent. @@ -60,9 +60,9 @@ func (c *elasticAgentControlClient) Version(ctx context.Context, in *Empty, opts return out, nil } -func (c *elasticAgentControlClient) Status(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StatusResponse, error) { - out := new(StatusResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Status", in, out, opts...) +func (c *elasticAgentControlClient) State(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*StateResponse, error) { + out := new(StateResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/State", in, out, opts...) if err != nil { return nil, err } @@ -120,8 +120,8 @@ func (c *elasticAgentControlClient) ProcMetrics(ctx context.Context, in *Empty, type ElasticAgentControlServer interface { // Fetches the currently running version of the Elastic Agent. Version(context.Context, *Empty) (*VersionResponse, error) - // Fetches the currently status of the Elastic Agent. - Status(context.Context, *Empty) (*StatusResponse, error) + // Fetches the current state of the Elastic Agent. + State(context.Context, *Empty) (*StateResponse, error) // Restart restarts the current running Elastic Agent. Restart(context.Context, *Empty) (*RestartResponse, error) // Upgrade starts the upgrade process of Elastic Agent.
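
The hunks above rename the control protocol's Status RPC to State and swap StatusResponse for the component-oriented StateResponse. A minimal sketch of an in-repo caller using the renamed RPC; the dial address and insecure credentials are illustrative assumptions (the agent actually serves this on a platform-specific control socket), and cproto is an internal package, so this only compiles inside the elastic-agent module:

    package main

    import (
    	"context"
    	"fmt"
    	"log"

    	"google.golang.org/grpc"
    	"google.golang.org/grpc/credentials/insecure"

    	"github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto"
    )

    func main() {
    	// Assumed local control endpoint, for illustration only.
    	conn, err := grpc.Dial("localhost:6789", grpc.WithTransportCredentials(insecure.NewCredentials()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer conn.Close()

    	client := cproto.NewElasticAgentControlClient(conn)
    	resp, err := client.State(context.Background(), &cproto.Empty{})
    	if err != nil {
    		log.Fatal(err)
    	}
    	// StateResponse now carries per-component state instead of per-application status.
    	fmt.Printf("agent: %s (%s)\n", resp.State, resp.Message)
    	for _, comp := range resp.Components {
    		fmt.Printf("  component %s (%s): %s\n", comp.Id, comp.Name, comp.State)
    	}
    }
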
@@ -142,8 +142,8 @@ type UnimplementedElasticAgentControlServer struct { func (UnimplementedElasticAgentControlServer) Version(context.Context, *Empty) (*VersionResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Version not implemented") } -func (UnimplementedElasticAgentControlServer) Status(context.Context, *Empty) (*StatusResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Status not implemented") +func (UnimplementedElasticAgentControlServer) State(context.Context, *Empty) (*StateResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method State not implemented") } func (UnimplementedElasticAgentControlServer) Restart(context.Context, *Empty) (*RestartResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Restart not implemented") @@ -191,20 +191,20 @@ func _ElasticAgentControl_Version_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _ElasticAgentControl_Status_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { +func _ElasticAgentControl_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(Empty) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ElasticAgentControlServer).Status(ctx, in) + return srv.(ElasticAgentControlServer).State(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Status", + FullMethod: "/cproto.ElasticAgentControl/State", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Status(ctx, req.(*Empty)) + return srv.(ElasticAgentControlServer).State(ctx, req.(*Empty)) } return interceptor(ctx, in, info, handler) } @@ -311,8 +311,8 @@ var ElasticAgentControl_ServiceDesc = grpc.ServiceDesc{ Handler: _ElasticAgentControl_Version_Handler, }, { - MethodName: "Status", - Handler: _ElasticAgentControl_Status_Handler, + MethodName: "State", + Handler: _ElasticAgentControl_State_Handler, }, { MethodName: "Restart", diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 6d3e5181729..620a5b7b024 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -8,31 +8,20 @@ import ( "context" "encoding/json" "fmt" - "io" "net" - "net/http" - "runtime" - "strings" "sync" - "time" "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/control" "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - monitoring "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/socket" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" 
"github.com/elastic/elastic-agent/internal/pkg/release" - "github.com/elastic/elastic-agent/internal/pkg/sorted" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -43,11 +32,8 @@ type Server struct { cproto.UnimplementedElasticAgentControlServer logger *logger.Logger - rex reexec.ExecManager - statusCtrl status.Controller - up *upgrade.Upgrader - routeFn func() *sorted.Set monitoringCfg *monitoringCfg.MonitoringConfig + coord *coordinator.Coordinator listener net.Listener server *grpc.Server tracer *apm.Tracer @@ -65,38 +51,15 @@ type specInfo struct { } // New creates a new control protocol server. -func New(log *logger.Logger, rex reexec.ExecManager, statusCtrl status.Controller, up *upgrade.Upgrader, tracer *apm.Tracer) *Server { +func New(log *logger.Logger, cfg *monitoringCfg.MonitoringConfig, coord *coordinator.Coordinator, tracer *apm.Tracer) *Server { return &Server{ - logger: log, - rex: rex, - statusCtrl: statusCtrl, - tracer: tracer, - up: up, + logger: log, + monitoringCfg: cfg, + coord: coord, + tracer: tracer, } } -// SetUpgrader changes the upgrader. -func (s *Server) SetUpgrader(up *upgrade.Upgrader) { - s.lock.Lock() - defer s.lock.Unlock() - s.up = up -} - -// SetRouteFn changes the route retrieval function. -func (s *Server) SetRouteFn(routesFetchFn func() *sorted.Set) { - s.lock.Lock() - defer s.lock.Unlock() - s.routeFn = routesFetchFn -} - -// SetMonitoringCfg sets a reference to the monitoring config used by the running agent. -// the controller references this config to find out if pprof is enabled for the agent or not -func (s *Server) SetMonitoringCfg(cfg *monitoringCfg.MonitoringConfig) { - s.lock.Lock() - defer s.lock.Unlock() - s.monitoringCfg = cfg -} - // Start starts the GRPC endpoint and accepts new connections. func (s *Server) Start() error { if s.server != nil { @@ -149,19 +112,53 @@ func (s *Server) Version(_ context.Context, _ *cproto.Empty) (*cproto.VersionRes }, nil } -// Status returns the overall status of the agent. -func (s *Server) Status(_ context.Context, _ *cproto.Empty) (*cproto.StatusResponse, error) { - status := s.statusCtrl.Status() - return &cproto.StatusResponse{ - Status: agentStatusToProto(status.Status), - Message: status.Message, - Applications: agentAppStatusToProto(status.Applications), +// State returns the overall state of the agent. 
+func (s *Server) State(_ context.Context, _ *cproto.Empty) (*cproto.StateResponse, error) { + var err error + + state := s.coord.State() + components := make([]*cproto.ComponentState, 0, len(state.Components)) + for _, comp := range state.Components { + units := make([]*cproto.ComponentUnitState, 0, len(comp.State.Units)) + for key, unit := range comp.State.Units { + payload := []byte("") + if unit.Payload != nil { + payload, err = json.Marshal(unit.Payload) + if err != nil { + return nil, fmt.Errorf("failed to marshal component %s unit %s payload: %w", comp.Component.ID, key.UnitID, err) + } + } + units = append(units, &cproto.ComponentUnitState{ + UnitType: cproto.UnitType(key.UnitType), + UnitId: key.UnitID, + State: cproto.State(unit.State), + Message: unit.Message, + Payload: string(payload), + }) + } + components = append(components, &cproto.ComponentState{ + Id: comp.Component.ID, + Name: comp.Component.Spec.BinaryName, + State: cproto.State(comp.State.State), + Message: comp.State.Message, + Units: units, + VersionInfo: &cproto.ComponentVersionInfo{ + Name: comp.State.VersionInfo.Name, + Version: comp.State.VersionInfo.Version, + Meta: comp.State.VersionInfo.Meta, + }, + }) + } + return &cproto.StateResponse{ + State: state.State, + Message: state.Message, + Components: components, + }, nil } // Restart performs re-exec. func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartResponse, error) { - s.rex.ReExec(nil) + s.coord.ReExec(nil) return &cproto.RestartResponse{ Status: cproto.ActionStatus_SUCCESS, }, nil @@ -169,29 +166,13 @@ func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartRes // Upgrade performs the upgrade operation. func (s *Server) Upgrade(ctx context.Context, request *cproto.UpgradeRequest) (*cproto.UpgradeResponse, error) { - s.lock.RLock() - u := s.up - s.lock.RUnlock() - if u == nil { - // not running with upgrader (must be controlled by Fleet) - return &cproto.UpgradeResponse{ - Status: cproto.ActionStatus_FAILURE, - Error: "cannot be upgraded; perform upgrading using Fleet", - }, nil - } - cb, err := u.Upgrade(ctx, &upgradeRequest{request}, false) + err := s.coord.Upgrade(ctx, request.Version, request.SourceURI, nil) if err != nil { return &cproto.UpgradeResponse{ //nolint:nilerr // returns err as response Status: cproto.ActionStatus_FAILURE, Error: err.Error(), }, nil } - // perform the re-exec after a 1 second delay - // this ensures that the upgrade response over GRPC is returned - go func() { - <-time.After(time.Second) - s.rex.ReExec(cb) - }() return &cproto.UpgradeResponse{ Status: cproto.ActionStatus_SUCCESS, Version: request.Version,
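
The State handler above JSON-encodes each unit's payload into the string Payload field of the response, and a nil payload crosses the wire as an empty string rather than JSON null. A standalone sketch of that conversion; the helper name payloadString is hypothetical, and the map argument stands in for the unit's arbitrary payload value:

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    // payloadString mirrors the per-unit conversion the State handler performs.
    func payloadString(payload map[string]interface{}) (string, error) {
    	out := []byte("")
    	if payload != nil {
    		var err error
    		out, err = json.Marshal(payload)
    		if err != nil {
    			return "", err
    		}
    	}
    	return string(out), nil
    }

    func main() {
    	s, _ := payloadString(map[string]interface{}{"error": "connect timeout"})
    	fmt.Printf("%q\n", s) // "{\"error\":\"connect timeout\"}"
    	s, _ = payloadString(nil)
    	fmt.Printf("%q\n", s) // ""
    }
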
@@ -217,25 +198,28 @@ type BeatInfo struct { // ProcMeta returns version and beat information for all running processes. func (s *Server) ProcMeta(ctx context.Context, _ *cproto.Empty) (*cproto.ProcMetaResponse, error) { - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - resp := &cproto.ProcMetaResponse{ - Procs: []*cproto.ProcMeta{}, - } + resp := &cproto.ProcMetaResponse{ + Procs: []*cproto.ProcMeta{}, + } - // gather spec data for all rk/apps running - specs := s.getSpecInfo("", "") - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - client := newSocketRequester(si.app, si.rk, endpoint) + // gather spec data for all rk/apps running + specs := s.getSpecInfo("", "") + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + client := newSocketRequester(si.app, si.rk, endpoint) - procMeta := client.procMeta(ctx) - resp.Procs = append(resp.Procs, procMeta) - } + procMeta := client.procMeta(ctx) + resp.Procs = append(resp.Procs, procMeta) + } - return resp, nil + return resp, nil + */ + return nil, nil } // Pprof returns /debug/pprof data for the requested application-route_key or all running applications. @@ -244,66 +228,69 @@ func (s *Server) Pprof(ctx context.Context, req *cproto.PprofRequest) (*cproto.P return nil, fmt.Errorf("agent.monitoring.pprof disabled") } - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } - - dur, err := time.ParseDuration(req.TraceDuration) - if err != nil { - return nil, fmt.Errorf("unable to parse trace duration: %w", err) - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - resp := &cproto.PprofResponse{ - Results: []*cproto.PprofResult{}, - } + dur, err := time.ParseDuration(req.TraceDuration) + if err != nil { + return nil, fmt.Errorf("unable to parse trace duration: %w", err) + } - var wg sync.WaitGroup - ch := make(chan *cproto.PprofResult, 1) + resp := &cproto.PprofResponse{ + Results: []*cproto.PprofResult{}, + } - // retrieve elastic-agent pprof data if requested or application is unspecified. - if req.AppName == "" || req.AppName == agentName { - endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester(agentName, "", endpoint) - for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) + var wg sync.WaitGroup + ch := make(chan *cproto.PprofResult, 1) + + // retrieve elastic-agent pprof data if requested or application is unspecified. + if req.AppName == "" || req.AppName == agentName { + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) + for _, opt := range req.PprofType { + wg.Add(1) + go func(opt cproto.PprofOption) { + res := c.getPprof(ctx, opt, dur) + ch <- res + wg.Done() + }(opt) + } } - } - // get requested rk/appname spec or all specs - var specs []specInfo - if req.AppName != agentName { - specs = s.getSpecInfo(req.RouteKey, req.AppName) - } - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - c := newSocketRequester(si.app, si.rk, endpoint) - // Launch a concurrent goroutine to gather all pprof endpoints from a socket.
- for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) + // get requested rk/appname spec or all specs + var specs []specInfo + if req.AppName != agentName { + specs = s.getSpecInfo(req.RouteKey, req.AppName) + } + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + c := newSocketRequester(si.app, si.rk, endpoint) + // Launch a concurrent goroutine to gather all pprof endpoints from a socket. + for _, opt := range req.PprofType { + wg.Add(1) + go func(opt cproto.PprofOption) { + res := c.getPprof(ctx, opt, dur) + ch <- res + wg.Done() + }(opt) + } } - } - // wait for the waitgroup to be done and close the channel - go func() { - wg.Wait() - close(ch) - }() + // wait for the waitgroup to be done and close the channel + go func() { + wg.Wait() + close(ch) + }() - // gather all results from channel until closed. - for res := range ch { - resp.Results = append(resp.Results, res) - } - return resp, nil + // gather all results from channel until closed. + for res := range ch { + resp.Results = append(resp.Results, res) + } + return resp, nil + */ + return nil, nil } // ProcMetrics returns all buffered metrics data for the agent and running processes. @@ -313,32 +300,36 @@ func (s *Server) ProcMetrics(ctx context.Context, _ *cproto.Empty) (*cproto.Proc return &cproto.ProcMetricsResponse{}, nil } - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } + /* + if s.routeFn == nil { + return nil, errors.New("route function is nil") + } - // gather metrics buffer data from the elastic-agent - endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester(agentName, "", endpoint) - metrics := c.procMetrics(ctx) + // gather metrics buffer data from the elastic-agent + endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) + c := newSocketRequester(agentName, "", endpoint) + metrics := c.procMetrics(ctx) - resp := &cproto.ProcMetricsResponse{ - Result: []*cproto.MetricsResponse{metrics}, - } + resp := &cproto.ProcMetricsResponse{ + Result: []*cproto.MetricsResponse{metrics}, + } - // gather metrics buffer data from all other processes - specs := s.getSpecInfo("", "") - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - client := newSocketRequester(si.app, si.rk, endpoint) + // gather metrics buffer data from all other processes + specs := s.getSpecInfo("", "") + for _, si := range specs { + endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) + client := newSocketRequester(si.app, si.rk, endpoint) - s.logger.Infof("gather metrics from %s", endpoint) - metrics := client.procMetrics(ctx) - resp.Result = append(resp.Result, metrics) - } - return resp, nil + s.logger.Infof("gather metrics from %s", endpoint) + metrics := client.procMetrics(ctx) + resp.Result = append(resp.Result, metrics) + } + return resp, nil + */ + return nil, nil } +/* // getSpecs will return the specs for the program associated with the specified route key/app name, or all programs if no key(s) are specified. // if matchRK or matchApp are empty all results will be returned. 
func (s *Server) getSpecInfo(matchRK, matchApp string) []specInfo { @@ -554,48 +545,4 @@ func (r *socketRequester) procMetrics(ctx context.Context) *cproto.MetricsRespon res.Result = p return res } - -type upgradeRequest struct { - *cproto.UpgradeRequest -} - -func (r *upgradeRequest) Version() string { - return r.GetVersion() -} - -func (r *upgradeRequest) SourceURI() string { - return r.GetSourceURI() -} - -func (r *upgradeRequest) FleetAction() *fleetapi.ActionUpgrade { - // upgrade request not from Fleet - return nil -} - -func agentStatusToProto(code status.AgentStatusCode) cproto.Status { - if code == status.Degraded { - return cproto.Status_DEGRADED - } - if code == status.Failed { - return cproto.Status_FAILED - } - return cproto.Status_HEALTHY -} - -func agentAppStatusToProto(apps []status.AgentApplicationStatus) []*cproto.ApplicationStatus { - s := make([]*cproto.ApplicationStatus, len(apps)) - for i, a := range apps { - var payload []byte - if a.Payload != nil { - payload, _ = json.Marshal(a.Payload) - } - s[i] = &cproto.ApplicationStatus{ - Id: a.ID, - Name: a.Name, - Status: cproto.Status(a.Status.ToProto()), - Message: a.Message, - Payload: string(payload), - } - } - return s -} +*/ diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 6b4e717fa73..b43db32892d 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -12,7 +12,6 @@ import ( "path/filepath" "runtime" "strings" - "sync" "github.com/kardianos/service" @@ -29,7 +28,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -233,19 +231,12 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) inputs, ok := transpiler.Lookup(ast, "inputs") if ok { varsArray := make([]*transpiler.Vars, 0) - var wg sync.WaitGroup - wg.Add(1) - varsCallback := func(vv []*transpiler.Vars) { - varsArray = vv - wg.Done() - } ctrl, err := composable.New(log, cfg) if err != nil { return nil, err } - _ = ctrl.Run(ctx, varsCallback) - wg.Wait() + _ = ctrl.Run(ctx) renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) if err != nil { @@ -258,7 +249,7 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) } // apply caps - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log, status.NewController(log)) + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) if err != nil { return nil, err } diff --git a/internal/pkg/agent/storage/store/state_store.go b/internal/pkg/agent/storage/store/state_store.go index 3316b34960b..8a6d3fc5e8d 100644 --- a/internal/pkg/agent/storage/store/state_store.go +++ b/internal/pkg/agent/storage/store/state_store.go @@ -16,23 +16,18 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/pkg/core/logger" ) type dispatcher interface { - Dispatch(context.Context, FleetAcker, ...action) error + Dispatch(context.Context, acker.Acker, ...action) error } type store interface { Save(io.Reader) error } -// FleetAcker is an acker of actions to fleet. 
-type FleetAcker interface { - Ack(ctx context.Context, action fleetapi.Action) error - Commit(ctx context.Context) error -} - type storeLoad interface { store Load() (io.ReadCloser, error) @@ -93,7 +88,7 @@ func NewStateStoreWithMigration(log *logger.Logger, actionStorePath, stateStoreP } // NewStateStoreActionAcker creates a new state store backed action acker. -func NewStateStoreActionAcker(acker FleetAcker, store *StateStore) *StateStoreActionAcker { +func NewStateStoreActionAcker(acker acker.Acker, store *StateStore) *StateStoreActionAcker { return &StateStoreActionAcker{acker: acker, store: store} } @@ -326,7 +321,7 @@ func (s *StateStore) AckToken() string { // it's up to the action store to decide if we need to persist the event for future replay or just // discard the event. type StateStoreActionAcker struct { - acker FleetAcker + acker acker.Acker store *StateStore } @@ -350,7 +345,7 @@ func ReplayActions( ctx context.Context, log *logger.Logger, dispatcher dispatcher, - acker FleetAcker, + acker acker.Acker, actions ...action, ) error { log.Info("restoring current policy from disk") diff --git a/internal/pkg/capabilities/capabilities.go b/internal/pkg/capabilities/capabilities.go index fa360a53794..3d03fab9296 100644 --- a/internal/pkg/capabilities/capabilities.go +++ b/internal/pkg/capabilities/capabilities.go @@ -8,11 +8,8 @@ import ( "errors" "os" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -30,14 +27,13 @@ var ( ) type capabilitiesManager struct { - caps []Capability - reporter status.Reporter + caps []Capability } -type capabilityFactory func(*logger.Logger, *ruleDefinitions, status.Reporter) (Capability, error) +type capabilityFactory func(*logger.Logger, *ruleDefinitions) (Capability, error) // Load loads capabilities files and prepares manager.
-func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability, error) { +func Load(capsFile string, log *logger.Logger) (Capability, error) { handlers := []capabilityFactory{ newInputsCapability, newOutputsCapability, @@ -45,8 +41,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability } cm := &capabilitiesManager{ - caps: make([]Capability, 0), - reporter: sc.RegisterComponentWithPersistance("capabilities", true), + caps: make([]Capability, 0), } // load capabilities from file @@ -56,7 +51,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability } if os.IsNotExist(err) { - log.Infof("capabilities file not found in %s", capsFile) + log.Infof("Capabilities file not found in %s", capsFile) return cm, nil } defer fd.Close() @@ -69,7 +64,7 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability // make list of handlers out of capabilities definition for _, h := range handlers { - cap, err := h(log, definitions, cm.reporter) + cap, err := h(log, definitions) if err != nil { return nil, err } @@ -86,8 +81,6 @@ func Load(capsFile string, log *logger.Logger, sc status.Controller) (Capability func (mgr *capabilitiesManager) Apply(in interface{}) (interface{}, error) { var err error - // reset health on start, child caps will update to fail if needed - mgr.reporter.Update(state.Healthy, "", nil) for _, cap := range mgr.caps { in, err = cap.Apply(in) if err != nil { diff --git a/internal/pkg/capabilities/input.go b/internal/pkg/capabilities/input.go index 7ebc4b4fb15..2428c49f064 100644 --- a/internal/pkg/capabilities/input.go +++ b/internal/pkg/capabilities/input.go @@ -7,11 +7,8 @@ package capabilities import ( "fmt" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -19,7 +16,7 @@ const ( inputsKey = "inputs" ) -func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newInputsCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiInputsCapability{log: log, caps: []*inputCapability{}}, nil } @@ -27,7 +24,7 @@ func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter statu caps := make([]*inputCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newInputCapability(log, r, reporter) + c, err := newInputCapability(log, r) if err != nil { return nil, err } @@ -40,23 +37,21 @@ func newInputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter statu return &multiInputsCapability{log: log, caps: caps}, nil } -func newInputCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*inputCapability, error) { +func newInputCapability(log *logger.Logger, r ruler) (*inputCapability, error) { cap, ok := r.(*inputCapability) if !ok { return nil, nil } cap.log = log - cap.reporter = reporter return cap, nil } type inputCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` - Input string `json:"input" yaml:"input"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` + Input string `json:"input" 
yaml:"input"` } func (c *inputCapability) Apply(cfgMap map[string]interface{}) (map[string]interface{}, error) { @@ -166,7 +161,6 @@ func (c *inputCapability) renderInputs(inputs []map[string]interface{}) ([]map[s if !isSupported { msg := fmt.Sprintf("input '%s' is not run due to capability restriction '%s'", inputType, c.name()) c.log.Infof(msg) - c.reporter.Update(state.Degraded, msg, nil) } newInputs = append(newInputs, input) diff --git a/internal/pkg/capabilities/output.go b/internal/pkg/capabilities/output.go index de11c3ce3b9..804ca64faa2 100644 --- a/internal/pkg/capabilities/output.go +++ b/internal/pkg/capabilities/output.go @@ -7,10 +7,7 @@ package capabilities import ( "fmt" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -19,7 +16,7 @@ const ( typeKey = "type" ) -func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiOutputsCapability{log: log, caps: []*outputCapability{}}, nil } @@ -27,7 +24,7 @@ func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter stat caps := make([]*outputCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newOutputCapability(log, r, reporter) + c, err := newOutputCapability(log, r) if err != nil { return nil, err } @@ -40,23 +37,21 @@ func newOutputsCapability(log *logger.Logger, rd *ruleDefinitions, reporter stat return &multiOutputsCapability{log: log, caps: caps}, nil } -func newOutputCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*outputCapability, error) { +func newOutputCapability(log *logger.Logger, r ruler) (*outputCapability, error) { cap, ok := r.(*outputCapability) if !ok { return nil, nil } cap.log = log - cap.reporter = reporter return cap, nil } type outputCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` - Output string `json:"output" yaml:"output"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` + Output string `json:"output" yaml:"output"` } func (c *outputCapability) Apply(cfgMap map[string]interface{}) (map[string]interface{}, error) { @@ -133,7 +128,6 @@ func (c *outputCapability) renderOutputs(outputs map[string]interface{}) (map[st if !isSupported { msg := fmt.Sprintf("output '%s' is left out due to capability restriction '%s'", outputName, c.name()) c.log.Errorf(msg) - c.reporter.Update(state.Degraded, msg, nil) } } diff --git a/internal/pkg/capabilities/upgrade.go b/internal/pkg/capabilities/upgrade.go index e39c963e222..2773f6c9709 100644 --- a/internal/pkg/capabilities/upgrade.go +++ b/internal/pkg/capabilities/upgrade.go @@ -8,12 +8,8 @@ import ( "fmt" "strings" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/eql" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -26,7 +22,7 @@ const ( // Available variables: // - version // - source_uri -func 
newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter status.Reporter) (Capability, error) { +func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions) (Capability, error) { if rd == nil { return &multiUpgradeCapability{caps: []*upgradeCapability{}}, nil } @@ -34,7 +30,7 @@ func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter sta caps := make([]*upgradeCapability, 0, len(rd.Capabilities)) for _, r := range rd.Capabilities { - c, err := newUpgradeCapability(log, r, reporter) + c, err := newUpgradeCapability(log, r) if err != nil { return nil, err } @@ -47,7 +43,7 @@ func newUpgradesCapability(log *logger.Logger, rd *ruleDefinitions, reporter sta return &multiUpgradeCapability{log: log, caps: caps}, nil } -func newUpgradeCapability(log *logger.Logger, r ruler, reporter status.Reporter) (*upgradeCapability, error) { +func newUpgradeCapability(log *logger.Logger, r ruler) (*upgradeCapability, error) { cap, ok := r.(*upgradeCapability) if !ok { return nil, nil @@ -70,15 +66,13 @@ func newUpgradeCapability(log *logger.Logger, r ruler, reporter status.Reporter) cap.upgradeEql = eqlExp cap.log = log - cap.reporter = reporter return cap, nil } type upgradeCapability struct { - log *logger.Logger - reporter status.Reporter - Name string `json:"name,omitempty" yaml:"name,omitempty"` - Type string `json:"rule" yaml:"rule"` + log *logger.Logger + Name string `json:"name,omitempty" yaml:"name,omitempty"` + Type string `json:"rule" yaml:"rule"` // UpgradeEql is eql expression defining upgrade UpgradeEqlDefinition string `json:"upgrade" yaml:"upgrade"` @@ -129,7 +123,6 @@ func (c *upgradeCapability) Apply(upgradeMap map[string]interface{}) (map[string isSupported = !isSupported msg := fmt.Sprintf("upgrade is blocked out due to capability restriction '%s'", c.name()) c.log.Errorf(msg) - c.reporter.Update(state.Degraded, msg, nil) } if !isSupported { @@ -163,31 +156,8 @@ func (c *multiUpgradeCapability) Apply(in interface{}) (interface{}, error) { } func upgradeObject(a interface{}) map[string]interface{} { - resultMap := make(map[string]interface{}) - if ua, ok := a.(upgradeAction); ok { - resultMap[versionKey] = ua.Version() - resultMap[sourceURIKey] = ua.SourceURI() - return resultMap + if m, ok := a.(map[string]interface{}); ok { + return m } - - if ua, ok := a.(*fleetapi.ActionUpgrade); ok { - resultMap[versionKey] = ua.Version - resultMap[sourceURIKey] = ua.SourceURI - return resultMap - } - - if ua, ok := a.(fleetapi.ActionUpgrade); ok { - resultMap[versionKey] = ua.Version - resultMap[sourceURIKey] = ua.SourceURI - return resultMap - } - return nil } - -type upgradeAction interface { - // Version to upgrade to. - Version() string - // SourceURI for download. - SourceURI() string -} diff --git a/internal/pkg/composable/context.go b/internal/pkg/composable/context.go index 1dcb50cf956..97767f4a5d5 100644 --- a/internal/pkg/composable/context.go +++ b/internal/pkg/composable/context.go @@ -16,6 +16,14 @@ import ( // ContextProviderBuilder creates a new context provider based on the given config and returns it. type ContextProviderBuilder func(log *logger.Logger, config *config.Config) (corecomp.ContextProvider, error) +// MustAddContextProvider adds a new ContextProviderBuilder and panics if AddContextProvider returns an error.
+func (r *providerRegistry) MustAddContextProvider(name string, builder ContextProviderBuilder) { + err := r.AddContextProvider(name, builder) + if err != nil { + panic(err) + } +} + // AddContextProvider adds a new ContextProviderBuilder func (r *providerRegistry) AddContextProvider(name string, builder ContextProviderBuilder) error { r.lock.Lock() diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index a14e111194f..116424ae8e4 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -22,19 +22,25 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -// VarsCallback is callback called when the current vars state changes. -type VarsCallback func([]*transpiler.Vars) - // Controller manages the state of the providers current context. type Controller interface { // Run runs the controller. // // Cancelling the context stops the controller. - Run(ctx context.Context, cb VarsCallback) error + Run(ctx context.Context) error + + // Errors returns the channel to watch for reported errors. + Errors() <-chan error + + // Watch returns the channel to watch for variable changes. + Watch() <-chan []*transpiler.Vars } // controller manages the state of the providers current context. type controller struct { + logger *logger.Logger + ch chan []*transpiler.Vars + errCh chan error contextProviders map[string]*contextProviderState dynamicProviders map[string]*dynamicProviderState } @@ -87,28 +93,40 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { } return &controller{ + logger: l, + ch: make(chan []*transpiler.Vars), + errCh: make(chan error), contextProviders: contextProviders, dynamicProviders: dynamicProviders, }, nil } // Run runs the controller. -func (c *controller) Run(ctx context.Context, cb VarsCallback) error { - // large number not to block performing Run on the provided providers - notify := make(chan bool, 5000) +func (c *controller) Run(ctx context.Context) error { + c.logger.Debugf("Starting controller for composable inputs") + defer c.logger.Debugf("Stopped controller for composable inputs") + + notify := make(chan bool) localCtx, cancel := context.WithCancel(ctx) + defer cancel() fetchContextProviders := mapstr.M{} + var wg sync.WaitGroup + wg.Add(len(c.contextProviders) + len(c.dynamicProviders)) + // run all the enabled context providers for name, state := range c.contextProviders { state.Context = localCtx state.signal = notify - err := state.provider.Run(state) - if err != nil { - cancel() - return errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) - } + go func(name string, state *contextProviderState) { + defer wg.Done() + err := state.provider.Run(state) + if err != nil && !errors.Is(err, context.Canceled) { + err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) + c.logger.Errorf("%s", err) + } + }(name, state) if p, ok := state.provider.(corecomp.FetchContextProvider); ok { _, _ = fetchContextProviders.Put(name, p) } @@ -118,65 +136,73 @@ func (c *controller) Run(ctx context.Context, cb VarsCallback) error { for name, state := range c.dynamicProviders { state.Context = localCtx state.signal = notify - err := state.provider.Run(state) - if err != nil { - cancel() - return errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) - } + go func(name string, state *dynamicProviderState) { + 
defer wg.Done() + err := state.provider.Run(state) + if err != nil && !errors.Is(err, context.Canceled) { + err = errors.New(err, fmt.Sprintf("failed to run provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) + c.logger.Errorf("%s", err) + } + }(name, state) } - go func() { + c.logger.Debugf("Started controller for composable inputs") + + // performs debounce of notifies; accumulates them into 100 millisecond chunks + t := time.NewTimer(100 * time.Millisecond) + for { + DEBOUNCE: for { - // performs debounce of notifies; accumulates them into 100 millisecond chunks - changed := false - t := time.NewTimer(100 * time.Millisecond) - for { - exitloop := false - select { - case <-ctx.Done(): - cancel() - return - case <-notify: - changed = true - case <-t.C: - exitloop = true - } - if exitloop { - break - } + select { + case <-ctx.Done(): + c.logger.Debugf("Stopping controller for composable inputs") + t.Stop() + cancel() + wg.Wait() + return ctx.Err() + case <-notify: + t.Reset(100 * time.Millisecond) + c.logger.Debugf("Variable state changed for composable inputs; debounce started") + drainChan(notify) + case <-t.C: + break DEBOUNCE } + } - t.Stop() - if !changed { - continue - } + c.logger.Debugf("Computing new variable state for composable inputs") - // build the vars list of mappings - vars := make([]*transpiler.Vars, 1) - mapping := map[string]interface{}{} - for name, state := range c.contextProviders { - mapping[name] = state.Current() - } - // this is ensured not to error, by how the mappings states are verified - vars[0], _ = transpiler.NewVars(mapping, fetchContextProviders) - - // add to the vars list for each dynamic providers mappings - for name, state := range c.dynamicProviders { - for _, mappings := range state.Mappings() { - local, _ := cloneMap(mapping) // will not fail; already been successfully cloned once - local[name] = mappings.mapping - // this is ensured not to error, by how the mappings states are verified - v, _ := transpiler.NewVarsWithProcessors(local, name, mappings.processors, fetchContextProviders) - vars = append(vars, v) - } + // build the vars list of mappings + vars := make([]*transpiler.Vars, 1) + mapping := map[string]interface{}{} + for name, state := range c.contextProviders { + mapping[name] = state.Current() + } + // this is ensured not to error, by how the mappings states are verified + vars[0], _ = transpiler.NewVars(mapping, fetchContextProviders) + + // add to the vars list for each dynamic providers mappings + for name, state := range c.dynamicProviders { + for _, mappings := range state.Mappings() { + local, _ := cloneMap(mapping) // will not fail; already been successfully cloned once + local[name] = mappings.mapping + // this is ensured not to error, by how the mappings states are verified + v, _ := transpiler.NewVarsWithProcessors(local, name, mappings.processors, fetchContextProviders) + vars = append(vars, v) } - - // execute the callback - cb(vars) } - }() - return nil + c.ch <- vars + } +} + +// Errors returns the channel to watch for reported errors. +func (c *controller) Errors() <-chan error { + return c.errCh +} +
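
With the VarsCallback API removed, callers now run the controller in a goroutine and select on Errors() and Watch(). A minimal in-repo consumer sketch under the new contract; watchVars and apply are hypothetical names, not part of this change:

    package composableexample

    import (
    	"context"
    	"log"

    	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
    	"github.com/elastic/elastic-agent/internal/pkg/composable"
    )

    // watchVars runs ctrl until ctx is cancelled, applying each new variable
    // set and logging provider errors as they are reported.
    func watchVars(ctx context.Context, ctrl composable.Controller, apply func([]*transpiler.Vars)) error {
    	runErr := make(chan error, 1)
    	go func() { runErr <- ctrl.Run(ctx) }()
    	for {
    		select {
    		case err := <-runErr:
    			return err // Run exited; this is ctx.Err() on a normal shutdown
    		case err := <-ctrl.Errors():
    			log.Printf("composable providers reported an error: %v", err)
    		case vars := <-ctrl.Watch():
    			apply(vars)
    		}
    	}
    }
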
+func (c *controller) Watch() <-chan []*transpiler.Vars {
+	return c.ch
+}
 
 type contextProviderState struct {
@@ -351,3 +377,13 @@ func addToSet(set []int, i int) []int {
 	}
 	return append(set, i)
 }
+
+func drainChan(ch chan bool) {
+	for {
+		select {
+		case <-ch:
+		default:
+			return
+		}
+	}
+}
diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go
index 09780767928..a8c3ec7df93 100644
--- a/internal/pkg/composable/controller_test.go
+++ b/internal/pkg/composable/controller_test.go
@@ -6,8 +6,9 @@ package composable_test
 
 import (
 	"context"
-	"sync"
+	"errors"
 	"testing"
+	"time"
 
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 
@@ -80,17 +81,34 @@ func TestController(t *testing.T) {
 	c, err := composable.New(log, cfg)
 	require.NoError(t, err)
 
-	var wg sync.WaitGroup
 	ctx, cancel := context.WithCancel(context.Background())
 	defer cancel()
-	wg.Add(1)
+
+	timeoutCtx, timeoutCancel := context.WithTimeout(ctx, 1*time.Second)
+	defer timeoutCancel()
+
 	var setVars []*transpiler.Vars
-	err = c.Run(ctx, func(vars []*transpiler.Vars) {
-		setVars = vars
-		wg.Done()
-	})
+	go func() {
+		defer cancel()
+		for {
+			select {
+			case <-timeoutCtx.Done():
+				return
+			case vars := <-c.Watch():
+				setVars = vars
+			}
+		}
+	}()
+
+	errCh := make(chan error)
+	go func() {
+		errCh <- c.Run(ctx)
+	}()
+	err = <-errCh
+	if errors.Is(err, context.Canceled) {
+		err = nil
+	}
 	require.NoError(t, err)
-	wg.Wait()
 
 	assert.Len(t, setVars, 3)
 
@@ -99,14 +117,17 @@
 	_, envExists := setVars[0].Lookup("env")
 	assert.False(t, envExists)
 	local, _ := setVars[0].Lookup("local")
-	localMap := local.(map[string]interface{})
+	localMap, ok := local.(map[string]interface{})
+	require.True(t, ok)
 	assert.Equal(t, "value1", localMap["key1"])
 
 	local, _ = setVars[1].Lookup("local_dynamic")
-	localMap = local.(map[string]interface{})
+	localMap, ok = local.(map[string]interface{})
+	require.True(t, ok)
 	assert.Equal(t, "value1", localMap["key1"])
 
 	local, _ = setVars[2].Lookup("local_dynamic")
-	localMap = local.(map[string]interface{})
+	localMap, ok = local.(map[string]interface{})
+	require.True(t, ok)
 	assert.Equal(t, "value2", localMap["key1"])
 }
diff --git a/internal/pkg/composable/dynamic.go b/internal/pkg/composable/dynamic.go
index a0de3543a1c..c83c2ccc2e2 100644
--- a/internal/pkg/composable/dynamic.go
+++ b/internal/pkg/composable/dynamic.go
@@ -36,6 +36,14 @@ type DynamicProvider interface {
 
 // DynamicProviderBuilder creates a new dynamic provider based on the given config and returns it.
 type DynamicProviderBuilder func(log *logger.Logger, config *config.Config) (DynamicProvider, error)
 
+// MustAddDynamicProvider adds a new DynamicProviderBuilder and panics if AddDynamicProvider returns an error.
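+//
+// Like MustAddContextProvider it is intended for init-time registration, e.g.
+// (sketch of the usage elsewhere in this change):
+//
+//	composable.Providers.MustAddDynamicProvider("docker", DynamicProviderBuilder)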
+func (r *providerRegistry) MustAddDynamicProvider(name string, builder DynamicProviderBuilder) { + err := r.AddDynamicProvider(name, builder) + if err != nil { + panic(err) + } +} + // AddDynamicProvider adds a new DynamicProviderBuilder func (r *providerRegistry) AddDynamicProvider(name string, builder DynamicProviderBuilder) error { r.lock.Lock() diff --git a/internal/pkg/composable/providers/agent/agent.go b/internal/pkg/composable/providers/agent/agent.go index 2b9d0ff3deb..ed8eb956afe 100644 --- a/internal/pkg/composable/providers/agent/agent.go +++ b/internal/pkg/composable/providers/agent/agent.go @@ -15,7 +15,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("agent", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("agent", ContextProviderBuilder) } type contextProvider struct{} diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index 4bdc6d11cfe..8647677e6e8 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -23,7 +23,7 @@ import ( const ContainerPriority = 0 func init() { - _ = composable.Providers.AddDynamicProvider("docker", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("docker", DynamicProviderBuilder) } type dockerContainerData struct { @@ -54,54 +54,51 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { c.logger.Infof("Docker provider skipped, unable to connect: %s", err) return nil } + defer watcher.Stop() - go func() { - for { - select { - case <-comm.Done(): - startListener.Stop() - stopListener.Stop() + for { + select { + case <-comm.Done(): + startListener.Stop() + stopListener.Stop() - // Stop all timers before closing the channel - for _, stopper := range stoppers { - stopper.Stop() - } - close(stopTrigger) - return - case event := <-startListener.Events(): - data, err := generateData(event) - if err != nil { - c.logger.Errorf("%s", err) - continue - } - if stopper, ok := stoppers[data.container.ID]; ok { - c.logger.Debugf("container %s is restarting, aborting pending stop", data.container.ID) - stopper.Stop() - delete(stoppers, data.container.ID) - return - } - err = comm.AddOrUpdate(data.container.ID, ContainerPriority, data.mapping, data.processors) - if err != nil { - c.logger.Errorf("%s", err) - } - case event := <-stopListener.Events(): - data, err := generateData(event) - if err != nil { - c.logger.Errorf("%s", err) - continue - } - stopper := time.AfterFunc(c.config.CleanupTimeout, func() { - stopTrigger <- data - }) - stoppers[data.container.ID] = stopper - case data := <-stopTrigger: + // Stop all timers before closing the channel + for _, stopper := range stoppers { + stopper.Stop() + } + close(stopTrigger) + return comm.Err() + case event := <-startListener.Events(): + data, err := generateData(event) + if err != nil { + c.logger.Errorf("%s", err) + continue + } + if stopper, ok := stoppers[data.container.ID]; ok { + c.logger.Debugf("container %s is restarting, aborting pending stop", data.container.ID) + stopper.Stop() delete(stoppers, data.container.ID) - comm.Remove(data.container.ID) + continue + } + err = comm.AddOrUpdate(data.container.ID, ContainerPriority, data.mapping, data.processors) + if err != nil { + c.logger.Errorf("%s", err) } + case event := <-stopListener.Events(): + data, err := generateData(event) + if err != nil { + c.logger.Errorf("%s", err) + continue + } + stopper := 
time.AfterFunc(c.config.CleanupTimeout, func() { + stopTrigger <- data + }) + stoppers[data.container.ID] = stopper + case data := <-stopTrigger: + delete(stoppers, data.container.ID) + comm.Remove(data.container.ID) } - }() - - return nil + } } // DynamicProviderBuilder builds the dynamic provider. diff --git a/internal/pkg/composable/providers/env/env.go b/internal/pkg/composable/providers/env/env.go index 4c6b5911f47..6f65120de48 100644 --- a/internal/pkg/composable/providers/env/env.go +++ b/internal/pkg/composable/providers/env/env.go @@ -16,7 +16,7 @@ import ( ) func init() { - composable.Providers.AddContextProvider("env", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("env", ContextProviderBuilder) } type contextProvider struct{} diff --git a/internal/pkg/composable/providers/host/host.go b/internal/pkg/composable/providers/host/host.go index 25d53430a2f..cc98021e77b 100644 --- a/internal/pkg/composable/providers/host/host.go +++ b/internal/pkg/composable/providers/host/host.go @@ -24,7 +24,7 @@ import ( const DefaultCheckInterval = 5 * time.Minute func init() { - composable.Providers.AddContextProvider("host", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("host", ContextProviderBuilder) } type infoFetcher func() (map[string]interface{}, error) @@ -50,34 +50,30 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // Update context when any host information changes. - go func() { - for { - t := time.NewTimer(c.CheckInterval) - select { - case <-comm.Done(): - t.Stop() - return - case <-t.C: - } - - updated, err := c.fetcher() - if err != nil { - c.logger.Warnf("Failed fetching latest host information: %s", err) - continue - } - if reflect.DeepEqual(current, updated) { - // nothing to do - continue - } - current = updated - err = comm.Set(updated) - if err != nil { - c.logger.Errorf("Failed updating mapping to latest host information: %s", err) - } + for { + t := time.NewTimer(c.CheckInterval) + select { + case <-comm.Done(): + t.Stop() + return comm.Err() + case <-t.C: } - }() - return nil + updated, err := c.fetcher() + if err != nil { + c.logger.Warnf("Failed fetching latest host information: %s", err) + continue + } + if reflect.DeepEqual(current, updated) { + // nothing to do + continue + } + current = updated + err = comm.Set(updated) + if err != nil { + c.logger.Errorf("Failed updating mapping to latest host information: %s", err) + } + } } // ContextProviderBuilder builds the context provider. 
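Note on the pattern above: context providers no longer spawn their own goroutine.
Run blocks until the communication context is done and returns comm.Err(), while the
controller supplies the goroutine. A minimal conforming provider, as a sketch (the
staticProvider name and mapping are hypothetical):

	type staticProvider struct{}

	// Run publishes a single mapping, then blocks until the controller shuts down.
	func (staticProvider) Run(comm corecomp.ContextProviderComm) error {
		if err := comm.Set(map[string]interface{}{"key": "value"}); err != nil {
			return err
		}
		<-comm.Done()
		return comm.Err()
	}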
diff --git a/internal/pkg/composable/providers/host/host_test.go b/internal/pkg/composable/providers/host/host_test.go index 8e117fcbeb4..869f6a82050 100644 --- a/internal/pkg/composable/providers/host/host_test.go +++ b/internal/pkg/composable/providers/host/host_test.go @@ -41,15 +41,28 @@ func TestContextProvider(t *testing.T) { require.Equal(t, 100*time.Millisecond, hostProvider.CheckInterval) ctx, cancel := context.WithCancel(context.Background()) + defer cancel() comm := ctesting.NewContextComm(ctx) - err = provider.Run(comm) + + go func() { + err = provider.Run(comm) + }() + + // wait for it to be called once + var wg sync.WaitGroup + wg.Add(1) + comm.CallOnSet(func() { + wg.Done() + }) + wg.Wait() + comm.CallOnSet(nil) + require.NoError(t, err) starting, err = ctesting.CloneMap(starting) require.NoError(t, err) require.Equal(t, starting, comm.Current()) // wait for it to be called again - var wg sync.WaitGroup wg.Add(1) comm.CallOnSet(func() { wg.Done() diff --git a/internal/pkg/composable/providers/kubernetes/kubernetes.go b/internal/pkg/composable/providers/kubernetes/kubernetes.go index 91367c5252f..9f43522f2da 100644 --- a/internal/pkg/composable/providers/kubernetes/kubernetes.go +++ b/internal/pkg/composable/providers/kubernetes/kubernetes.go @@ -30,7 +30,7 @@ const ( const nodeScope = "node" func init() { - _ = composable.Providers.AddDynamicProvider("kubernetes", DynamicProviderBuilder) + composable.Providers.MustAddDynamicProvider("kubernetes", DynamicProviderBuilder) } type dynamicProvider struct { @@ -54,37 +54,51 @@ func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable // Run runs the kubernetes context provider. func (p *dynamicProvider) Run(comm composable.DynamicProviderComm) error { + eventers := make([]Eventer, 0, 3) if p.config.Resources.Pod.Enabled { - err := p.watchResource(comm, "pod") + eventer, err := p.watchResource(comm, "pod") if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } } if p.config.Resources.Node.Enabled { - err := p.watchResource(comm, nodeScope) + eventer, err := p.watchResource(comm, nodeScope) if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } } if p.config.Resources.Service.Enabled { - err := p.watchResource(comm, "service") + eventer, err := p.watchResource(comm, "service") if err != nil { return err } + if eventer != nil { + eventers = append(eventers, eventer) + } + } + <-comm.Done() + for _, eventer := range eventers { + eventer.Stop() } - return nil + return comm.Err() } // watchResource initializes the proper watcher according to the given resource (pod, node, service) // and starts watching for such resource's events. func (p *dynamicProvider) watchResource( comm composable.DynamicProviderComm, - resourceType string) error { + resourceType string) (Eventer, error) { client, err := kubernetes.GetKubernetesClient(p.config.KubeConfig, p.config.KubeClientOptions) if err != nil { // info only; return nil (do nothing) p.logger.Debugf("Kubernetes provider for resource %s skipped, unable to connect: %s", resourceType, err) - return nil + return nil, nil } // Ensure that node is set correctly whenever the scope is set to "node". 
Make sure that node is empty @@ -105,7 +119,7 @@ func (p *dynamicProvider) watchResource( p.config.Node, err = kubernetes.DiscoverKubernetesNode(p.logger, nd) if err != nil { p.logger.Debugf("Kubernetes provider skipped, unable to discover node: %w", err) - return nil + return nil, nil } } else { @@ -114,15 +128,15 @@ func (p *dynamicProvider) watchResource( eventer, err := p.newEventer(resourceType, comm, client) if err != nil { - return errors.New(err, "couldn't create kubernetes watcher for resource %s", resourceType) + return nil, errors.New(err, "couldn't create kubernetes watcher for resource %s", resourceType) } err = eventer.Start() if err != nil { - return errors.New(err, "couldn't start kubernetes eventer for resource %s", resourceType) + return nil, errors.New(err, "couldn't start kubernetes eventer for resource %s", resourceType) } - return nil + return eventer, nil } // Eventer allows defining ways in which kubernetes resource events are observed and processed diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go index 410e13ec77d..d0d773d1663 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go @@ -23,15 +23,13 @@ import ( ) func init() { - _ = composable.Providers.AddContextProvider("kubernetes_leaderelection", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("kubernetes_leaderelection", ContextProviderBuilder) } type contextProvider struct { - logger *logger.Logger - config *Config - comm corecomp.ContextProviderComm - leaderElection *leaderelection.LeaderElectionConfig - cancelLeaderElection context.CancelFunc + logger *logger.Logger + config *Config + leaderElection *leaderelection.LeaderElectionConfig } // ContextProviderBuilder builds the provider. @@ -44,7 +42,7 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.C if err != nil { return nil, errors.New(err, "failed to unpack configuration") } - return &contextProvider{logger, &cfg, nil, nil, nil}, nil + return &contextProvider{logger, &cfg, nil}, nil } // Run runs the leaderelection provider. 
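For reference, the leader flag that this provider publishes via comm.Set is what
agent policies consume in input conditions, e.g.:

	condition: ${kubernetes_leaderelection.leader} == true

The condition line is illustrative of the variable path only, not part of this change.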
@@ -91,57 +89,43 @@ func (p *contextProvider) Run(comm corecomp.ContextProviderComm) error { Callbacks: leaderelection.LeaderCallbacks{ OnStartedLeading: func(ctx context.Context) { p.logger.Debugf("leader election lock GAINED, id %v", id) - p.startLeading() + p.startLeading(comm) }, OnStoppedLeading: func() { p.logger.Debugf("leader election lock LOST, id %v", id) - p.stopLeading() + p.stopLeading(comm) }, }, } - ctx, cancel := context.WithCancel(context.TODO()) - p.cancelLeaderElection = cancel - p.comm = comm - p.startLeaderElector(ctx) - return nil -} - -// startLeaderElector starts a Leader Elector in the background with the provided config -func (p *contextProvider) startLeaderElector(ctx context.Context) { le, err := leaderelection.NewLeaderElector(*p.leaderElection) if err != nil { p.logger.Errorf("error while creating Leader Elector: %v", err) } p.logger.Debugf("Starting Leader Elector") - go le.Run(ctx) + le.Run(comm) + p.logger.Debugf("Stopped Leader Elector") + return comm.Err() } -func (p *contextProvider) startLeading() { +func (p *contextProvider) startLeading(comm corecomp.ContextProviderComm) { mapping := map[string]interface{}{ "leader": true, } - err := p.comm.Set(mapping) + err := comm.Set(mapping) if err != nil { p.logger.Errorf("Failed updating leaderelection status to leader TRUE: %s", err) } } -func (p *contextProvider) stopLeading() { +func (p *contextProvider) stopLeading(comm corecomp.ContextProviderComm) { mapping := map[string]interface{}{ "leader": false, } - err := p.comm.Set(mapping) + err := comm.Set(mapping) if err != nil { p.logger.Errorf("Failed updating leaderelection status to leader FALSE: %s", err) } } - -// Stop signals the stop channel to force the leader election loop routine to stop. -func (p *contextProvider) Stop() { - if p.cancelLeaderElection != nil { - p.cancelLeaderElection() - } -} diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index 0bc560295ed..d6e8190c13a 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -7,6 +7,7 @@ package kubernetessecrets import ( "context" "strings" + "sync" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" k8sclient "k8s.io/client-go/kubernetes" @@ -23,14 +24,15 @@ var _ corecomp.FetchContextProvider = (*contextProviderK8sSecrets)(nil) var getK8sClientFunc = getK8sClient func init() { - _ = composable.Providers.AddContextProvider("kubernetes_secrets", ContextProviderBuilder) + composable.Providers.MustAddContextProvider("kubernetes_secrets", ContextProviderBuilder) } type contextProviderK8sSecrets struct { logger *logger.Logger config *Config - client k8sclient.Interface + clientMx sync.Mutex + client k8sclient.Interface } // ContextProviderBuilder builds the context provider. 
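For reference, Fetch (below) resolves keys of the form
kubernetes_secrets.<namespace>.<secret>.<field>, so a policy can reference a secret
lazily at render time, e.g.:

	password: ${kubernetes_secrets.default.mysql.password}

The namespace and secret names in the example are illustrative only.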
@@ -43,12 +45,18 @@ func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.C if err != nil { return nil, errors.New(err, "failed to unpack configuration") } - return &contextProviderK8sSecrets{logger, &cfg, nil}, nil + return &contextProviderK8sSecrets{ + logger: logger, + config: &cfg, + }, nil } func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { // key = "kubernetes_secrets.somenamespace.somesecret.value" - if p.client == nil { + p.clientMx.Lock() + client := p.client + p.clientMx.Unlock() + if client == nil { return "", false } tokens := strings.Split(key, ".") @@ -67,7 +75,7 @@ func (p *contextProviderK8sSecrets) Fetch(key string) (string, bool) { secretName := tokens[2] secretVar := tokens[3] - secretIntefrace := p.client.CoreV1().Secrets(ns) + secretIntefrace := client.CoreV1().Secrets(ns) ctx := context.TODO() secret, err := secretIntefrace.Get(ctx, secretName, metav1.GetOptions{}) if err != nil { @@ -89,8 +97,14 @@ func (p *contextProviderK8sSecrets) Run(comm corecomp.ContextProviderComm) error p.logger.Debugf("Kubernetes_secrets provider skipped, unable to connect: %s", err) return nil } + p.clientMx.Lock() p.client = client - return nil + p.clientMx.Unlock() + <-comm.Done() + p.clientMx.Lock() + p.client = nil + p.clientMx.Unlock() + return comm.Err() } func getK8sClient(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index 4c80800a59b..388f33074bb 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -7,6 +7,9 @@ package kubernetessecrets import ( "context" "testing" + "time" + + ctesting "github.com/elastic/elastic-agent/internal/pkg/composable/testing" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -19,7 +22,6 @@ import ( "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/config" - corecomp "github.com/elastic/elastic-agent/internal/pkg/core/composable" ) const ( @@ -52,13 +54,31 @@ func Test_K8sSecretsProvider_Fetch(t *testing.T) { p, err := ContextProviderBuilder(logger, cfg) require.NoError(t, err) - fp, _ := p.(corecomp.FetchContextProvider) + fp, _ := p.(*contextProviderK8sSecrets) getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { return client, nil } require.NoError(t, err) - _ = fp.Run(nil) + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + comm := ctesting.NewContextComm(ctx) + + go func() { + _ = fp.Run(comm) + }() + + for { + fp.clientMx.Lock() + client := fp.client + fp.clientMx.Unlock() + if client != nil { + break + } + <-time.After(10 * time.Millisecond) + } + val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secret.secret_value") assert.True(t, found) assert.Equal(t, val, pass) @@ -89,13 +109,31 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { p, err := ContextProviderBuilder(logger, cfg) require.NoError(t, err) - fp, _ := p.(corecomp.FetchContextProvider) + fp, _ := p.(*contextProviderK8sSecrets) getK8sClientFunc = func(kubeconfig string, opt kubernetes.KubeClientOptions) (k8sclient.Interface, error) { return client, nil } require.NoError(t, 
err)
-	_ = fp.Run(nil)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	comm := ctesting.NewContextComm(ctx)
+
+	go func() {
+		_ = fp.Run(comm)
+	}()
+
+	for {
+		fp.clientMx.Lock()
+		client := fp.client
+		fp.clientMx.Unlock()
+		if client != nil {
+			break
+		}
+		<-time.After(10 * time.Millisecond)
+	}
+
 	val, found := fp.Fetch("kubernetes_secrets.test_namespace.testing_secretHACK.secret_value")
 	assert.False(t, found)
 	assert.EqualValues(t, val, "")
diff --git a/internal/pkg/composable/providers/local/local.go b/internal/pkg/composable/providers/local/local.go
index 9c611ecbd13..b44affc78df 100644
--- a/internal/pkg/composable/providers/local/local.go
+++ b/internal/pkg/composable/providers/local/local.go
@@ -15,7 +15,7 @@ import (
 )
 
 func init() {
-	composable.Providers.AddContextProvider("local", ContextProviderBuilder)
+	composable.Providers.MustAddContextProvider("local", ContextProviderBuilder)
 }
 
 type contextProvider struct {
diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic.go b/internal/pkg/composable/providers/localdynamic/localdynamic.go
index f4f99ca4030..0fd81738976 100644
--- a/internal/pkg/composable/providers/localdynamic/localdynamic.go
+++ b/internal/pkg/composable/providers/localdynamic/localdynamic.go
@@ -18,7 +18,7 @@ import (
 const ItemPriority = 0
 
 func init() {
-	composable.Providers.AddDynamicProvider("local_dynamic", DynamicProviderBuilder)
+	composable.Providers.MustAddDynamicProvider("local_dynamic", DynamicProviderBuilder)
 }
 
 type dynamicItem struct {
diff --git a/internal/pkg/composable/providers/path/path.go b/internal/pkg/composable/providers/path/path.go
index 455f46d2b28..05af5bcd0b0 100644
--- a/internal/pkg/composable/providers/path/path.go
+++ b/internal/pkg/composable/providers/path/path.go
@@ -14,7 +14,7 @@ import (
 )
 
 func init() {
-	composable.Providers.AddContextProvider("path", ContextProviderBuilder)
+	composable.Providers.MustAddContextProvider("path", ContextProviderBuilder)
 }
 
 type contextProvider struct{}
diff --git a/internal/pkg/core/composable/providers.go b/internal/pkg/core/composable/providers.go
index d87437e2dae..235e17d83fa 100644
--- a/internal/pkg/core/composable/providers.go
+++ b/internal/pkg/core/composable/providers.go
@@ -6,11 +6,12 @@ package composable
 
 import "context"
 
-// FetchContextProvider is the interface that a context provider uses so as to be able to be called
-// explicitly on demand by vars framework in order to fetch specific target values like a k8s secret.
+// FetchContextProvider is the interface that a context provider uses to allow variable values to be determined when the
+// configuration is rendered versus it being known in advance.
 type FetchContextProvider interface {
 	ContextProvider
-	// Run runs the inventory provider.
+
+	// Fetch tries to fetch a value for a variable.
 	Fetch(string) (string, bool)
 }
diff --git a/internal/pkg/fleetapi/acker/noop/noop_acker.go b/internal/pkg/fleetapi/acker/noop/noop_acker.go
index 3e2716193f0..7c410d73bc0 100644
--- a/internal/pkg/fleetapi/acker/noop/noop_acker.go
+++ b/internal/pkg/fleetapi/acker/noop/noop_acker.go
@@ -2,27 +2,29 @@
 // or more contributor license agreements. Licensed under the Elastic License;
 // you may not use this file except in compliance with the Elastic License.
 
-package fleet
+package noop
 
 import (
 	"context"
 
+	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
+
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
 )
 
-// Acker is a noop acker.
+// noopAcker is a noop acker.
// Methods of these acker do nothing. -type Acker struct{} +type noopAcker struct{} -// NewAcker creates a new noop acker. -func NewAcker() *Acker { - return &Acker{} +// New creates a new noop acker. +func New() acker.Acker { + return &noopAcker{} } // Ack acknowledges action. -func (f *Acker) Ack(ctx context.Context, action fleetapi.Action) error { +func (f *noopAcker) Ack(ctx context.Context, action fleetapi.Action) error { return nil } // Commit commits ack actions. -func (*Acker) Commit(ctx context.Context) error { return nil } +func (*noopAcker) Commit(ctx context.Context) error { return nil } diff --git a/pkg/component/load.go b/pkg/component/load.go index 38e934b836f..62a983f1f9d 100644 --- a/pkg/component/load.go +++ b/pkg/component/load.go @@ -146,6 +146,15 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp }, nil } +// Inputs returns the list of supported inputs for this platform. +func (r *RuntimeSpecs) Inputs() []string { + inputs := make([]string, 0, len(r.inputSpecs)) + for inputType := range r.inputSpecs { + inputs = append(inputs, inputType) + } + return inputs +} + // GetInput returns the input runtime specification for this input on this platform. func (r *RuntimeSpecs) GetInput(inputType string) (InputRuntimeSpec, error) { runtime, ok := r.inputSpecs[inputType] diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index e7125e82f68..8fbeeb73ff7 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -48,6 +48,12 @@ var ( ErrNoUnit = errors.New("no unit under control of this manager") ) +// ComponentComponentState provides a structure to map a component to current component state. +type ComponentComponentState struct { + Component component.Component + State ComponentState +} + // Manager for the entire runtime of operating components. type Manager struct { proto.UnimplementedElasticAgentServer @@ -67,8 +73,13 @@ type Manager struct { mx sync.RWMutex current map[string]*componentRuntimeState - subMx sync.RWMutex - subscriptions map[string][]*Subscription + subMx sync.RWMutex + subscriptions map[string][]*Subscription + subAllMx sync.RWMutex + subscribeAll []*SubscriptionAll + subscribeAllInit chan *SubscriptionAll + + errCh chan error shuttingDown atomic.Bool } @@ -87,6 +98,7 @@ func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (* waitReady: make(map[string]waitForReady), current: make(map[string]*componentRuntimeState), subscriptions: make(map[string][]*Subscription), + errCh: make(chan error), } return m, nil } @@ -215,6 +227,11 @@ func (m *Manager) WaitForReady(ctx context.Context) error { } } +// Errors returns channel that errors are reported on. +func (m *Manager) Errors() <-chan error { + return m.errCh +} + // Update updates the currComp state of the running components. // // This returns as soon as possible, work is performed in the background to @@ -229,6 +246,22 @@ func (m *Manager) Update(components []component.Component) error { return m.update(components, true) } +// State returns the current component states. +func (m *Manager) State() []ComponentComponentState { + m.mx.RLock() + defer m.mx.RUnlock() + states := make([]ComponentComponentState, 0, len(m.current)) + for _, crs := range m.current { + crs.latestMx.RLock() + states = append(states, ComponentComponentState{ + Component: crs.currComp, + State: crs.latestState.Copy(), + }) + crs.latestMx.RUnlock() + } + return states +} + // PerformAction executes an action on a unit. 
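 //
 // A hypothetical call, for illustration only (the action name and params shown
 // here are not defined by this change):
 //
 //	res, err := m.PerformAction(ctx, unit, "flush", map[string]interface{}{"timeout": "5s"})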
func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { id, err := uuid.NewV4() @@ -290,11 +323,11 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s // Subscribe to changes in a component. // // Allows a component without that ID to exists. Once a component starts matching that ID then changes will start to -// be provided over the channel. +// be provided over the channel. Cancelling the context results in the subscription being unsubscribed. // // Note: Not reading from a subscription channel will cause the Manager to block. -func (m *Manager) Subscribe(componentID string) *Subscription { - sub := newSubscription(m) +func (m *Manager) Subscribe(ctx context.Context, componentID string) *Subscription { + sub := newSubscription(ctx, m) // add latestState to channel m.mx.RLock() @@ -302,14 +335,88 @@ func (m *Manager) Subscribe(componentID string) *Subscription { m.mx.RUnlock() if ok { comp.latestMx.RLock() - sub.ch <- comp.latestState + latestState := comp.latestState.Copy() comp.latestMx.RUnlock() + go func() { + select { + case <-ctx.Done(): + case sub.ch <- latestState: + } + }() } // add subscription for future changes m.subMx.Lock() m.subscriptions[componentID] = append(m.subscriptions[componentID], sub) - defer m.subMx.Unlock() + m.subMx.Unlock() + + go func() { + <-ctx.Done() + + // unsubscribe + m.subMx.Lock() + defer m.subMx.Unlock() + for key, subs := range m.subscriptions { + for i, s := range subs { + if sub == s { + m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...) + return + } + } + } + }() + + return sub +} + +// SubscribeAll subscribes to all changes in all components. +// +// This provides the current state for existing components at the time of first subscription. Cancelling the context +// results in the subscription being unsubscribed. +// +// Note: Not reading from a subscription channel will cause the Manager to block. +func (m *Manager) SubscribeAll(ctx context.Context) *SubscriptionAll { + sub := newSubscriptionAll(ctx, m) + + // add latest states + m.mx.RLock() + latest := make([]ComponentComponentState, 0, len(m.current)) + for _, comp := range m.current { + comp.latestMx.RLock() + latest = append(latest, ComponentComponentState{Component: comp.currComp, State: comp.latestState.Copy()}) + comp.latestMx.RUnlock() + } + m.mx.RUnlock() + if len(latest) > 0 { + go func() { + for _, l := range latest { + select { + case <-ctx.Done(): + return + case sub.ch <- l: + } + } + }() + } + + // add subscription for future changes + m.subAllMx.Lock() + m.subscribeAll = append(m.subscribeAll, sub) + m.subAllMx.Unlock() + + go func() { + <-ctx.Done() + + // unsubscribe + m.subAllMx.Lock() + defer m.subAllMx.Unlock() + for i, s := range m.subscribeAll { + if sub == s { + m.subscribeAll = append(m.subscribeAll[:i], m.subscribeAll[i+1:]...) 
+ return + } + } + }() return sub } @@ -470,11 +577,26 @@ func (m *Manager) shutdown() { } func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) { + m.subAllMx.RLock() + for _, sub := range m.subscribeAll { + select { + case <-sub.ctx.Done(): + case sub.ch <- ComponentComponentState{ + Component: state.currComp, + State: latest, + }: + } + } + m.subAllMx.RUnlock() + m.subMx.RLock() subs, ok := m.subscriptions[state.currComp.ID] if ok { for _, sub := range subs { - sub.ch <- latest + select { + case <-sub.ctx.Done(): + case sub.ch <- latest: + } } } m.subMx.RUnlock() @@ -490,19 +612,6 @@ func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentSta } } -func (m *Manager) unsubscribe(subscription *Subscription) { - m.subMx.Lock() - defer m.subMx.Unlock() - for key, subs := range m.subscriptions { - for i, sub := range subs { - if subscription == sub { - m.subscriptions[key] = append(m.subscriptions[key][:i], m.subscriptions[key][i+1:]...) - return - } - } - } -} - func (m *Manager) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { var cert *tls.Certificate diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index adeb2b1243a..41a62557ea8 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -70,8 +70,7 @@ func TestManager_SimpleComponentErr(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("error-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "error-default") for { select { case <-subCtx.Done(): @@ -179,8 +178,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -297,8 +295,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -431,8 +428,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { go func() { unit1Stopped := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -575,8 +571,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -700,8 +695,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { go func() { killed := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -849,8 +843,7 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { go func() { wasDegraded := false - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -956,8 +949,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { defer subCancel() subErrCh := make(chan error) go func() { - sub := m.Subscribe("fake-default") - defer sub.Unsubscribe() + sub := m.Subscribe(subCtx, "fake-default") for { select { case <-subCtx.Done(): @@ -1166,12 +1158,9 @@ func 
TestManager_FakeInput_MultiComponent(t *testing.T) { subErrCh1 := make(chan error) subErrCh2 := make(chan error) go func() { - sub0 := m.Subscribe("fake-0") - defer sub0.Unsubscribe() - sub1 := m.Subscribe("fake-1") - defer sub1.Unsubscribe() - sub2 := m.Subscribe("fake-2") - defer sub2.Unsubscribe() + sub0 := m.Subscribe(subCtx, "fake-0") + sub1 := m.Subscribe(subCtx, "fake-1") + sub2 := m.Subscribe(subCtx, "fake-2") for { select { case <-subCtx.Done(): diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index ee4800ce36b..b84e5b48202 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -35,12 +35,24 @@ type ComponentUnitKey struct { UnitID string } +// ComponentVersionInfo provides version information reported by the component. +type ComponentVersionInfo struct { + // Name of the binary. + Name string + // Version of the binary. + Version string + // Additional metadata about the binary. + Meta map[string]string +} + // ComponentState is the overall state of the component. type ComponentState struct { State client.UnitState Message string Units map[ComponentUnitKey]ComponentUnitState + + VersionInfo ComponentVersionInfo } func newComponentState(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) (s ComponentState) { @@ -157,6 +169,17 @@ func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { } } } + if checkin.VersionInfo != nil { + if checkin.VersionInfo.Name != "" { + s.VersionInfo.Name = checkin.VersionInfo.Name + } + if checkin.VersionInfo.Version != "" { + s.VersionInfo.Version = checkin.VersionInfo.Version + } + if checkin.VersionInfo.Meta != nil { + s.VersionInfo.Meta = checkin.VersionInfo.Meta + } + } return changed } @@ -280,7 +303,9 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. delete(state.actions, ar.Id) } state.actionsMx.Unlock() - callback(ar) + if ok { + callback(ar) + } } } }() diff --git a/pkg/component/runtime/subscription.go b/pkg/component/runtime/subscription.go index 88f4106d21a..15cfeac4f7d 100644 --- a/pkg/component/runtime/subscription.go +++ b/pkg/component/runtime/subscription.go @@ -4,16 +4,22 @@ package runtime +import ( + "context" +) + // Subscription provides a channel for notifications on a component state. type Subscription struct { + ctx context.Context manager *Manager ch chan ComponentState } -func newSubscription(manager *Manager) *Subscription { +func newSubscription(ctx context.Context, manager *Manager) *Subscription { return &Subscription{ + ctx: ctx, manager: manager, - ch: make(chan ComponentState, 1), // buffer of 1 to allow initial latestState state + ch: make(chan ComponentState), } } @@ -22,7 +28,22 @@ func (s *Subscription) Ch() <-chan ComponentState { return s.ch } -// Unsubscribe removes the subscription. -func (s *Subscription) Unsubscribe() { - s.manager.unsubscribe(s) +// SubscriptionAll provides a channel for notifications on all component state changes. +type SubscriptionAll struct { + ctx context.Context + manager *Manager + ch chan ComponentComponentState +} + +func newSubscriptionAll(ctx context.Context, manager *Manager) *SubscriptionAll { + return &SubscriptionAll{ + ctx: ctx, + manager: manager, + ch: make(chan ComponentComponentState), + } +} + +// Ch provides the channel to get state changes. 
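+//
+// A minimal consumer sketch (caller-side; ctx and log are assumed, not defined
+// by this change):
+//
+//	sub := m.SubscribeAll(ctx)
+//	for {
+//		select {
+//		case <-ctx.Done():
+//			return
+//		case s := <-sub.Ch():
+//			log.Infof("component %s: %s", s.Component.ID, s.State.Message)
+//		}
+//	}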
+func (s *SubscriptionAll) Ch() <-chan ComponentComponentState { + return s.ch } diff --git a/pkg/core/server/config.go b/pkg/core/server/config.go deleted file mode 100644 index 0d1dbd9d5e3..00000000000 --- a/pkg/core/server/config.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "fmt" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// Config is a configuration of GRPC server. -type Config struct { - Address string `config:"address"` - Port uint16 `config:"port"` -} - -// DefaultGRPCConfig creates a default server configuration. -func DefaultGRPCConfig() *Config { - return &Config{ - Address: "localhost", - Port: 6789, - } -} - -// NewFromConfig creates a new GRPC server for clients to connect to. -func NewFromConfig(logger *logger.Logger, cfg *Config, handler Handler, tracer *apm.Tracer) (*Server, error) { - return New(logger, fmt.Sprintf("%s:%d", cfg.Address, cfg.Port), handler, tracer) -} diff --git a/pkg/core/server/config_test.go b/pkg/core/server/config_test.go deleted file mode 100644 index 2c846d77892..00000000000 --- a/pkg/core/server/config_test.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewFromConfig(t *testing.T) { - l := newErrorLogger(t) - cfg := &Config{ - Address: "0.0.0.0", - Port: 9876, - } - srv, err := NewFromConfig(l, cfg, &StubHandler{}, nil) - require.NoError(t, err) - assert.Equal(t, "0.0.0.0:9876", srv.getListenAddr()) -} diff --git a/pkg/core/server/server.go b/pkg/core/server/server.go deleted file mode 100644 index 6d3a284cd79..00000000000 --- a/pkg/core/server/server.go +++ /dev/null @@ -1,1018 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "net" - "strings" - "sync" - "time" - - "go.elastic.co/apm" - "go.elastic.co/apm/module/apmgrpc" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" - - "github.com/gofrs/uuid" - "google.golang.org/grpc" - "google.golang.org/grpc/credentials" - protobuf "google.golang.org/protobuf/proto" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/authority" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -const ( - // InitialCheckinTimeout is the maximum amount of wait time from initial check-in stream to - // getting the first check-in observed state. - InitialCheckinTimeout = 5 * time.Second - // CheckinMinimumTimeoutGracePeriod is additional time added to the client.CheckinMinimumTimeout - // to ensure the application is checking in correctly. 
- CheckinMinimumTimeoutGracePeriod = 30 * time.Second - // WatchdogCheckLoop is the amount of time that the watchdog will wait between checking for - // applications that have not checked in the correct amount of time. - WatchdogCheckLoop = 5 * time.Second -) - -var ( - // ErrApplicationAlreadyRegistered returned when trying to register an application more than once. - ErrApplicationAlreadyRegistered = errors.New("application already registered", errors.TypeApplication) - // ErrApplicationStopping returned when trying to update an application config but it is stopping. - ErrApplicationStopping = errors.New("application stopping", errors.TypeApplication) - // ErrApplicationStopTimedOut returned when calling Stop and the application timed out stopping. - ErrApplicationStopTimedOut = errors.New("application stopping timed out", errors.TypeApplication) - // ErrActionTimedOut returned on PerformAction when the action timed out. - ErrActionTimedOut = errors.New("application action timed out", errors.TypeApplication) - // ErrActionCancelled returned on PerformAction when an action is cancelled, normally due to the application - // being stopped or removed from the server. - ErrActionCancelled = errors.New("application action cancelled", errors.TypeApplication) -) - -// ApplicationState represents the applications state according to the server. -type ApplicationState struct { - srv *Server - app interface{} - - srvName string - token string - cert *authority.Pair - - pendingExpected chan *proto.StateExpected - expected proto.StateExpected_State - expectedConfigIdx uint64 - expectedConfig string - status proto.StateObserved_Status - statusMessage string - statusPayload map[string]interface{} - statusPayloadStr string - statusConfigIdx uint64 - statusTime time.Time - checkinConn bool - checkinDone chan bool - checkinLock sync.RWMutex - - pendingActions chan *pendingAction - sentActions map[string]*sentAction - actionsConn bool - actionsDone chan bool - actionsLock sync.RWMutex - - inputTypes map[string]struct{} -} - -// Handler is the used by the server to inform of status changes. -type Handler interface { - // OnStatusChange called when a registered application observed status is changed. - OnStatusChange(*ApplicationState, proto.StateObserved_Status, string, map[string]interface{}) -} - -// Server is the GRPC server that the launched applications connect back to. -type Server struct { - proto.UnimplementedElasticAgentServer - - logger *logger.Logger - ca *authority.CertificateAuthority - listenAddr string - handler Handler - tracer *apm.Tracer - - listener net.Listener - server *grpc.Server - watchdogDone chan bool - watchdogWG sync.WaitGroup - - apps sync.Map - - // overridden in tests - watchdogCheckInterval time.Duration - checkInMinTimeout time.Duration -} - -// New creates a new GRPC server for clients to connect to. -func New(logger *logger.Logger, listenAddr string, handler Handler, tracer *apm.Tracer) (*Server, error) { - ca, err := authority.NewCA() - if err != nil { - return nil, err - } - return &Server{ - logger: logger, - ca: ca, - listenAddr: listenAddr, - handler: handler, - watchdogCheckInterval: WatchdogCheckLoop, - checkInMinTimeout: client.CheckinMinimumTimeout + CheckinMinimumTimeoutGracePeriod, - tracer: tracer, - }, nil -} - -// Start starts the GRPC endpoint and accepts new connections. 
-func (s *Server) Start() error { - if s.server != nil { - // already started - return nil - } - - lis, err := net.Listen("tcp", s.listenAddr) - if err != nil { - return err - } - s.listener = lis - certPool := x509.NewCertPool() - if ok := certPool.AppendCertsFromPEM(s.ca.Crt()); !ok { - return errors.New("failed to append root CA", errors.TypeSecurity) - } - creds := credentials.NewTLS(&tls.Config{ - ClientAuth: tls.RequireAndVerifyClientCert, - ClientCAs: certPool, - GetCertificate: s.getCertificate, - MinVersion: tls.VersionTLS12, - }) - if s.tracer != nil { - apmInterceptor := apmgrpc.NewUnaryServerInterceptor(apmgrpc.WithRecovery(), apmgrpc.WithTracer(s.tracer)) - s.server = grpc.NewServer( - grpc.UnaryInterceptor(apmInterceptor), - grpc.Creds(creds), - ) - } else { - s.server = grpc.NewServer(grpc.Creds(creds)) - } - proto.RegisterElasticAgentServer(s.server, s) - - // start serving GRPC connections - go func() { - err := s.server.Serve(lis) - if err != nil { - s.logger.Errorf("error listening for GRPC: %s", err) - } - }() - - // start the watchdog - s.watchdogDone = make(chan bool) - s.watchdogWG.Add(1) - go s.watchdog() - - return nil -} - -// Stop stops the GRPC endpoint. -func (s *Server) Stop() { - if s.server != nil { - close(s.watchdogDone) - s.server.Stop() - s.server = nil - s.listener = nil - s.watchdogWG.Wait() - } -} - -// Get returns the application state from the server for the passed application. -func (s *Server) Get(app interface{}) (*ApplicationState, bool) { - var foundState *ApplicationState - s.apps.Range(func(_ interface{}, val interface{}) bool { - as, ok := val.(*ApplicationState) - if !ok { - return true - } - if as.app == app { - foundState = as - return false - } - return true - }) - return foundState, foundState != nil -} - -// FindByInputType application by input type -func (s *Server) FindByInputType(inputType string) (*ApplicationState, bool) { - var foundState *ApplicationState - s.apps.Range(func(_ interface{}, val interface{}) bool { - as, ok := val.(*ApplicationState) - if !ok { - return true - } - if as.inputTypes == nil { - return true - } - - if _, ok := as.inputTypes[inputType]; ok { - foundState = as - return false - } - return true - }) - return foundState, foundState != nil -} - -// Register registers a new application to connect to the server. -func (s *Server) Register(app interface{}, config string) (*ApplicationState, error) { - if _, ok := s.Get(app); ok { - return nil, ErrApplicationAlreadyRegistered - } - - id, err := uuid.NewV4() - if err != nil { - return nil, err - } - srvName, err := genServerName() - if err != nil { - return nil, err - } - pair, err := s.ca.GeneratePairWithName(srvName) - if err != nil { - return nil, err - } - appState := &ApplicationState{ - srv: s, - app: app, - srvName: srvName, - token: id.String(), - cert: pair, - pendingExpected: make(chan *proto.StateExpected), - expected: proto.StateExpected_RUNNING, - expectedConfigIdx: 1, - expectedConfig: config, - checkinConn: true, - status: proto.StateObserved_STARTING, - statusConfigIdx: client.InitialConfigIdx, - statusTime: time.Now().UTC(), - pendingActions: make(chan *pendingAction, 100), - sentActions: make(map[string]*sentAction), - actionsConn: true, - } - s.apps.Store(appState.token, appState) - return appState, nil -} - -// Checkin implements the GRPC bi-direction stream connection for check-ins. 
-func (s *Server) Checkin(server proto.ElasticAgent_CheckinServer) error { - firstCheckinChan := make(chan *proto.StateObserved) - go func() { - // go func will not be leaked, because when the main function - // returns it will close the connection. that will cause this - // function to return. - observed, err := server.Recv() - if err != nil { - close(firstCheckinChan) - return - } - firstCheckinChan <- observed - }() - - var ok bool - var observedConfigStateIdx uint64 - var firstCheckin *proto.StateObserved - select { - case firstCheckin, ok = <-firstCheckinChan: - if firstCheckin != nil { - observedConfigStateIdx = firstCheckin.ConfigStateIdx - } - break - case <-time.After(InitialCheckinTimeout): - // close connection - s.logger.Debug("check-in stream never sent initial observed message; closing connection") - return nil - } - if !ok { - // close connection - return nil - } - appState, ok := s.getByToken(firstCheckin.Token) - if !ok { - // no application with token; close connection - s.logger.Debug("check-in stream sent an invalid token; closing connection") - return status.Error(codes.PermissionDenied, "invalid token") - } - appState.checkinLock.Lock() - if appState.checkinDone != nil { - // application is already connected (cannot have multiple); close connection - appState.checkinLock.Unlock() - s.logger.Debug("check-in stream already exists for application; closing connection") - return status.Error(codes.AlreadyExists, "application already connected") - } - if !appState.checkinConn { - // application is being destroyed cannot reconnect; close connection - appState.checkinLock.Unlock() - s.logger.Debug("check-in stream cannot connect, application is being destroyed; closing connection") - return status.Error(codes.Unavailable, "application cannot connect being destroyed") - } - - // application is running as a service and counter is already counting - // force config reload - if observedConfigStateIdx > 0 { - appState.expectedConfigIdx = observedConfigStateIdx + 1 - } - - checkinDone := make(chan bool) - appState.checkinDone = checkinDone - appState.checkinLock.Unlock() - - defer func() { - appState.checkinLock.Lock() - appState.checkinDone = nil - appState.checkinLock.Unlock() - }() - - // send the config and expected state changes to the applications when - // pushed on the channel - recvDone := make(chan bool) - sendDone := make(chan bool) - go func() { - defer func() { - close(sendDone) - }() - for { - var expected *proto.StateExpected - select { - case <-checkinDone: - return - case <-recvDone: - return - case expected = <-appState.pendingExpected: - } - - err := server.Send(expected) - if err != nil { - if reportableErr(err) { - s.logger.Debugf("check-in stream failed to send expected state: %s", err) - } - return - } - } - }() - - // update status after the pendingExpected channel has a reader - appState.updateStatus(firstCheckin, true) - - // read incoming state observations from the application and act based on - // the servers expected state of the application - go func() { - for { - checkin, err := server.Recv() - if err != nil { - if reportableErr(err) { - s.logger.Debugf("check-in stream failed to receive data: %s", err) - } - close(recvDone) - return - } - appState.updateStatus(checkin, false) - } - }() - - <-sendDone - return nil -} - -// CheckinV2 implements the GRPC bi-direction stream connection for v2 check-ins. 
-func (s *Server) CheckinV2(server proto.ElasticAgent_CheckinV2Server) error { - return errors.New("not implemented") -} - -// Actions implements the GRPC bi-direction stream connection for actions. -func (s *Server) Actions(server proto.ElasticAgent_ActionsServer) error { - firstRespChan := make(chan *proto.ActionResponse) - go func() { - // go func will not be leaked, because when the main function - // returns it will close the connection. that will cause this - // function to return. - observed, err := server.Recv() - if err != nil { - close(firstRespChan) - return - } - firstRespChan <- observed - }() - - var ok bool - var firstResp *proto.ActionResponse - select { - case firstResp, ok = <-firstRespChan: - break - case <-time.After(InitialCheckinTimeout): - // close connection - s.logger.Debug("actions stream never sent initial response message; closing connection") - return nil - } - if !ok { - // close connection - return nil - } - if firstResp.Id != client.ActionResponseInitID { - // close connection - s.logger.Debug("actions stream first response message must be an init message; closing connection") - return status.Error(codes.InvalidArgument, "initial response must be an init message") - } - appState, ok := s.getByToken(firstResp.Token) - if !ok { - // no application with token; close connection - s.logger.Debug("actions stream sent an invalid token; closing connection") - return status.Error(codes.PermissionDenied, "invalid token") - } - appState.actionsLock.Lock() - if appState.actionsDone != nil { - // application is already connected (cannot have multiple); close connection - appState.actionsLock.Unlock() - s.logger.Debug("actions stream already exists for application; closing connection") - return status.Error(codes.AlreadyExists, "application already connected") - } - if !appState.actionsConn { - // application is being destroyed cannot reconnect; close connection - appState.actionsLock.Unlock() - s.logger.Debug("actions stream cannot connect, application is being destroyed; closing connection") - return status.Error(codes.Unavailable, "application cannot connect being destroyed") - } - actionsDone := make(chan bool) - appState.actionsDone = actionsDone - appState.actionsLock.Unlock() - - defer func() { - appState.actionsLock.Lock() - appState.actionsDone = nil - appState.actionsLock.Unlock() - }() - - // send the pending actions that need to be performed - recvDone := make(chan bool) - sendDone := make(chan bool) - go func() { - defer func() { close(sendDone) }() - for { - var pending *pendingAction - select { - case <-actionsDone: - return - case <-recvDone: - return - case pending = <-appState.pendingActions: - } - - if pending.expiresOn.Sub(time.Now().UTC()) <= 0 { - // to late action already expired - pending.callback(nil, ErrActionTimedOut) - continue - } - - appState.actionsLock.Lock() - err := server.Send(&proto.ActionRequest{ - Id: pending.id, - Name: pending.name, - Params: pending.params, - }) - if err != nil { - // failed to send action; add back to channel to retry on re-connect from the client - appState.actionsLock.Unlock() - appState.pendingActions <- pending - if reportableErr(err) { - s.logger.Debugf("failed to send pending action %s (will retry, after re-connect): %s", pending.id, err) - } - return - } - appState.sentActions[pending.id] = &sentAction{ - callback: pending.callback, - expiresOn: pending.expiresOn, - } - appState.actionsLock.Unlock() - } - }() - - // receive the finished actions - go func() { - for { - response, err := server.Recv() - if err 
!= nil { - if reportableErr(err) { - s.logger.Debugf("actions stream failed to receive data: %s", err) - } - close(recvDone) - return - } - appState.actionsLock.Lock() - action, ok := appState.sentActions[response.Id] - if !ok { - // nothing to do, unknown action request - s.logger.Debugf("actions stream received an unknown action: %s", response.Id) - appState.actionsLock.Unlock() - continue - } - delete(appState.sentActions, response.Id) - appState.actionsLock.Unlock() - - var result map[string]interface{} - err = json.Unmarshal(response.Result, &result) - if err != nil { - action.callback(nil, err) - } else if response.Status == proto.ActionResponse_FAILED { - errStr, ok := result["error"] - if ok { - err = fmt.Errorf("%s", errStr) - } else { - err = fmt.Errorf("unknown error") - } - action.callback(nil, err) - } else { - action.callback(result, nil) - } - } - }() - - <-sendDone - return nil -} - -// WriteConnInfo writes the connection information for the application into the writer. -// -// Note: If the writer implements io.Closer the writer is also closed. -func (as *ApplicationState) WriteConnInfo(w io.Writer) error { - connInfo := &proto.ConnInfo{ - Addr: as.srv.getListenAddr(), - ServerName: as.srvName, - Token: as.token, - CaCert: as.srv.ca.Crt(), - PeerCert: as.cert.Crt, - PeerKey: as.cert.Key, - } - infoBytes, err := protobuf.Marshal(connInfo) - if err != nil { - return errors.New(err, "failed to marshal connection information", errors.TypeApplication) - } - _, err = w.Write(infoBytes) - if err != nil { - return errors.New(err, "failed to write connection information", errors.TypeApplication) - } - closer, ok := w.(io.Closer) - if ok { - _ = closer.Close() - } - return nil -} - -// Stop instructs the application to stop gracefully within the timeout. -// -// Once the application is stopped or the timeout is reached the application is destroyed. Even in the case -// the application times out during stop and ErrApplication -func (as *ApplicationState) Stop(timeout time.Duration) error { - as.checkinLock.Lock() - wasConn := as.checkinDone != nil - cfgIdx := as.statusConfigIdx - as.expected = proto.StateExpected_STOPPING - as.checkinLock.Unlock() - - // send it to the client if its connected, otherwise it will be sent once it connects. - as.sendExpectedState(&proto.StateExpected{ - State: proto.StateExpected_STOPPING, - ConfigStateIdx: cfgIdx, - Config: "", - }, false) - - started := time.Now().UTC() - for { - if time.Now().UTC().Sub(started) > timeout { - as.Destroy() - return ErrApplicationStopTimedOut - } - - as.checkinLock.RLock() - s := as.status - doneChan := as.checkinDone - as.checkinLock.RUnlock() - if (wasConn && doneChan == nil) || (!wasConn && s == proto.StateObserved_STOPPING && doneChan == nil) { - // either occurred: - // * client was connected then disconnected on stop - // * client was not connected; connected; received stopping; then disconnected - as.Destroy() - return nil - } - - <-time.After(500 * time.Millisecond) - } -} - -// Destroy completely removes the application from the server without sending any stop command to the application. -// -// The ApplicationState at this point cannot be used. -func (as *ApplicationState) Destroy() { - as.destroyActionsStream() - as.destroyCheckinStream() - as.srv.apps.Delete(as.token) -} - -// UpdateConfig pushes an updated configuration to the connected application. 
-// UpdateConfig pushes an updated configuration to the connected application.
-func (as *ApplicationState) UpdateConfig(config string) error {
-	as.checkinLock.RLock()
-	expected := as.expected
-	currentCfg := as.expectedConfig
-	as.checkinLock.RUnlock()
-	if expected == proto.StateExpected_STOPPING {
-		return ErrApplicationStopping
-	}
-	if config == currentCfg {
-		// already at that expected config
-		return nil
-	}
-
-	as.checkinLock.Lock()
-	idx := as.expectedConfigIdx + 1
-	as.expectedConfigIdx = idx
-	as.expectedConfig = config
-	as.checkinLock.Unlock()
-
-	// send it to the client if it's connected, otherwise it will be sent once it connects.
-	as.sendExpectedState(&proto.StateExpected{
-		State:          expected,
-		ConfigStateIdx: idx,
-		Config:         config,
-	}, false)
-	return nil
-}
-
-// PerformAction synchronously performs an action on the application.
-func (as *ApplicationState) PerformAction(name string, params map[string]interface{}, timeout time.Duration) (map[string]interface{}, error) {
-	paramBytes, err := json.Marshal(params)
-	if err != nil {
-		return nil, err
-	}
-	id, err := uuid.NewV4()
-	if err != nil {
-		return nil, err
-	}
-	if !as.actionsConn {
-		// actions stream destroyed, action cancelled
-		return nil, ErrActionCancelled
-	}
-
-	resChan := make(chan actionResult)
-	as.pendingActions <- &pendingAction{
-		id:     id.String(),
-		name:   name,
-		params: paramBytes,
-		callback: func(m map[string]interface{}, err error) {
-			resChan <- actionResult{
-				result: m,
-				err:    err,
-			}
-		},
-		expiresOn: time.Now().UTC().Add(timeout),
-	}
-	res := <-resChan
-	return res.result, res.err
-}
-
-// App returns the registered app for the state.
-func (as *ApplicationState) App() interface{} {
-	return as.app
-}
-
-// Expected returns the expected state of the process.
-func (as *ApplicationState) Expected() proto.StateExpected_State {
-	as.checkinLock.RLock()
-	defer as.checkinLock.RUnlock()
-	return as.expected
-}
-
-// Config returns the expected config of the process.
-func (as *ApplicationState) Config() string {
-	as.checkinLock.RLock()
-	defer as.checkinLock.RUnlock()
-	return as.expectedConfig
-}
-
-// Status returns the current observed status.
-func (as *ApplicationState) Status() (proto.StateObserved_Status, string, map[string]interface{}) {
-	as.checkinLock.RLock()
-	defer as.checkinLock.RUnlock()
-	return as.status, as.statusMessage, as.statusPayload
-}
-
-// SetStatus allows the status to be overwritten by the agent.
-//
-// This status will be overwritten by the client if it reconnects and updates its status.
-func (as *ApplicationState) SetStatus(status proto.StateObserved_Status, msg string, payload map[string]interface{}) error {
-	payloadStr, err := json.Marshal(payload)
-	if err != nil {
-		return err
-	}
-	as.checkinLock.RLock()
-	as.status = status
-	as.statusMessage = msg
-	as.statusPayload = payload
-	as.statusPayloadStr = string(payloadStr)
-	as.checkinLock.RUnlock()
-	return nil
-}
-
-// SetInputTypes sets the allowed action input types for this application.
-func (as *ApplicationState) SetInputTypes(inputTypes []string) {
-	as.checkinLock.Lock()
-	as.inputTypes = make(map[string]struct{})
-	for _, inputType := range inputTypes {
-		as.inputTypes[inputType] = struct{}{}
-	}
-	as.checkinLock.Unlock()
-}
-
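The index-based config handshake deleted here deserves a note: UpdateConfig bumps expectedConfigIdx, and updateStatus (next) re-sends the expected state whenever the ConfigStateIdx echoed by the client lags behind it. A rough sketch of the client's half of that contract, using only message fields that appear in the deleted code; the recv/send/apply callbacks and the uint64 index type are assumptions, not the real elastic-agent-client API:

	// ackConfig is a hypothetical client loop: apply any config whose index
	// is newer than the last one applied, and always echo the applied index
	// back so the server stops re-sending the expected state.
	func ackConfig(recv func() (*proto.StateExpected, error), send func(*proto.StateObserved) error, apply func(string)) error {
		var applied uint64
		for {
			exp, err := recv()
			if err != nil {
				return err
			}
			if exp.ConfigStateIdx != applied {
				apply(exp.Config)
				applied = exp.ConfigStateIdx
			}
			if err := send(&proto.StateObserved{
				Status:         proto.StateObserved_HEALTHY,
				Message:        "Running",
				ConfigStateIdx: applied,
			}); err != nil {
				return err
			}
		}
	}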
-// updateStatus updates the current observed status from the application, sends the expected state back to the
-// application if the server expects it to be different than its observed state, and alerts the handler on the
-// server when the application status has changed.
-func (as *ApplicationState) updateStatus(checkin *proto.StateObserved, waitForReader bool) {
-	// convert the payload from a JSON string into a map
-	var payload map[string]interface{}
-	if checkin.Payload != "" {
-		// ignore the error; if the client sends bad JSON, payload will just be nil
-		_ = json.Unmarshal([]byte(checkin.Payload), &payload)
-	}
-
-	as.checkinLock.Lock()
-	expectedStatus := as.expected
-	expectedConfigIdx := as.expectedConfigIdx
-	expectedConfig := as.expectedConfig
-	prevStatus := as.status
-	prevMessage := as.statusMessage
-	prevPayloadStr := as.statusPayloadStr
-	as.status = checkin.Status
-	as.statusMessage = checkin.Message
-	as.statusPayloadStr = checkin.Payload
-	as.statusPayload = payload
-	as.statusConfigIdx = checkin.ConfigStateIdx
-	as.statusTime = time.Now().UTC()
-	as.checkinLock.Unlock()
-
-	var expected *proto.StateExpected
-	if expectedStatus == proto.StateExpected_STOPPING && checkin.Status != proto.StateObserved_STOPPING {
-		expected = &proto.StateExpected{
-			State:          expectedStatus,
-			ConfigStateIdx: checkin.ConfigStateIdx, // when stopping, always inform the client that the config it has is correct
-			Config:         "",
-		}
-	} else if checkin.ConfigStateIdx != expectedConfigIdx {
-		expected = &proto.StateExpected{
-			State:          expectedStatus,
-			ConfigStateIdx: expectedConfigIdx,
-			Config:         expectedConfig,
-		}
-	}
-	if expected != nil {
-		as.sendExpectedState(expected, waitForReader)
-	}
-
-	// alert the service handler that status has changed for the application
-	if prevStatus != checkin.Status || prevMessage != checkin.Message || prevPayloadStr != checkin.Payload {
-		as.srv.handler.OnStatusChange(as, checkin.Status, checkin.Message, payload)
-	}
-}
-
-// sendExpectedState sends the expected status over the pendingExpected channel if the other side is
-// waiting for a message.
-func (as *ApplicationState) sendExpectedState(expected *proto.StateExpected, waitForReader bool) {
-	if waitForReader {
-		as.pendingExpected <- expected
-		return
-	}
-
-	select {
-	case as.pendingExpected <- expected:
-	default:
-	}
-}
-
-// destroyActionsStream disconnects the actions stream (preventing reconnect) and cancels all pending actions.
-func (as *ApplicationState) destroyActionsStream() {
-	as.actionsLock.Lock()
-	as.actionsConn = false
-	if as.actionsDone != nil {
-		close(as.actionsDone)
-		as.actionsDone = nil
-	}
-	as.actionsLock.Unlock()
-	as.cancelActions()
-}
-
-// flushExpiredActions flushes any expired actions from the pending channel or those currently being processed.
-func (as *ApplicationState) flushExpiredActions() {
-	now := time.Now().UTC()
-	pendingActions := make([]*pendingAction, 0, len(as.pendingActions))
-	for {
-		done := false
-		select {
-		case pending := <-as.pendingActions:
-			pendingActions = append(pendingActions, pending)
-		default:
-			done = true
-		}
-		if done {
-			break
-		}
-	}
-	for _, pending := range pendingActions {
-		if pending.expiresOn.Sub(now) <= 0 {
-			pending.callback(nil, ErrActionTimedOut)
-		} else {
-			as.pendingActions <- pending
-		}
-	}
-	as.actionsLock.Lock()
-	for id, pendingResp := range as.sentActions {
-		if pendingResp.expiresOn.Sub(now) <= 0 {
-			delete(as.sentActions, id)
-			pendingResp.callback(nil, ErrActionTimedOut)
-		}
-	}
-	as.actionsLock.Unlock()
-}
-
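flushExpiredActions shows a reusable pattern for buffered channels: drain without blocking, fail what has expired, and requeue the rest. The same idea in isolation, as a generic sketch rather than code from this repository:

	// drainExpired empties a buffered channel without blocking, invoking
	// fail on entries the predicate rejects and putting the others back.
	func drainExpired[T any](ch chan T, expired func(T) bool, fail func(T)) {
		for n := len(ch); n > 0; n-- {
			v := <-ch
			if expired(v) {
				fail(v)
			} else {
				ch <- v // requeue at the back; one slot was just freed above
			}
		}
	}

Snapshotting len(ch) up front keeps the loop from re-reading entries it just requeued, which is the same effect the deleted code gets by collecting into a slice first.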
-// cancelActions cancels all pending or currently processing actions.
-func (as *ApplicationState) cancelActions() {
-	for {
-		done := false
-		select {
-		case pending := <-as.pendingActions:
-			pending.callback(nil, ErrActionCancelled)
-		default:
-			done = true
-		}
-		if done {
-			break
-		}
-	}
-	as.actionsLock.Lock()
-	for id, pendingResp := range as.sentActions {
-		delete(as.sentActions, id)
-		pendingResp.callback(nil, ErrActionCancelled)
-	}
-	as.actionsLock.Unlock()
-}
-
-// destroyCheckinStream disconnects the check-in stream (preventing reconnect).
-func (as *ApplicationState) destroyCheckinStream() {
-	as.checkinLock.Lock()
-	as.checkinConn = false
-	if as.checkinDone != nil {
-		close(as.checkinDone)
-		as.checkinDone = nil
-	}
-	as.checkinLock.Unlock()
-}
-
-// watchdog ensures that the registered applications are checking in at the expected intervals.
-func (s *Server) watchdog() {
-	defer s.watchdogWG.Done()
-	for {
-		t := time.NewTimer(s.watchdogCheckInterval)
-		select {
-		case <-s.watchdogDone:
-			t.Stop()
-			return
-		case <-t.C:
-		}
-
-		now := time.Now().UTC()
-		s.apps.Range(func(_ interface{}, val interface{}) bool {
-			serverApp, ok := val.(*ApplicationState)
-			if !ok {
-				return true
-			}
-			serverApp.checkinLock.RLock()
-			statusTime := serverApp.statusTime
-			serverApp.checkinLock.RUnlock()
-			if now.Sub(statusTime) > s.checkInMinTimeout {
-				serverApp.checkinLock.Lock()
-				prevStatus := serverApp.status
-				s := prevStatus
-				prevMessage := serverApp.statusMessage
-				message := prevMessage
-				if serverApp.status == proto.StateObserved_DEGRADED {
-					s = proto.StateObserved_FAILED
-					message = "Missed two check-ins"
-					serverApp.status = s
-					serverApp.statusMessage = message
-					serverApp.statusPayload = nil
-					serverApp.statusPayloadStr = ""
-					serverApp.statusTime = now
-				} else if serverApp.status != proto.StateObserved_FAILED {
-					s = proto.StateObserved_DEGRADED
-					message = "Missed last check-in"
-					serverApp.status = s
-					serverApp.statusMessage = message
-					serverApp.statusPayload = nil
-					serverApp.statusPayloadStr = ""
-					serverApp.statusTime = now
-				}
-				serverApp.checkinLock.Unlock()
-				if prevStatus != s || prevMessage != message {
-					serverApp.srv.handler.OnStatusChange(serverApp, s, message, nil)
-				}
-			}
-			serverApp.flushExpiredActions()
-			return true
-		})
-	}
-}
-
-// getByToken returns an application state by its token.
-func (s *Server) getByToken(token string) (*ApplicationState, bool) {
-	val, ok := s.apps.Load(token)
-	if ok {
-		return val.(*ApplicationState), true
-	}
-	return nil, false
-}
-
-// getCertificate returns the TLS certificate based on the clientHello or errors if not found.
-func (s *Server) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) {
-	var cert *tls.Certificate
-	s.apps.Range(func(_ interface{}, val interface{}) bool {
-		sa, ok := val.(*ApplicationState)
-		if !ok {
-			return true
-		}
-		if sa.srvName == chi.ServerName {
-			cert = sa.cert.Certificate
-			return false
-		}
-		return true
-	})
-	if cert != nil {
-		return cert, nil
-	}
-	return nil, errors.New("no supported TLS certificate", errors.TypeSecurity)
-}
-
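The watchdog above encodes a small escalation ladder: one missed check-in window marks the application DEGRADED, a second marks it FAILED, and FAILED is sticky until the client checks in again. Pulled out as a standalone sketch (the escalate helper is hypothetical; the states and messages come from the deleted code):

	// escalate is a sketch of the watchdog's escalation rule: one missed
	// check-in degrades, a second fails, and FAILED is left as-is.
	func escalate(cur proto.StateObserved_Status) (proto.StateObserved_Status, string, bool) {
		switch cur {
		case proto.StateObserved_DEGRADED:
			return proto.StateObserved_FAILED, "Missed two check-ins", true
		case proto.StateObserved_FAILED:
			return cur, "", false // already failed; wait for a real check-in
		default:
			return proto.StateObserved_DEGRADED, "Missed last check-in", true
		}
	}

The deleted TestServer_WatchdogFailApp below walks exactly this DEGRADED-then-FAILED sequence.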
-// getListenAddr returns the listening address of the server.
-func (s *Server) getListenAddr() string {
-	addr := strings.SplitN(s.listenAddr, ":", 2)
-	if len(addr) == 2 && addr[1] == "0" {
-		port := s.listener.Addr().(*net.TCPAddr).Port
-		return fmt.Sprintf("%s:%d", addr[0], port)
-	}
-	return s.listenAddr
-}
-
-type pendingAction struct {
-	id        string
-	name      string
-	params    []byte
-	callback  func(map[string]interface{}, error)
-	expiresOn time.Time
-}
-
-type sentAction struct {
-	callback  func(map[string]interface{}, error)
-	expiresOn time.Time
-}
-
-type actionResult struct {
-	result map[string]interface{}
-	err    error
-}
-
-func reportableErr(err error) bool {
-	if errors.Is(err, io.EOF) {
-		return false
-	}
-	s, ok := status.FromError(err)
-	if !ok {
-		return true
-	}
-	if s.Code() == codes.Canceled {
-		return false
-	}
-	return true
-}
-
-func genServerName() (string, error) {
-	u, err := uuid.NewV4()
-	if err != nil {
-		return "", err
-	}
-	return strings.Replace(u.String(), "-", "", -1), nil
-}
diff --git a/pkg/core/server/server_test.go b/pkg/core/server/server_test.go
deleted file mode 100644
index a2a1bdf4f80..00000000000
--- a/pkg/core/server/server_test.go
+++ /dev/null
@@ -1,794 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-//nolint:dupl // tests are equivalent
-package server
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"strings"
-	"sync"
-	"testing"
-	"time"
-
-	"go.elastic.co/apm/apmtest"
-	"google.golang.org/grpc/codes"
-	"google.golang.org/grpc/status"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/elastic/elastic-agent-client/v7/pkg/client"
-	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
-	"github.com/elastic/elastic-agent-libs/logp"
-
-	"github.com/elastic/elastic-agent/pkg/core/logger"
-)
-
-const (
-	initConfig = "initial_config"
-	newConfig  = "new_config"
-)
-
-func TestServer_Register(t *testing.T) {
-	app := &StubApp{}
-	srv := createAndStartServer(t, &StubHandler{})
-	defer srv.Stop()
-	_, err := srv.Register(app, initConfig)
-	assert.NoError(t, err)
-	_, err = srv.Register(app, initConfig)
-	assert.Equal(t, ErrApplicationAlreadyRegistered, err)
-}
-
-func TestServer_Get(t *testing.T) {
-	app := &StubApp{}
-	srv := createAndStartServer(t, &StubHandler{})
-	defer srv.Stop()
-	expected, err := srv.Register(app, initConfig)
-	require.NoError(t, err)
-	observed, ok := srv.Get(app)
-	assert.True(t, ok)
-	assert.Equal(t, expected, observed)
-	_, found := srv.Get(&StubApp{})
-	assert.False(t, found)
-}
-
-func TestServer_InitialCheckIn(t *testing.T) {
-	app := &StubApp{}
-	srv := createAndStartServer(t, &StubHandler{})
-	defer srv.Stop()
-	as, err := srv.Register(app, initConfig)
-	require.NoError(t, err)
-	cImpl := &StubClientImpl{}
-	c := newClientFromApplicationState(t, as, cImpl)
-	require.NoError(t, c.Start(context.Background()))
-	defer c.Stop()
-
-	// client should get initial check-in
-	require.NoError(t, waitFor(func() error {
-		if cImpl.Config() != initConfig {
-			return fmt.Errorf("client never got initial config")
-		}
-		return nil
-	}))
-
-	// set status as healthy and running
-	err = c.Status(proto.StateObserved_HEALTHY, "Running", nil)
-	require.NoError(t, err)
-
-	// application state should be updated
-	assert.NoError(t, waitFor(func() error {
-		if app.Status() != proto.StateObserved_HEALTHY {
-			return fmt.Errorf("server never updated currect application 
state") - } - return nil - })) -} - -func TestServer_MultiClients(t *testing.T) { - initConfig1 := "initial_config_1" - initConfig2 := "initial_config_2" - app1 := &StubApp{} - app2 := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as1, err := srv.Register(app1, initConfig1) - require.NoError(t, err) - cImpl1 := &StubClientImpl{} - c1 := newClientFromApplicationState(t, as1, cImpl1) - require.NoError(t, c1.Start(context.Background())) - defer c1.Stop() - as2, err := srv.Register(app2, initConfig2) - require.NoError(t, err) - cImpl2 := &StubClientImpl{} - c2 := newClientFromApplicationState(t, as2, cImpl2) - require.NoError(t, c2.Start(context.Background())) - defer c2.Stop() - - // clients should get initial check-ins - require.NoError(t, waitFor(func() error { - if cImpl1.Config() != initConfig1 { - return fmt.Errorf("client never got initial config") - } - return nil - })) - require.NoError(t, waitFor(func() error { - if cImpl2.Config() != initConfig2 { - return fmt.Errorf("client never got initial config") - } - return nil - })) - - // set status differently - err = c1.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - err = c2.Status(proto.StateObserved_DEGRADED, "No upstream connection", nil) - require.NoError(t, err) - - // application states should be updated - assert.NoError(t, waitFor(func() error { - if app1.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - assert.NoError(t, waitFor(func() error { - if app2.Status() != proto.StateObserved_DEGRADED { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) -} - -func TestServer_PreventCheckinStream(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.checkinConn = false // prevent connection to check-in stream - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - return nil - })) -} - -func TestServer_PreventActionsStream(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - as.actionsConn = false // prevent connection to check-in stream - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - return nil - })) -} - -func TestServer_DestroyPreventConnectAtTLS(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - 
require.NoError(t, err) - as.Destroy() - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - assert.NoError(t, waitFor(func() error { - if cImpl.Error() == nil { - return fmt.Errorf("client never got error trying to connect twice") - } - s, ok := status.FromError(cImpl.Error()) - if !ok { - return fmt.Errorf("client didn't get a status error") - } - if s.Code() != codes.Unavailable { - return fmt.Errorf("client didn't get unavaible error") - } - if !strings.Contains(s.Message(), "authentication handshake failed") { - return fmt.Errorf("client didn't get authentication handshake failed error") - } - return nil - })) -} - -func TestServer_UpdateConfig(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // push same config; should not increment config index - preIdx := as.expectedConfigIdx - require.NoError(t, as.UpdateConfig(initConfig)) - assert.Equal(t, preIdx, as.expectedConfigIdx) - - // push new config; should update the client - require.NoError(t, as.UpdateConfig(newConfig)) - assert.Equal(t, preIdx+1, as.expectedConfigIdx) - assert.NoError(t, waitFor(func() error { - if cImpl.Config() != newConfig { - return fmt.Errorf("client never got updated config") - } - return nil - })) -} - -func TestServer_UpdateConfigDisconnected(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // stop the client, then update the config - c.Stop() - require.NoError(t, as.UpdateConfig(newConfig)) - - // reconnect, client should get latest config - require.NoError(t, c.Start(context.Background())) - assert.NoError(t, waitFor(func() error { - if cImpl.Config() != newConfig { - return fmt.Errorf("client never got updated config") - } - return nil - })) -} - -func TestServer_UpdateConfigStopping(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := 
newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // perform stop try to update config (which will error) - done := make(chan bool) - go func() { - _ = as.Stop(500 * time.Millisecond) - close(done) - }() - err = as.UpdateConfig(newConfig) - assert.Error(t, ErrApplicationStopping, err) - <-done -} - -func TestServer_Stop(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Second * 5) - close(done) - }() - - // process of testing the flow - // 1. server sends stop - // 2. client sends configuring - // 3. server sends stop again - // 4. client sends stopping - // 5. 
client disconnects - require.NoError(t, waitFor(func() error { - if cImpl.Stop() == 0 { - return fmt.Errorf("client never got expected stop") - } - return nil - })) - err = c.Status(proto.StateObserved_CONFIGURING, "Configuring", nil) - require.NoError(t, err) - require.NoError(t, waitFor(func() error { - if cImpl.Stop() < 1 { - return fmt.Errorf("client never got expected stop again") - } - return nil - })) - err = c.Status(proto.StateObserved_STOPPING, "Stopping", nil) - require.NoError(t, err) - require.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_STOPPING { - return fmt.Errorf("server never updated to stopping") - } - return nil - })) - c.Stop() - <-done - - // no error on stop - assert.NoError(t, stopErr) -} - -func TestServer_StopJustDisconnect(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Second * 5) - close(done) - }() - - // process of testing the flow - // 1. server sends stop - // 2. 
client disconnects - require.NoError(t, waitFor(func() error { - if cImpl.Stop() == 0 { - return fmt.Errorf("client never got expected stop") - } - return nil - })) - c.Stop() - <-done - - // no error on stop - assert.NoError(t, stopErr) -} - -func TestServer_StopTimeout(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // clients should get initial check-ins then set as healthy - require.NoError(t, waitFor(func() error { - if cImpl.Config() != initConfig { - return fmt.Errorf("client never got initial config") - } - return nil - })) - err = c.Status(proto.StateObserved_HEALTHY, "Running", nil) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_HEALTHY { - return fmt.Errorf("server never updated currect application state") - } - return nil - })) - - // send stop to the client - done := make(chan bool) - var stopErr error - go func() { - stopErr = as.Stop(time.Millisecond) - close(done) - }() - - // don't actually stop the client - - // timeout error on stop - <-done - assert.Equal(t, ErrApplicationStopTimedOut, stopErr) -} - -func TestServer_WatchdogFailApp(t *testing.T) { - checkMinTimeout := 300 * time.Millisecond - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}, func(s *Server) { - s.watchdogCheckInterval = 100 * time.Millisecond - s.checkInMinTimeout = checkMinTimeout - }) - defer srv.Stop() - _, err := srv.Register(app, initConfig) - require.NoError(t, err) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_DEGRADED { - return fmt.Errorf("app status nevers set to degraded") - } - return nil - })) - assert.Equal(t, "Missed last check-in", app.Message()) - assert.NoError(t, waitFor(func() error { - if app.Status() != proto.StateObserved_FAILED { - return fmt.Errorf("app status nevers set to degraded") - } - return nil - })) - assert.Equal(t, "Missed two check-ins", app.Message()) -} - -func TestServer_PerformAction(t *testing.T) { - app := &StubApp{} - srv := createAndStartServer(t, &StubHandler{}, func(s *Server) { - s.watchdogCheckInterval = 50 * time.Millisecond - }) - defer srv.Stop() - as, err := srv.Register(app, initConfig) - require.NoError(t, err) - cImpl := &StubClientImpl{} - c := newClientFromApplicationState(t, as, cImpl, &EchoAction{}, &SleepAction{}) - require.NoError(t, c.Start(context.Background())) - defer c.Stop() - - // successful action - resp, err := as.PerformAction("echo", map[string]interface{}{ - "echo": "hello world", - }, 5*time.Second) - require.NoError(t, err) - assert.Equal(t, map[string]interface{}{ - "echo": "hello world", - }, resp) - - // action error client-side - _, err = as.PerformAction("echo", map[string]interface{}{ - "bad_param": "hello world", - }, 5*time.Second) - require.Error(t, err) - - // very slow action that times out - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 10*time.Millisecond) - require.Error(t, err) - assert.Equal(t, ErrActionTimedOut, err) - - // try slow action again with the client disconnected (should timeout the same) - c.Stop() - require.NoError(t, waitFor(func() error { - as.actionsLock.RLock() - defer as.actionsLock.RUnlock() - if as.actionsDone != nil { - return fmt.Errorf("client never 
disconnected the actions stream") - } - return nil - })) - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 10*time.Millisecond) - require.Error(t, err) - assert.Equal(t, ErrActionTimedOut, err) - - // perform action, reconnect client, and then action should be performed - done := make(chan bool) - go func() { - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": 100 * time.Millisecond, - }, 5*time.Second) - close(done) - }() - require.NoError(t, c.Start(context.Background())) - <-done - require.NoError(t, err) - - // perform action, destroy application - done = make(chan bool) - go func() { - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 5*time.Second) - close(done) - }() - <-time.After(100 * time.Millisecond) - as.Destroy() - <-done - require.Error(t, err) - assert.Equal(t, ErrActionCancelled, err) - - // perform action after destroy returns cancelled - _, err = as.PerformAction("sleep", map[string]interface{}{ - "sleep": time.Second, - }, 5*time.Second) - assert.Equal(t, ErrActionCancelled, err) -} - -func newErrorLogger(t *testing.T) *logger.Logger { - t.Helper() - - loggerCfg := logger.DefaultLoggingConfig() - loggerCfg.Level = logp.ErrorLevel - - log, err := logger.NewFromConfig("", loggerCfg, false) - require.NoError(t, err) - return log -} - -func createAndStartServer(t *testing.T, handler Handler, extraConfigs ...func(*Server)) *Server { - t.Helper() - srv, err := New(newErrorLogger(t), "localhost:0", handler, apmtest.DiscardTracer) - require.NoError(t, err) - for _, extra := range extraConfigs { - extra(srv) - } - require.NoError(t, srv.Start()) - return srv -} - -func newClientFromApplicationState(t *testing.T, as *ApplicationState, impl client.StateInterface, actions ...client.Action) client.Client { - t.Helper() - - var err error - var c client.Client - var wg sync.WaitGroup - r, w := io.Pipe() - wg.Add(1) - go func() { - c, err = client.NewFromReader(r, impl, actions...) 
- wg.Done() - }() - - require.NoError(t, as.WriteConnInfo(w)) - wg.Wait() - require.NoError(t, err) - return c -} - -type StubApp struct { - lock sync.RWMutex - status proto.StateObserved_Status - message string - payload map[string]interface{} -} - -func (a *StubApp) Status() proto.StateObserved_Status { - a.lock.RLock() - defer a.lock.RUnlock() - return a.status -} - -func (a *StubApp) Message() string { - a.lock.RLock() - defer a.lock.RUnlock() - return a.message -} - -type StubHandler struct{} - -func (h *StubHandler) OnStatusChange(as *ApplicationState, status proto.StateObserved_Status, message string, payload map[string]interface{}) { - stub, _ := as.app.(*StubApp) - stub.lock.Lock() - defer stub.lock.Unlock() - stub.status = status - stub.message = message - stub.payload = payload -} - -type StubClientImpl struct { - Lock sync.RWMutex - config string - stop int - error error -} - -func (c *StubClientImpl) Config() string { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.config -} - -func (c *StubClientImpl) Stop() int { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.stop -} - -func (c *StubClientImpl) Error() error { - c.Lock.RLock() - defer c.Lock.RUnlock() - return c.error -} - -func (c *StubClientImpl) OnConfig(config string) { - c.Lock.Lock() - defer c.Lock.Unlock() - c.config = config -} - -func (c *StubClientImpl) OnStop() { - c.Lock.Lock() - defer c.Lock.Unlock() - c.stop++ -} - -func (c *StubClientImpl) OnError(err error) { - c.Lock.Lock() - defer c.Lock.Unlock() - c.error = err -} - -type EchoAction struct{} - -func (*EchoAction) Name() string { - return "echo" -} - -func (*EchoAction) Execute(ctx context.Context, request map[string]interface{}) (map[string]interface{}, error) { - echoRaw, ok := request["echo"] - if !ok { - return nil, fmt.Errorf("missing required param of echo") - } - return map[string]interface{}{ - "echo": echoRaw, - }, nil -} - -type SleepAction struct{} - -func (*SleepAction) Name() string { - return "sleep" -} - -func (*SleepAction) Execute(ctx context.Context, request map[string]interface{}) (map[string]interface{}, error) { - sleepRaw, ok := request["sleep"] - if !ok { - return nil, fmt.Errorf("missing required param of slow") - } - sleep, ok := sleepRaw.(float64) - if !ok { - return nil, fmt.Errorf("sleep param must be a number") - } - timer := time.NewTimer(time.Duration(sleep)) - defer timer.Stop() - - select { - case <-ctx.Done(): - return nil, ctx.Err() - case <-timer.C: - } - - return map[string]interface{}{}, nil -} - -func waitFor(check func() error) error { - started := time.Now() - for { - err := check() - if err == nil { - return nil - } - if time.Since(started) >= 5*time.Second { - return fmt.Errorf("check timed out after 5 second: %w", err) - } - time.Sleep(10 * time.Millisecond) - } -} From e141de5513a4d20e166cbbffe55a42129e2deb5f Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 26 Jul 2022 15:04:54 -0400 Subject: [PATCH 08/49] [v2] Delete unused code from refactor (#777) * Add runtime for command v2 components. * Fix imports. * Add tests for watching checkins. * Fix lint and move checkin period to a configurable timeout. * Fix tests now that checkin timeout needs to be defined. * Fix code review and lint. * Work on actually running the v2 runtime. * Work on switching to the v2 runtime. * More work on switching to v2 runtime. * Cleanup some imports. * More import cleanups. * Add TODO to FleetServerComponentModifier. * More cleanup and removals. * Remove more. * Delete more unused code. 
* Clean up step_download from refactor. * Remove outdated managed_mode_test.go. * Fixes from code review and lint. * Fix lint and missing errcheck. --- .gitignore | 7 - dev-tools/cmd/buildspec/buildspec.go | 131 ---- dev-tools/mage/common.go | 2 + dev-tools/mage/pkgtypes.go | 30 +- internal/pkg/agent/application/application.go | 35 +- .../application/upgrade}/artifact/artifact.go | 16 +- .../application/upgrade}/artifact/config.go | 0 .../artifact/download/composed/downloader.go | 8 +- .../download/composed/downloader_test.go | 18 +- .../artifact/download/composed/verifier.go | 8 +- .../download/composed/verifier_test.go | 14 +- .../upgrade}/artifact/download/downloader.go | 4 +- .../artifact/download/fs/downloader.go | 21 +- .../drop/beat-8.0.0-darwin-x86_64.tar.gz | 0 .../beat-8.0.0-darwin-x86_64.tar.gz.sha512 | 0 .../upgrade}/artifact/download/fs/verifier.go | 9 +- .../artifact/download/fs/verifier_test.go | 14 +- .../artifact/download/http/downloader.go | 23 +- .../artifact/download/http/downloader_test.go | 3 +- .../artifact/download/http/elastic_test.go | 26 +- .../artifact/download/http/headers_rtt.go | 0 .../download/http/headers_rtt_test.go | 0 .../artifact/download/http/verifier.go | 20 +- .../download/localremote/downloader.go | 12 +- .../artifact/download/localremote/verifier.go | 12 +- .../artifact/download/snapshot/downloader.go | 7 +- .../artifact/download/snapshot/verifier.go | 6 +- .../upgrade}/artifact/download/verifier.go | 5 +- .../application/upgrade/step_download.go | 22 +- .../pkg/agent/application/upgrade/upgrade.go | 8 +- internal/pkg/agent/cmd/container.go | 3 +- internal/pkg/agent/cmd/diagnostics.go | 1 + internal/pkg/agent/cmd/inspect.go | 2 + internal/pkg/agent/configuration/fleet.go | 19 +- internal/pkg/agent/configuration/settings.go | 6 +- internal/pkg/agent/control/server/server.go | 15 - internal/pkg/agent/install/uninstall.go | 101 +-- internal/pkg/agent/operation/common_test.go | 177 ----- internal/pkg/agent/operation/monitoring.go | 679 ------------------ .../pkg/agent/operation/monitoring_test.go | 268 ------- internal/pkg/agent/operation/operation.go | 83 --- .../pkg/agent/operation/operation_config.go | 67 -- .../agent/operation/operation_retryable.go | 95 --- .../pkg/agent/operation/operation_start.go | 65 -- .../pkg/agent/operation/operation_stop.go | 50 -- internal/pkg/agent/operation/operator.go | 396 ---------- .../pkg/agent/operation/operator_handlers.go | 103 --- internal/pkg/agent/operation/operator_test.go | 489 ------------- .../tests/downloads/-1.0-darwin-x86_64.tar.gz | 0 .../configurable-1.0-darwin-x86_64.tar.gz | 0 .../configurable-1.0-darwin-x86_64/README.md | 1 - .../configurable-1.0-darwin-x86_64/main.go | 104 --- .../serviceable-1.0-darwin-x86_64/README.md | 1 - .../serviceable-1.0-darwin-x86_64/main.go | 142 ---- internal/pkg/agent/program/program.go | 307 -------- internal/pkg/agent/program/program_test.go | 614 ---------------- internal/pkg/agent/program/spec.go | 111 --- internal/pkg/agent/program/spec_test.go | 149 ---- internal/pkg/agent/program/supported.go | 41 -- .../testdata/audit_config-auditbeat.yml | 164 ----- .../agent/program/testdata/audit_config.yml | 104 --- .../agent/program/testdata/enabled_false.yml | 17 - .../program/testdata/enabled_output_false.yml | 17 - .../testdata/enabled_output_true-filebeat.yml | 38 - .../program/testdata/enabled_output_true.yml | 17 - .../testdata/enabled_true-filebeat.yml | 38 - .../agent/program/testdata/enabled_true.yml | 22 - .../agent/program/testdata/endpoint_arm.yml | 117 --- 
.../endpoint_basic-endpoint-security.yml | 113 --- .../agent/program/testdata/endpoint_basic.yml | 115 --- .../program/testdata/endpoint_no_fleet.yml | 102 --- .../testdata/endpoint_unknown_output.yml | 107 --- .../testdata/fleet_server-fleet-server.yml | 33 - .../agent/program/testdata/fleet_server.yml | 51 -- .../agent/program/testdata/journal_config.yml | 21 - .../logstash_config-endpoint-security.yml | 114 --- ...logstash_config-filebeat-elasticsearch.yml | 43 -- .../testdata/logstash_config-filebeat.yml | 39 - .../testdata/logstash_config-fleet-server.yml | 18 - .../testdata/logstash_config-heartbeat.yml | 29 - .../testdata/logstash_config-metricbeat.yml | 89 --- .../testdata/logstash_config-packetbeat.yml | 34 - .../program/testdata/logstash_config.yml | 212 ------ .../testdata/namespace-endpoint-security.yml | 114 --- .../program/testdata/namespace-filebeat.yml | 71 -- .../testdata/namespace-fleet-server.yml | 18 - .../program/testdata/namespace-heartbeat.yml | 30 - .../program/testdata/namespace-metricbeat.yml | 91 --- .../program/testdata/namespace-packetbeat.yml | 35 - .../pkg/agent/program/testdata/namespace.yml | 201 ------ .../single_config-endpoint-security.yml | 115 --- .../testdata/single_config-filebeat.yml | 71 -- .../testdata/single_config-fleet-server.yml | 18 - .../testdata/single_config-heartbeat.yml | 31 - .../testdata/single_config-metricbeat.yml | 91 --- .../testdata/single_config-packetbeat.yml | 36 - .../agent/program/testdata/single_config.yml | 202 ------ .../testdata/synthetics_config-heartbeat.yml | 66 -- .../program/testdata/synthetics_config.yml | 31 - .../testdata/usecases/enabled_output_true.yml | 17 - .../testdata/usecases/enabled_true.yml | 22 - .../testdata/usecases/endpoint_basic.yml | 115 --- .../testdata/usecases/fleet_server.yml | 51 -- .../enabled_output_true.filebeat.golden.yml | 38 - .../enabled_true.filebeat.golden.yml | 38 - ...ndpoint_basic.endpoint-security.golden.yml | 112 --- .../fleet_server.fleet-server.golden.yml | 33 - .../namespace.endpoint-security.golden.yml | 113 --- .../generated/namespace.filebeat.golden.yml | 70 -- .../namespace.fleet-server.golden.yml | 18 - .../generated/namespace.heartbeat.golden.yml | 30 - .../generated/namespace.metricbeat.golden.yml | 98 --- .../generated/namespace.packetbeat.golden.yml | 35 - ...single_config.endpoint-security.golden.yml | 114 --- .../single_config.filebeat.golden.yml | 73 -- .../single_config.fleet-server.golden.yml | 18 - .../single_config.heartbeat.golden.yml | 31 - .../single_config.metricbeat.golden.yml | 99 --- .../single_config.packetbeat.golden.yml | 36 - .../synthetics_config.heartbeat.golden.yml | 68 -- .../program/testdata/usecases/namespace.yml | 201 ------ .../testdata/usecases/single_config.yml | 204 ------ .../testdata/usecases/synthetics_config.yml | 31 - internal/pkg/agent/stateresolver/resolve.go | 177 ----- .../pkg/agent/stateresolver/resolve_test.go | 396 ---------- .../agent/stateresolver/statechange_string.go | 30 - .../pkg/agent/stateresolver/stateresolver.go | 68 -- .../agent/stateresolver/stateresolver_test.go | 63 -- .../install/atomic/atomic_installer.go | 97 --- .../install/atomic/atomic_installer_test.go | 120 ---- .../install/awaitable/awaitable_installer.go | 57 -- .../pkg/artifact/install/dir/dir_checker.go | 26 - .../artifact/install/hooks/hooks_installer.go | 60 -- internal/pkg/artifact/install/installer.go | 84 --- .../pkg/artifact/install/tar/tar_installer.go | 141 ---- .../pkg/artifact/install/zip/zip_installer.go | 163 ----- 
.../uninstall/hooks/hooks_uninstaller.go | 35 - .../pkg/artifact/uninstall/uninstaller.go | 29 - internal/pkg/core/app/descriptor.go | 91 --- internal/pkg/core/app/execution_context.go | 43 -- internal/pkg/core/app/process_cred.go | 65 -- internal/pkg/core/app/process_cred_other.go | 13 - internal/pkg/core/app/spec.go | 25 - internal/pkg/core/app/tag.go | 24 - .../core/monitoring/beats/beats_monitor.go | 3 +- .../pkg/core/monitoring/server/process.go | 3 +- internal/pkg/core/plugin/common.go | 84 --- internal/pkg/core/plugin/common_test.go | 95 --- internal/pkg/core/plugin/process/app.go | 319 -------- internal/pkg/core/plugin/process/configure.go | 59 -- internal/pkg/core/plugin/process/start.go | 190 ----- internal/pkg/core/plugin/process/status.go | 124 ---- internal/pkg/core/plugin/process/stdlogger.go | 56 -- .../pkg/core/plugin/process/stdlogger_test.go | 70 -- .../pkg/core/plugin/process/watch_posix.go | 32 - .../pkg/core/plugin/process/watch_windows.go | 54 -- internal/pkg/core/plugin/service/app.go | 373 ---------- internal/pkg/core/retry/config.go | 41 -- internal/pkg/core/retry/error.go | 30 - internal/pkg/core/retry/retrystrategy.go | 123 ---- internal/pkg/core/retry/retrystrategy_test.go | 183 ----- internal/pkg/core/state/state.go | 99 --- internal/pkg/core/status/reporter.go | 302 -------- internal/pkg/core/status/reporter_test.go | 101 --- internal/pkg/reporter/backend.go | 13 - internal/pkg/reporter/event.go | 30 - internal/pkg/reporter/fleet/config/config.go | 19 - internal/pkg/reporter/fleet/reporter.go | 175 ----- internal/pkg/reporter/fleet/reporter_test.go | 241 ------- internal/pkg/reporter/log/format.go | 59 -- internal/pkg/reporter/log/reporter.go | 57 -- internal/pkg/reporter/log/reporter_test.go | 98 --- internal/pkg/reporter/noop/reporter.go | 28 - internal/pkg/reporter/reporter.go | 157 ---- internal/pkg/reporter/reporter_test.go | 120 ---- internal/pkg/tokenbucket/token_bucket.go | 85 --- internal/pkg/tokenbucket/token_bucket_test.go | 109 --- internal/spec/apm-server.yml | 38 - internal/spec/auditbeat.yml | 98 --- internal/spec/cloudbeat.yml | 35 - internal/spec/endpoint.yml | 67 -- internal/spec/filebeat.yml | 115 --- internal/spec/fleet-server.yml | 69 -- internal/spec/heartbeat.yml | 24 - internal/spec/metricbeat.yml | 98 --- internal/spec/osquerybeat.yml | 39 - internal/spec/packetbeat.yml | 22 - magefile.go | 30 +- pkg/component/platforms.go | 30 + 189 files changed, 242 insertions(+), 14852 deletions(-) delete mode 100644 dev-tools/cmd/buildspec/buildspec.go rename internal/pkg/{ => agent/application/upgrade}/artifact/artifact.go (74%) rename internal/pkg/{ => agent/application/upgrade}/artifact/config.go (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/downloader.go (79%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/downloader_test.go (77%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/verifier.go (83%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/composed/verifier_test.go (83%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/downloader.go (68%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/downloader.go (75%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 (100%) rename 
internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/verifier.go (88%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/fs/verifier_test.go (91%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/downloader.go (91%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/downloader_test.go (98%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/elastic_test.go (81%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/headers_rtt.go (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/headers_rtt_test.go (100%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/http/verifier.go (84%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/localremote/downloader.go (66%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/localremote/verifier.go (68%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/snapshot/downloader.go (91%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/snapshot/verifier.go (70%) rename internal/pkg/{ => agent/application/upgrade}/artifact/download/verifier.go (97%) delete mode 100644 internal/pkg/agent/operation/common_test.go delete mode 100644 internal/pkg/agent/operation/monitoring.go delete mode 100644 internal/pkg/agent/operation/monitoring_test.go delete mode 100644 internal/pkg/agent/operation/operation.go delete mode 100644 internal/pkg/agent/operation/operation_config.go delete mode 100644 internal/pkg/agent/operation/operation_retryable.go delete mode 100644 internal/pkg/agent/operation/operation_start.go delete mode 100644 internal/pkg/agent/operation/operation_stop.go delete mode 100644 internal/pkg/agent/operation/operator.go delete mode 100644 internal/pkg/agent/operation/operator_handlers.go delete mode 100644 internal/pkg/agent/operation/operator_test.go delete mode 100644 internal/pkg/agent/operation/tests/downloads/-1.0-darwin-x86_64.tar.gz delete mode 100644 internal/pkg/agent/operation/tests/downloads/configurable-1.0-darwin-x86_64.tar.gz delete mode 100644 internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md delete mode 100644 internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go delete mode 100644 internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md delete mode 100644 internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go delete mode 100644 internal/pkg/agent/program/program.go delete mode 100644 internal/pkg/agent/program/program_test.go delete mode 100644 internal/pkg/agent/program/spec.go delete mode 100644 internal/pkg/agent/program/spec_test.go delete mode 100644 internal/pkg/agent/program/supported.go delete mode 100644 internal/pkg/agent/program/testdata/audit_config-auditbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/audit_config.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_false.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_output_false.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_output_true.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_true-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/enabled_true.yml delete mode 100644 
internal/pkg/agent/program/testdata/endpoint_arm.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_basic.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_no_fleet.yml delete mode 100644 internal/pkg/agent/program/testdata/endpoint_unknown_output.yml delete mode 100644 internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/fleet_server.yml delete mode 100644 internal/pkg/agent/program/testdata/journal_config.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/logstash_config.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-metricbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace-packetbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/namespace.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-endpoint-security.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-filebeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-fleet-server.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-metricbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config-packetbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/single_config.yml delete mode 100644 internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml delete mode 100644 internal/pkg/agent/program/testdata/synthetics_config.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/enabled_true.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/fleet_server.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml delete mode 100644 
internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/namespace.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/single_config.yml
 delete mode 100644 internal/pkg/agent/program/testdata/usecases/synthetics_config.yml
 delete mode 100644 internal/pkg/agent/stateresolver/resolve.go
 delete mode 100644 internal/pkg/agent/stateresolver/resolve_test.go
 delete mode 100644 internal/pkg/agent/stateresolver/statechange_string.go
 delete mode 100644 internal/pkg/agent/stateresolver/stateresolver.go
 delete mode 100644 internal/pkg/agent/stateresolver/stateresolver_test.go
 delete mode 100644 internal/pkg/artifact/install/atomic/atomic_installer.go
 delete mode 100644 internal/pkg/artifact/install/atomic/atomic_installer_test.go
 delete mode 100644 internal/pkg/artifact/install/awaitable/awaitable_installer.go
 delete mode 100644 internal/pkg/artifact/install/dir/dir_checker.go
 delete mode 100644 internal/pkg/artifact/install/hooks/hooks_installer.go
 delete mode 100644 internal/pkg/artifact/install/installer.go
 delete mode 100644 internal/pkg/artifact/install/tar/tar_installer.go
 delete mode 100644 internal/pkg/artifact/install/zip/zip_installer.go
 delete mode 100644 internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go
 delete mode 100644 internal/pkg/artifact/uninstall/uninstaller.go
 delete mode 100644 internal/pkg/core/app/descriptor.go
 delete mode 100644 internal/pkg/core/app/execution_context.go
 delete mode 100644 internal/pkg/core/app/process_cred.go
 delete mode 100644 internal/pkg/core/app/process_cred_other.go
 delete mode 100644 internal/pkg/core/app/spec.go
 delete mode 100644 internal/pkg/core/app/tag.go
 delete mode 100644 internal/pkg/core/plugin/common.go
 delete mode 100644 internal/pkg/core/plugin/common_test.go
 delete mode 100644 internal/pkg/core/plugin/process/app.go
 delete mode 100644 internal/pkg/core/plugin/process/configure.go
 delete mode 100644 internal/pkg/core/plugin/process/start.go
 delete mode 100644 internal/pkg/core/plugin/process/status.go
 delete mode 100644 internal/pkg/core/plugin/process/stdlogger.go
 delete mode 100644 internal/pkg/core/plugin/process/stdlogger_test.go
 delete mode 100644 internal/pkg/core/plugin/process/watch_posix.go
 delete mode 100644 internal/pkg/core/plugin/process/watch_windows.go
 delete mode 100644 internal/pkg/core/plugin/service/app.go
 delete mode 100644 internal/pkg/core/retry/config.go
 delete mode 100644 internal/pkg/core/retry/error.go
 delete mode 100644 internal/pkg/core/retry/retrystrategy.go
 delete mode 100644 internal/pkg/core/retry/retrystrategy_test.go
 delete mode 100644 internal/pkg/core/state/state.go
 delete mode 100644 internal/pkg/core/status/reporter.go
 delete mode 100644 internal/pkg/core/status/reporter_test.go
 delete mode 100644 internal/pkg/reporter/backend.go
 delete mode 100644 internal/pkg/reporter/event.go
 delete mode 100644 internal/pkg/reporter/fleet/config/config.go
 delete mode 100644 internal/pkg/reporter/fleet/reporter.go
 delete mode 100644 internal/pkg/reporter/fleet/reporter_test.go
 delete mode 100644 internal/pkg/reporter/log/format.go
 delete mode 100644 internal/pkg/reporter/log/reporter.go
 delete mode 100644 internal/pkg/reporter/log/reporter_test.go
 delete mode 100644 internal/pkg/reporter/noop/reporter.go
 delete mode 100644 internal/pkg/reporter/reporter.go
 delete mode 100644 internal/pkg/reporter/reporter_test.go
 delete mode 100644 internal/pkg/tokenbucket/token_bucket.go
 delete mode 100644 internal/pkg/tokenbucket/token_bucket_test.go
 delete mode 100644 internal/spec/apm-server.yml
 delete mode 100644 internal/spec/auditbeat.yml
 delete mode 100644 internal/spec/cloudbeat.yml
 delete mode 100644 internal/spec/endpoint.yml
 delete mode 100644 internal/spec/filebeat.yml
 delete mode 100644 internal/spec/fleet-server.yml
 delete mode 100644 internal/spec/heartbeat.yml
 delete mode 100644 internal/spec/metricbeat.yml
 delete mode 100644 internal/spec/osquerybeat.yml
 delete mode 100644 internal/spec/packetbeat.yml
diff --git a/.gitignore b/.gitignore
index 7bfae9cc392..3939307f99c 100644
--- a/.gitignore
+++ b/.gitignore
@@ -54,13 +54,6 @@ elastic-agent.yml.*
 fleet.yml
 fleet.yml.lock
 fleet.yml.old
-internal/pkg/agent/operation/tests/scripts/short--1.0.yml
-internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86/configurable
-internal/pkg/agent/operation/tests/scripts/servicable-1.0-darwin-x86/configurable
-internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/configurable
-internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/serviceable
-internal/pkg/agent/operation/tests/scripts/configurable
-internal/pkg/agent/operation/tests/scripts/serviceable
 internal/pkg/agent/application/fleet.yml
 internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/exec
 pkg/component/fake/fake
diff --git a/dev-tools/cmd/buildspec/buildspec.go b/dev-tools/cmd/buildspec/buildspec.go
deleted file mode 100644
index ea16fbe6968..00000000000
--- a/dev-tools/cmd/buildspec/buildspec.go
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package main
-
-import (
-    "bytes"
-    "flag"
-    "fmt"
-    "go/format"
-    "io/ioutil"
-    "os"
-    "text/template"
-
-    lic "github.com/elastic/elastic-agent/dev-tools/licenses"
-    "github.com/elastic/elastic-agent/pkg/packer"
-)
-
-var (
-    flagSet *flag.FlagSet
-    input   string
-    output  string
-    license string
-)
-
-func init() {
-    // NOTE: This uses its own flagSet because dev-tools/licenses sets flags.
-    flagSet = flag.NewFlagSet("buildspec", flag.ExitOnError)
-    flagSet.StringVar(&input, "in", "", "Source of input. \"-\" means reading from stdin")
-    flagSet.StringVar(&output, "out", "-", "Output path. \"-\" means writing to stdout")
-    flagSet.StringVar(&license, "license", "Elastic", "License header for generated file.")
-}
-
-var tmpl = template.Must(template.New("specs").Parse(`
-{{ .License }}
-// Code generated by elastic-agent/internals/dev-tools/buildspec/buildspec.go - DO NOT EDIT.
-
-package program
-
-import (
-    "strings"
-
-    "github.com/elastic/elastic-agent/pkg/packer"
-)
-
-var Supported []Spec
-var SupportedMap map[string]Spec
-
-func init() {
-    // Packed Files
-    {{ range $i, $f := .Files -}}
-    // {{ $f }}
-    {{ end -}}
-    unpacked := packer.MustUnpack("{{ .Pack }}")
-    SupportedMap = make(map[string]Spec)
-
-    for f, v := range unpacked {
-        s, err:= NewSpecFromBytes(v)
-        if err != nil {
-            panic("Cannot read spec from " + f + ": " + err.Error())
-        }
-        Supported = append(Supported, s)
-        SupportedMap[strings.ToLower(s.Cmd)] = s
-    }
-}
-`))
-
-func main() {
-    if err := flagSet.Parse(os.Args[1:]); err != nil {
-        fmt.Fprintf(os.Stderr, "error: %v", err)
-        os.Exit(1)
-    }
-
-    if len(input) == 0 {
-        fmt.Fprintln(os.Stderr, "Invalid input source")
-        os.Exit(1)
-    }
-
-    l, err := lic.Find(license)
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "problem to retrieve the license, error: %+v", err)
-        os.Exit(1)
-    }
-
-    data, err := gen(l)
-    if err != nil {
-        fmt.Fprintf(os.Stderr, "Error while generating the file, err: %+v\n", err)
-        os.Exit(1)
-    }
-
-    if output == "-" {
-        os.Stdout.Write(data)
-        return
-    } else {
-        if err = ioutil.WriteFile(output, data, 0o600); err != nil {
-            fmt.Fprintf(os.Stderr, "Error writing data to file %q: %v\n", output, data)
-            os.Exit(1)
-        }
-    }
-
-    return
-}
-
-func gen(l string) ([]byte, error) {
-    pack, files, err := packer.Pack(input)
-    if err != nil {
-        return nil, err
-    }
-
-    var buf bytes.Buffer
-    err = tmpl.Execute(&buf, struct {
-        Pack    string
-        Files   []string
-        License string
-    }{
-        Pack:    pack,
-        Files:   files,
-        License: l,
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    formatted, err := format.Source(buf.Bytes())
-    if err != nil {
-        return nil, err
-    }
-
-    return formatted, nil
-}
diff --git a/dev-tools/mage/common.go b/dev-tools/mage/common.go
index 4cee3270fe1..52152856389 100644
--- a/dev-tools/mage/common.go
+++ b/dev-tools/mage/common.go
@@ -763,6 +763,8 @@ func CreateSHA512File(file string) error {
     //nolint:gosec // permissions are correct
     return os.WriteFile(file+".sha512", []byte(out), 0644)
 }
+
+// GetSHA512Hash returns SHA512 hash of file.
 func GetSHA512Hash(file string) (string, error) {
     f, err := os.Open(file)
     if err != nil {
diff --git a/dev-tools/mage/pkgtypes.go b/dev-tools/mage/pkgtypes.go
index 1ae6fc9e148..04272b0e8e9 100644
--- a/dev-tools/mage/pkgtypes.go
+++ b/dev-tools/mage/pkgtypes.go
@@ -2,7 +2,6 @@
 // or more contributor license agreements. Licensed under the Elastic License;
 // you may not use this file except in compliance with the Elastic License.
 
-//nolint:goconst // avoiding const check for Deb/Zip
 package mage
 
 import (
@@ -40,6 +39,13 @@ const (
     defaultBinaryName = "{{.Name}}-{{.Version}}{{if .Snapshot}}-SNAPSHOT{{end}}{{if .OS}}-{{.OS}}{{end}}{{if .Arch}}-{{.Arch}}{{end}}"
 
     componentConfigMode os.FileMode = 0600
+
+    rpm     = "rpm"
+    deb     = "deb"
+    zipExt  = "zip"
+    targz   = "tar.gz"
+    docker  = "docker"
+    invalid = "invalid"
 )
 
 var (
@@ -204,17 +210,17 @@ func getOSArchName(platform BuildPlatform, t PackageType) (string, error) {
 func (typ PackageType) String() string {
     switch typ {
     case RPM:
-        return "rpm"
+        return rpm
     case Deb:
-        return "deb"
+        return deb
     case Zip:
-        return "zip"
+        return zipExt
     case TarGz:
-        return "tar.gz"
+        return targz
     case Docker:
-        return "docker"
+        return docker
     default:
-        return "invalid"
+        return invalid
     }
 }
@@ -226,15 +232,15 @@ func (typ PackageType) MarshalText() ([]byte, error) {
 // UnmarshalText returns a PackageType based on the given text.
 func (typ *PackageType) UnmarshalText(text []byte) error {
     switch strings.ToLower(string(text)) {
-    case "rpm":
+    case rpm:
         *typ = RPM
-    case "deb":
+    case deb:
         *typ = Deb
-    case "tar.gz", "tgz", "targz":
+    case targz, "tgz", "targz":
         *typ = TarGz
-    case "zip":
+    case zipExt:
         *typ = Zip
-    case "docker":
+    case docker:
         *typ = Docker
     default:
         return errors.Errorf("unknown package type: %v", string(text))
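The pkgtypes.go hunks above only swap literal strings for named constants; what they have to preserve is the UnmarshalText/String round-trip, including the "tgz" and "targz" aliases. A minimal sketch of that round-trip, assuming nothing beyond the exported PackageType values (RPM, Deb, Zip, TarGz, Docker) this file already defines:

    // Round-trip sketch: both aliases normalize to TarGz, whose String()
    // now returns the targz constant ("tar.gz") instead of a literal.
    var typ PackageType
    if err := typ.UnmarshalText([]byte("tgz")); err != nil {
        panic(err)
    }
    fmt.Println(typ.String()) // prints "tar.gz"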
diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go
index 788e189cb60..d5bc6a182d8 100644
--- a/internal/pkg/agent/application/application.go
+++ b/internal/pkg/agent/application/application.go
@@ -7,13 +7,9 @@ package application
 import (
     "fmt"
     "path/filepath"
-    goruntime "runtime"
-    "strconv"
 
     "go.elastic.co/apm"
 
-    "github.com/elastic/go-sysinfo"
-
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
@@ -35,18 +31,15 @@ type discoverFunc func() ([]string, error)
 // ErrNoConfiguration is returned when no configuration are found.
 var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig)
 
-// PlatformModifier can modify the platform details before the runtime specifications are loaded.
-type PlatformModifier func(detail component.PlatformDetail) component.PlatformDetail
-
 // New creates a new Agent and bootstrap the required subsystem.
 func New(
     log *logger.Logger,
     agentInfo *info.AgentInfo,
     reexec coordinator.ReExecManager,
     tracer *apm.Tracer,
-    modifiers ...PlatformModifier,
+    modifiers ...component.PlatformModifier,
 ) (*coordinator.Coordinator, error) {
-    platform, err := getPlatformDetail(modifiers...)
+    platform, err := component.LoadPlatformDetail(modifiers...)
     if err != nil {
         return nil, fmt.Errorf("failed to gather system information: %w", err)
     }
@@ -137,28 +130,6 @@ func New(
     return coord, nil
 }
 
-func getPlatformDetail(modifiers ...PlatformModifier) (component.PlatformDetail, error) {
-    info, err := sysinfo.Host()
-    if err != nil {
-        return component.PlatformDetail{}, err
-    }
-    os := info.Info().OS
-    detail := component.PlatformDetail{
-        Platform: component.Platform{
-            OS:   goruntime.GOOS,
-            Arch: goruntime.GOARCH,
-            GOOS: goruntime.GOOS,
-        },
-        Family: os.Family,
-        Major:  strconv.Itoa(os.Major),
-        Minor:  strconv.Itoa(os.Minor),
-    }
-    for _, modifier := range modifiers {
-        detail = modifier(detail)
-    }
-    return detail, nil
-}
-
 func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.Configuration, error) {
     path := paths.AgentConfigFile()
     store := storage.NewEncryptedDiskStore(path)
@@ -208,7 +179,7 @@ func externalConfigsGlob() string {
 }
 
 func discoverer(patterns ...string) discoverFunc {
-    var p []string
+    p := make([]string, 0, len(patterns))
     for _, newP := range patterns {
         if len(newP) == 0 {
             continue
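With getPlatformDetail gone, platform detection lives in pkg/component and New accepts component.PlatformModifier values instead. A hedged sketch of a modifier, using only the PlatformDetail fields visible in the removed code (Platform, Family, Major, Minor); the "debian" value is purely illustrative:

    // Hypothetical modifier pinning the OS family before runtime specs load,
    // e.g. to exercise family-specific spec conditions in tests.
    forceDebian := func(detail component.PlatformDetail) component.PlatformDetail {
        detail.Family = "debian"
        return detail
    }
    platform, err := component.LoadPlatformDetail(forceDebian)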
diff --git a/internal/pkg/artifact/artifact.go b/internal/pkg/agent/application/upgrade/artifact/artifact.go
similarity index 74%
rename from internal/pkg/artifact/artifact.go
rename to internal/pkg/agent/application/upgrade/artifact/artifact.go
index ebe84b95db7..c0e8c84a9d8 100644
--- a/internal/pkg/artifact/artifact.go
+++ b/internal/pkg/agent/application/upgrade/artifact/artifact.go
@@ -9,7 +9,6 @@ import (
     "path/filepath"
 
     "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
 )
 
 var packageArchMap = map[string]string{
@@ -24,20 +23,27 @@ var packageArchMap = map[string]string{
     "darwin-binary-universal": "darwin-universal.tar.gz",
 }
 
+// Artifact provides info for fetching from artifact store.
+type Artifact struct {
+    Name     string
+    Cmd      string
+    Artifact string
+}
+
 // GetArtifactName constructs a path to a downloaded artifact
-func GetArtifactName(spec program.Spec, version, operatingSystem, arch string) (string, error) {
+func GetArtifactName(a Artifact, version, operatingSystem, arch string) (string, error) {
     key := fmt.Sprintf("%s-binary-%s", operatingSystem, arch)
     suffix, found := packageArchMap[key]
     if !found {
         return "", errors.New(fmt.Sprintf("'%s' is not a valid combination for a package", key), errors.TypeConfig)
     }
 
-    return fmt.Sprintf("%s-%s-%s", spec.CommandName(), version, suffix), nil
+    return fmt.Sprintf("%s-%s-%s", a.Cmd, version, suffix), nil
 }
 
 // GetArtifactPath returns a full path of artifact for a program in specific version
-func GetArtifactPath(spec program.Spec, version, operatingSystem, arch, targetDir string) (string, error) {
-    artifactName, err := GetArtifactName(spec, version, operatingSystem, arch)
+func GetArtifactPath(a Artifact, version, operatingSystem, arch, targetDir string) (string, error) {
+    artifactName, err := GetArtifactName(a, version, operatingSystem, arch)
     if err != nil {
         return "", err
     }
diff --git a/internal/pkg/artifact/config.go b/internal/pkg/agent/application/upgrade/artifact/config.go
similarity index 100%
rename from internal/pkg/artifact/config.go
rename to internal/pkg/agent/application/upgrade/artifact/config.go
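Artifact deliberately carries only the three fields the fetchers need. GetArtifactName joins Cmd, the version, and a suffix looked up from packageArchMap, so a short sketch of the resulting file name (the version string is illustrative):

    a := artifact.Artifact{Name: "Filebeat", Cmd: "filebeat", Artifact: "beats/filebeat"}
    name, err := artifact.GetArtifactName(a, "8.3.0", "darwin", "universal")
    // With the "darwin-binary-universal" entry shown above, name is
    // "filebeat-8.3.0-darwin-universal.tar.gz"; an unknown OS/arch pair
    // yields an errors.TypeConfig error instead.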
diff --git a/internal/pkg/artifact/download/composed/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go
similarity index 79%
rename from internal/pkg/artifact/download/composed/downloader.go
rename to internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go
index 0b8504172f3..84e353ff661 100644
--- a/internal/pkg/artifact/download/composed/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go
@@ -10,8 +10,8 @@ import (
     "github.com/hashicorp/go-multierror"
     "go.elastic.co/apm"
 
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
 )
 
 // Downloader is a downloader with a predefined set of downloaders.
@@ -34,13 +34,13 @@ func NewDownloader(downloaders ...download.Downloader) *Downloader {
 
 // Download fetches the package from configured source.
 // Returns absolute path to downloaded package and an error.
-func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (string, error) {
+func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (string, error) {
     var err error
     span, ctx := apm.StartSpan(ctx, "download", "app.internal")
     defer span.End()
 
     for _, d := range e.dd {
-        s, e := d.Download(ctx, spec, version)
+        s, e := d.Download(ctx, a, version)
         if e == nil {
             return s, nil
         }
diff --git a/internal/pkg/artifact/download/composed/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go
similarity index 77%
rename from internal/pkg/artifact/download/composed/downloader_test.go
rename to internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go
index 4964c090ef9..c9820822d6f 100644
--- a/internal/pkg/artifact/download/composed/downloader_test.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader_test.go
@@ -9,17 +9,21 @@ import (
     "errors"
     "testing"
 
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
+
     "github.com/stretchr/testify/assert"
+)
 
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
+const (
+    succ = "succ"
 )
 
 type FailingDownloader struct {
     called bool
 }
 
-func (d *FailingDownloader) Download(ctx context.Context, _ program.Spec, _ string) (string, error) {
+func (d *FailingDownloader) Download(ctx context.Context, _ artifact.Artifact, _ string) (string, error) {
     d.called = true
     return "", errors.New("failing")
 }
@@ -30,9 +34,9 @@ type SuccDownloader struct {
     called bool
 }
 
-func (d *SuccDownloader) Download(ctx context.Context, _ program.Spec, _ string) (string, error) {
+func (d *SuccDownloader) Download(ctx context.Context, _ artifact.Artifact, _ string) (string, error) {
     d.called = true
-    return "succ", nil
+    return succ, nil
 }
 func (d *SuccDownloader) Called() bool { return d.called }
 
@@ -59,9 +63,9 @@ func TestComposed(t *testing.T) {
     for _, tc := range testCases {
         d := NewDownloader(tc.downloaders[0], tc.downloaders[1])
-        r, _ := d.Download(context.TODO(), program.Spec{Name: "a"}, "b")
+        r, _ := d.Download(context.TODO(), artifact.Artifact{Name: "a"}, "b")
 
-        assert.Equal(t, tc.expectedResult, r == "succ")
+        assert.Equal(t, tc.expectedResult, r == succ)
         assert.True(t, tc.checkFunc(tc.downloaders))
     }
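The composed Downloader keeps its first-success semantics (the loop above), only now keyed by artifact.Artifact. A usage sketch, assuming the fs.NewDownloader constructor that appears later in this patch; httpDownloader here stands in for any other value satisfying download.Downloader:

    cfg := artifact.DefaultConfig()
    d := composed.NewDownloader(fs.NewDownloader(cfg), httpDownloader)
    path, err := d.Download(ctx,
        artifact.Artifact{Name: "Filebeat", Cmd: "filebeat", Artifact: "beats/filebeat"},
        "8.3.0")
    // path is the first downloader's successful result; on total failure err
    // presumably aggregates the per-downloader errors (the file imports
    // hashicorp/go-multierror).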
diff --git a/internal/pkg/artifact/download/composed/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go
similarity index 83%
rename from internal/pkg/artifact/download/composed/verifier.go
rename to internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go
index 9fb60d20007..26c714d8c52 100644
--- a/internal/pkg/artifact/download/composed/verifier.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go
@@ -9,8 +9,8 @@ import (
     "github.com/hashicorp/go-multierror"
 
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
 )
 
 // Verifier is a verifier with a predefined set of verifiers.
@@ -32,13 +32,13 @@ func NewVerifier(verifiers ...download.Verifier) *Verifier {
 }
 
 // Verify checks the package from configured source.
-func (e *Verifier) Verify(spec program.Spec, version string) error {
+func (e *Verifier) Verify(a artifact.Artifact, version string) error {
     var err error
     var checksumMismatchErr *download.ChecksumMismatchError
     var invalidSignatureErr *download.InvalidSignatureError
 
     for _, v := range e.vv {
-        e := v.Verify(spec, version)
+        e := v.Verify(a, version)
         if e == nil {
             // Success
             return nil
diff --git a/internal/pkg/artifact/download/composed/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go
similarity index 83%
rename from internal/pkg/artifact/download/composed/verifier_test.go
rename to internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go
index a6b4b3a603e..110717627dc 100644
--- a/internal/pkg/artifact/download/composed/verifier_test.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier_test.go
@@ -8,17 +8,17 @@ import (
     "errors"
     "testing"
 
-    "github.com/stretchr/testify/assert"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
 
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
+    "github.com/stretchr/testify/assert"
 )
 
 type ErrorVerifier struct {
     called bool
 }
 
-func (d *ErrorVerifier) Verify(spec program.Spec, version string) error {
+func (d *ErrorVerifier) Verify(a artifact.Artifact, version string) error {
     d.called = true
     return errors.New("failing")
 }
@@ -29,7 +29,7 @@ type FailVerifier struct {
     called bool
 }
 
-func (d *FailVerifier) Verify(spec program.Spec, version string) error {
+func (d *FailVerifier) Verify(a artifact.Artifact, version string) error {
     d.called = true
     return &download.InvalidSignatureError{}
 }
@@ -40,7 +40,7 @@ type SuccVerifier struct {
     called bool
 }
 
-func (d *SuccVerifier) Verify(spec program.Spec, version string) error {
+func (d *SuccVerifier) Verify(a artifact.Artifact, version string) error {
     d.called = true
     return nil
 }
@@ -74,7 +74,7 @@ func TestVerifier(t *testing.T) {
     for _, tc := range testCases {
         d := NewVerifier(tc.verifiers[0], tc.verifiers[1], tc.verifiers[2])
-        err := d.Verify(program.Spec{Name: "a", Cmd: "a", Artifact: "a/a"}, "b")
+        err := d.Verify(artifact.Artifact{Name: "a", Cmd: "a", Artifact: "a/a"}, "b")
 
         assert.Equal(t, tc.expectedResult, err == nil)
diff --git a/internal/pkg/artifact/download/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/downloader.go
similarity index 68%
rename from internal/pkg/artifact/download/downloader.go
rename to internal/pkg/agent/application/upgrade/artifact/download/downloader.go
index f3f134ba588..19e102ab3c9 100644
--- a/internal/pkg/artifact/download/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/downloader.go
@@ -7,10 +7,10 @@ package download
 import (
     "context"
 
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
 )
 
 // Downloader is an interface allowing download of an artifact
 type Downloader interface {
-    Download(ctx context.Context, spec program.Spec, version string) (string, error)
+    Download(ctx context.Context, a artifact.Artifact, version string) (string, error)
 }
diff --git a/internal/pkg/artifact/download/fs/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go
similarity index 75%
rename from internal/pkg/artifact/download/fs/downloader.go
rename to internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go
index 9d518a1c7fb..46e85defc31 100644
--- a/internal/pkg/artifact/download/fs/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go
@@ -14,9 +14,8 @@ import (
     "go.elastic.co/apm"
 
     "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
     "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
 )
 
 const (
@@ -39,7 +38,7 @@ func NewDownloader(config *artifact.Config) *Downloader {
 
 // Download fetches the package from configured source.
 // Returns absolute path to downloaded package and an error.
-func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (_ string, err error) {
+func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) {
     span, ctx := apm.StartSpan(ctx, "download", "app.internal")
     defer span.End()
     downloadedFiles := make([]string, 0, 2)
@@ -53,24 +52,24 @@ func (e *Downloader) Download(ctx context.Context, spec program.Spec, version st
     }()
 
     // download from source to dest
-    path, err := e.download(e.config.OS(), spec, version)
+    path, err := e.download(e.config.OS(), a, version)
     downloadedFiles = append(downloadedFiles, path)
     if err != nil {
         return "", err
     }
 
-    hashPath, err := e.downloadHash(e.config.OS(), spec, version)
+    hashPath, err := e.downloadHash(e.config.OS(), a, version)
     downloadedFiles = append(downloadedFiles, hashPath)
     return path, err
 }
 
-func (e *Downloader) download(operatingSystem string, spec program.Spec, version string) (string, error) {
-    filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch())
+func (e *Downloader) download(operatingSystem string, a artifact.Artifact, version string) (string, error) {
+    filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch())
     if err != nil {
         return "", errors.New(err, "generating package name failed")
     }
 
-    fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
+    fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
     if err != nil {
         return "", errors.New(err, "generating package path failed")
     }
@@ -78,13 +77,13 @@ func (e *Downloader) download(operatingSystem string, spec program.Spec, version
     return e.downloadFile(filename, fullPath)
 }
 
-func (e *Downloader) downloadHash(operatingSystem string, spec program.Spec, version string) (string, error) {
-    filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch())
+func (e *Downloader) downloadHash(operatingSystem string, a artifact.Artifact, version string) (string, error) {
+    filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch())
     if err != nil {
         return "", errors.New(err, "generating package name failed")
     }
 
-    fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
+    fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
     if err != nil {
         return "", errors.New(err, "generating package path failed")
     }
diff --git a/internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz
similarity index 100%
rename from internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz
rename to internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz
diff --git a/internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512 b/internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512
similarity index 100%
rename from internal/pkg/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512
rename to internal/pkg/agent/application/upgrade/artifact/download/fs/testdata/drop/beat-8.0.0-darwin-x86_64.tar.gz.sha512
diff --git a/internal/pkg/artifact/download/fs/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go
similarity index 88%
rename from internal/pkg/artifact/download/fs/verifier.go
rename to internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go
index 441f7f68f26..4913d1731d2 100644
--- a/internal/pkg/artifact/download/fs/verifier.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier.go
@@ -10,10 +10,9 @@ import (
     "os"
     "path/filepath"
 
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
     "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
 )
 
 const (
@@ -47,8 +46,8 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Veri
 
 // Verify checks downloaded package on preconfigured
 // location against a key stored on elastic.co website.
-func (v *Verifier) Verify(spec program.Spec, version string) error {
-    filename, err := artifact.GetArtifactName(spec, version, v.config.OS(), v.config.Arch())
+func (v *Verifier) Verify(a artifact.Artifact, version string) error {
+    filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch())
     if err != nil {
         return errors.New(err, "retrieving package name")
     }
diff --git a/internal/pkg/artifact/download/fs/verifier_test.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go
similarity index 91%
rename from internal/pkg/artifact/download/fs/verifier_test.go
rename to internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go
index a758f90f300..59f0fcb2b57 100644
--- a/internal/pkg/artifact/download/fs/verifier_test.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/verifier_test.go
@@ -18,9 +18,9 @@ import (
     "github.com/stretchr/testify/require"
 
     "github.com/elastic/elastic-agent-libs/transport/httpcommon"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
+
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
     "github.com/elastic/elastic-agent/internal/pkg/release"
 )
 
@@ -29,7 +29,7 @@ const (
 )
 
 var (
-    beatSpec = program.Spec{Name: "Filebeat", Cmd: "filebeat", Artifact: "beat/filebeat"}
+    beatSpec = artifact.Artifact{Name: "Filebeat", Cmd: "filebeat", Artifact: "beat/filebeat"}
 )
 
 func TestFetchVerify(t *testing.T) {
@@ -38,7 +38,7 @@ func TestFetchVerify(t *testing.T) {
     installPath := filepath.Join("testdata", "install")
     targetPath := filepath.Join("testdata", "download")
     ctx := context.Background()
-    s := program.Spec{Name: "Beat", Cmd: "beat", Artifact: "beats/filebeat"}
+    s := artifact.Artifact{Name: "Beat", Cmd: "beat", Artifact: "beats/filebeat"}
     version := "8.0.0"
 
     targetFilePath := filepath.Join(targetPath, "beat-8.0.0-darwin-x86_64.tar.gz")
@@ -216,8 +216,8 @@ func TestVerify(t *testing.T) {
     os.RemoveAll(config.DropPath)
 }
 
-func prepareTestCase(beatSpec program.Spec, version string, cfg *artifact.Config) error {
-    filename, err := artifact.GetArtifactName(beatSpec, version, cfg.OperatingSystem, cfg.Architecture)
+func prepareTestCase(a artifact.Artifact, version string, cfg *artifact.Config) error {
+    filename, err := artifact.GetArtifactName(a, version, cfg.OperatingSystem, cfg.Architecture)
     if err != nil {
         return err
     }
diff --git a/internal/pkg/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go
similarity index 91%
rename from internal/pkg/artifact/download/http/downloader.go
rename to internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go
index 5f68816c58a..2f9a7748660 100644
--- a/internal/pkg/artifact/download/http/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go
@@ -21,9 +21,8 @@ import (
     "github.com/elastic/elastic-agent-libs/atomic"
     "github.com/elastic/elastic-agent-libs/transport/httpcommon"
 
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
     "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
     "github.com/elastic/elastic-agent/internal/pkg/release"
 )
 
@@ -75,8 +74,8 @@ func NewDownloaderWithClient(log progressLogger, config *artifact.Config, client
 
 // Download fetches the package from configured source.
 // Returns absolute path to downloaded package and an error.
-func (e *Downloader) Download(ctx context.Context, spec program.Spec, version string) (_ string, err error) {
-    remoteArtifact := spec.Artifact
+func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) {
+    remoteArtifact := a.Artifact
     downloadedFiles := make([]string, 0, 2)
     defer func() {
         if err != nil {
@@ -87,13 +86,13 @@ func (e *Downloader) Download(ctx context.Context, spec program.Spec, version st
     }()
 
     // download from source to dest
-    path, err := e.download(ctx, remoteArtifact, e.config.OS(), spec, version)
+    path, err := e.download(ctx, remoteArtifact, e.config.OS(), a, version)
     downloadedFiles = append(downloadedFiles, path)
     if err != nil {
         return "", err
     }
 
-    hashPath, err := e.downloadHash(ctx, remoteArtifact, e.config.OS(), spec, version)
+    hashPath, err := e.downloadHash(ctx, remoteArtifact, e.config.OS(), a, version)
     downloadedFiles = append(downloadedFiles, hashPath)
     return path, err
 }
@@ -115,13 +114,13 @@ func (e *Downloader) composeURI(artifactName, packageName string) (string, error
     return uri.String(), nil
 }
 
-func (e *Downloader) download(ctx context.Context, remoteArtifact string, operatingSystem string, spec program.Spec, version string) (string, error) {
-    filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch())
+func (e *Downloader) download(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version string) (string, error) {
+    filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch())
     if err != nil {
         return "", errors.New(err, "generating package name failed")
     }
 
-    fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
+    fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
     if err != nil {
         return "", errors.New(err, "generating package path failed")
     }
@@ -129,13 +128,13 @@ func (e *Downloader) download(ctx context.Context, remoteArtifact string, operat
     return e.downloadFile(ctx, remoteArtifact, filename, fullPath)
 }
 
-func (e *Downloader) downloadHash(ctx context.Context, remoteArtifact string, operatingSystem string, spec program.Spec, version string) (string, error) {
-    filename, err := artifact.GetArtifactName(spec, version, operatingSystem, e.config.Arch())
+func (e *Downloader) downloadHash(ctx context.Context, remoteArtifact string, operatingSystem string, a artifact.Artifact, version string) (string, error) {
+    filename, err := artifact.GetArtifactName(a, version, operatingSystem, e.config.Arch())
     if err != nil {
         return "", errors.New(err, "generating package name failed")
     }
 
-    fullPath, err := artifact.GetArtifactPath(spec, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
+    fullPath, err := artifact.GetArtifactPath(a, version, operatingSystem, e.config.Arch(), e.config.TargetDirectory)
     if err != nil {
         return "", errors.New(err, "generating package path failed")
     }
diff --git a/internal/pkg/artifact/download/http/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go
similarity index 98%
rename from internal/pkg/artifact/download/http/downloader_test.go
rename to internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go
index 6fa2777c02f..5a6762b40fc 100644
--- a/internal/pkg/artifact/download/http/downloader_test.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go
@@ -16,12 +16,13 @@ import (
     "testing"
     "time"
 
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+
     "github.com/docker/go-units"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
 
     "github.com/elastic/elastic-agent-libs/transport/httpcommon"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
 )
 
 func TestDownloadBodyError(t *testing.T) {
diff --git a/internal/pkg/artifact/download/http/elastic_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go
similarity index 81%
rename from internal/pkg/artifact/download/http/elastic_test.go
rename to internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go
index e76bc92fd06..66bdad9dcd4 100644
--- a/internal/pkg/artifact/download/http/elastic_test.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/http/elastic_test.go
@@ -21,8 +21,8 @@ import (
     "github.com/stretchr/testify/require"
 
     "github.com/elastic/elastic-agent-libs/transport/httpcommon"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
+
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
     "github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
@@ -33,7 +33,7 @@ const (
 )
 
 var (
-    beatSpec = program.Spec{
+    beatSpec = artifact.Artifact{
         Name:     "filebeat",
         Cmd:      "filebeat",
         Artifact: "beats/filebeat",
@@ -165,16 +165,16 @@ func getRandomTestCases() []testCase {
 
 func getElasticCoClient() http.Client {
     correctValues := map[string]struct{}{
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "i386.deb"):             {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "amd64.deb"):            {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "i686.rpm"):             {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "x86_64.rpm"):           {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-x86.tar.gz"):     {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-arm64.tar.gz"):   {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "linux-x86_64.tar.gz"):  {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "windows-x86.zip"):      {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "windows-x86_64.zip"):   {},
-        fmt.Sprintf("%s-%s-%s", beatSpec.CommandName(), version, "darwin-x86_64.tar.gz"): {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i386.deb"):             {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "amd64.deb"):            {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "i686.rpm"):             {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "x86_64.rpm"):           {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86.tar.gz"):     {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-arm64.tar.gz"):   {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "linux-x86_64.tar.gz"):  {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86.zip"):      {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "windows-x86_64.zip"):   {},
+        fmt.Sprintf("%s-%s-%s", beatSpec.Cmd, version, "darwin-x86_64.tar.gz"): {},
     }
 
     handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
diff --git a/internal/pkg/artifact/download/http/headers_rtt.go b/internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt.go
similarity index 100%
rename from internal/pkg/artifact/download/http/headers_rtt.go
rename to internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt.go
diff --git a/internal/pkg/artifact/download/http/headers_rtt_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt_test.go
similarity index 100%
rename from internal/pkg/artifact/download/http/headers_rtt_test.go
rename to internal/pkg/agent/application/upgrade/artifact/download/http/headers_rtt_test.go
diff --git a/internal/pkg/artifact/download/http/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go
similarity index 84%
rename from internal/pkg/artifact/download/http/verifier.go
rename to internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go
index 1fe855fa2af..4568c0f2cdd 100644
--- a/internal/pkg/artifact/download/http/verifier.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go
@@ -14,10 +14,10 @@ import (
     "strings"
 
     "github.com/elastic/elastic-agent-libs/transport/httpcommon"
+
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
     "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
 )
 
 const (
@@ -62,8 +62,8 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Veri
 
 // Verify checks downloaded package on preconfigured
 // location against a key stored on elastic.co website.
-func (v *Verifier) Verify(spec program.Spec, version string) error {
-    fullPath, err := artifact.GetArtifactPath(spec, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory)
+func (v *Verifier) Verify(a artifact.Artifact, version string) error {
+    fullPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory)
     if err != nil {
         return errors.New(err, "retrieving package path")
     }
@@ -77,7 +77,7 @@ func (v *Verifier) Verify(spec program.Spec, version string) error {
         return err
     }
 
-    if err = v.verifyAsc(spec, version); err != nil {
+    if err = v.verifyAsc(a, version); err != nil {
         var invalidSignatureErr *download.InvalidSignatureError
         if errors.As(err, &invalidSignatureErr) {
             os.Remove(fullPath + ".asc")
@@ -88,23 +88,23 @@ func (v *Verifier) Verify(spec program.Spec, version string) error {
     return nil
 }
 
-func (v *Verifier) verifyAsc(spec program.Spec, version string) error {
+func (v *Verifier) verifyAsc(a artifact.Artifact, version string) error {
     if len(v.pgpBytes) == 0 {
         // no pgp available skip verification process
         return nil
     }
 
-    filename, err := artifact.GetArtifactName(spec, version, v.config.OS(), v.config.Arch())
+    filename, err := artifact.GetArtifactName(a, version, v.config.OS(), v.config.Arch())
     if err != nil {
         return errors.New(err, "retrieving package name")
     }
 
-    fullPath, err := artifact.GetArtifactPath(spec, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory)
+    fullPath, err := artifact.GetArtifactPath(a, version, v.config.OS(), v.config.Arch(), v.config.TargetDirectory)
     if err != nil {
         return errors.New(err, "retrieving package path")
     }
 
-    ascURI, err := v.composeURI(filename, spec.Artifact)
+    ascURI, err := v.composeURI(filename, a.Artifact)
     if err != nil {
         return errors.New(err, "composing URI for fetching asc file", errors.TypeNetwork)
     }
diff --git a/internal/pkg/artifact/download/localremote/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go
similarity index 66%
rename from internal/pkg/artifact/download/localremote/downloader.go
rename to internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go
index d3877e9430f..72f4c1534cf 100644
--- a/internal/pkg/artifact/download/localremote/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/localremote/downloader.go
@@ -5,12 +5,12 @@
 package localremote
 
 import (
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/composed"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/fs"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/snapshot"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot"
     "github.com/elastic/elastic-agent/internal/pkg/release"
     "github.com/elastic/elastic-agent/pkg/core/logger"
 )
diff --git a/internal/pkg/artifact/download/localremote/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go
similarity index 68%
rename from internal/pkg/artifact/download/localremote/verifier.go
rename to internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go
index 9327e44539b..970ea342744 100644
--- a/internal/pkg/artifact/download/localremote/verifier.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/localremote/verifier.go
@@ -5,12 +5,12 @@
 package localremote
 
 import (
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/composed"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/fs"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/snapshot"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/composed"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/fs"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/snapshot"
     "github.com/elastic/elastic-agent/internal/pkg/release"
     "github.com/elastic/elastic-agent/pkg/core/logger"
 )
diff --git a/internal/pkg/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go
similarity index 91%
rename from internal/pkg/artifact/download/snapshot/downloader.go
rename to internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go
index 2fbe027ae4b..b858fca0fc3 100644
--- a/internal/pkg/artifact/download/snapshot/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go
@@ -9,10 +9,11 @@ import (
     "fmt"
     "strings"
 
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
+
     "github.com/elastic/elastic-agent-libs/transport/httpcommon"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http"
     "github.com/elastic/elastic-agent/internal/pkg/release"
     "github.com/elastic/elastic-agent/pkg/core/logger"
 )
diff --git a/internal/pkg/artifact/download/snapshot/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go
similarity index 70%
rename from internal/pkg/artifact/download/snapshot/verifier.go
rename to internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go
index e4e4e667be7..31ad26a0474 100644
--- a/internal/pkg/artifact/download/snapshot/verifier.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go
@@ -5,9 +5,9 @@
 package snapshot
 
 import (
-    "github.com/elastic/elastic-agent/internal/pkg/artifact"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download"
-    "github.com/elastic/elastic-agent/internal/pkg/artifact/download/http"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download"
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http"
 )
 
 // NewVerifier creates a downloader which first checks local directory
diff --git a/internal/pkg/artifact/download/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go
similarity index 97%
rename from internal/pkg/artifact/download/verifier.go
rename to internal/pkg/agent/application/upgrade/artifact/download/verifier.go
index 2501cde9fe8..bca1d72f93a 100644
--- a/internal/pkg/artifact/download/verifier.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/verifier.go
@@ -15,10 +15,11 @@ import (
     "path/filepath"
     "strings"
 
+    "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
+
     "golang.org/x/crypto/openpgp" //nolint:staticcheck // crypto/openpgp is only receiving security updates.
 
     "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/agent/program"
 )
 
 // ChecksumMismatchError indicates the expected checksum for a file does not
@@ -54,7 +55,7 @@ type Verifier interface {
     // *download.ChecksumMismatchError. And if the GPG signature is invalid then
     // Verify returns a *download.InvalidSignatureError. Use errors.As() to
     // check error types.
-    Verify(spec program.Spec, version string) error
+    Verify(a artifact.Artifact, version string) error
 }
 
 // VerifySHA512Hash checks that a sidecar file containing a sha512 checksum
"initiating fetcher") } - path, err := fetcher.Download(ctx, agentSpec, version) + path, err := fetcher.Download(ctx, agentArtifact, version) if err != nil { return "", errors.New(err, "failed upgrade of agent binary") } - if err := verifier.Verify(agentSpec, version); err != nil { + if err := verifier.Verify(agentArtifact, version); err != nil { return "", errors.New(err, "failed verification of agent binary") } @@ -64,7 +64,7 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri func newDownloader(version string, log *logger.Logger, settings *artifact.Config) (download.Downloader, error) { if !strings.HasSuffix(version, "-SNAPSHOT") { - return downloader.NewDownloader(log, settings) + return localremote.NewDownloader(log, settings) } // try snapshot repo before official @@ -84,7 +84,7 @@ func newDownloader(version string, log *logger.Logger, settings *artifact.Config func newVerifier(version string, log *logger.Logger, settings *artifact.Config) (download.Verifier, error) { allowEmptyPgp, pgp := release.PGP() if !strings.HasSuffix(version, "-SNAPSHOT") { - return downloader.NewVerifier(log, settings, allowEmptyPgp, pgp) + return localremote.NewVerifier(log, settings, allowEmptyPgp, pgp) } fsVerifier, err := fs.NewVerifier(settings, allowEmptyPgp, pgp) diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index cb6f827d8e2..444927a6052 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -14,8 +14,6 @@ import ( "runtime" "strings" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" - "github.com/otiai10/copy" "go.elastic.co/apm" @@ -23,11 +21,11 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -40,7 +38,7 @@ const ( ) var ( - agentSpec = program.Spec{ + agentArtifact = artifact.Artifact{ Name: "elastic-agent", Cmd: agentName, Artifact: "beats/" + agentName, diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index ff1b40936d1..b3a086439e3 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -22,6 +22,8 @@ import ( "syscall" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/spf13/cobra" "gopkg.in/yaml.v2" @@ -32,7 +34,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/artifact/install/tar" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" diff 
--git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index 718e1c4596f..a1412c147e1 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -428,6 +428,7 @@ func gatherConfig() (AgentConfig, error) { } cfg.ConfigRendered = mapCFG + // TODO(blakerouse): Fix diagnostic command for Elastic Agent v2 /* // Gather vars to render process config isStandalone, err := isStandalone(renderedCFG) diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index 03ea093cab6..0c51bb40460 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -19,6 +19,7 @@ func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Comman Long: "Shows current configuration of the agent", Args: cobra.ExactArgs(0), Run: func(c *cobra.Command, args []string) { + // TODO(blakerouse): Fix inspect command for Elastic Agent v2 /* if err := inspectConfig(paths.ConfigFile()); err != nil { fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) @@ -40,6 +41,7 @@ func newInspectOutputCommandWithArgs(_ []string) *cobra.Command { Long: "Displays configuration generated for output.\nIf no output is specified list of output is displayed", Args: cobra.MaximumNArgs(2), RunE: func(c *cobra.Command, args []string) error { + // TODO(blakerouse): Fix inspect command for Elastic Agent v2 /* outName, _ := c.Flags().GetString("output") program, _ := c.Flags().GetString("program") diff --git a/internal/pkg/agent/configuration/fleet.go b/internal/pkg/agent/configuration/fleet.go index 5bc9c115a63..0ae59c8f4e8 100644 --- a/internal/pkg/agent/configuration/fleet.go +++ b/internal/pkg/agent/configuration/fleet.go @@ -7,18 +7,16 @@ package configuration import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/remote" - fleetreporterConfig "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config" ) // FleetAgentConfig is the internal configuration of the agent after the enrollment is done, // this configuration is not exposed in anyway in the elastic-agent.yml and is only internal configuration. type FleetAgentConfig struct { - Enabled bool `config:"enabled" yaml:"enabled"` - AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` - Client remote.Config `config:",inline" yaml:",inline"` - Reporting *fleetreporterConfig.Config `config:"reporting" yaml:"reporting"` - Info *AgentInfo `config:"agent" yaml:"agent"` - Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` + Enabled bool `config:"enabled" yaml:"enabled"` + AccessAPIKey string `config:"access_api_key" yaml:"access_api_key"` + Client remote.Config `config:",inline" yaml:",inline"` + Info *AgentInfo `config:"agent" yaml:"agent"` + Server *FleetServerConfig `config:"server" yaml:"server,omitempty"` } // Valid validates the required fields for accessing the API. @@ -44,9 +42,8 @@ func (e *FleetAgentConfig) Valid() error { // DefaultFleetAgentConfig creates a default configuration for fleet. 
func DefaultFleetAgentConfig() *FleetAgentConfig { return &FleetAgentConfig{ - Enabled: false, - Client: remote.DefaultClientConfig(), - Reporting: fleetreporterConfig.DefaultConfig(), - Info: &AgentInfo{}, + Enabled: false, + Client: remote.DefaultClientConfig(), + Info: &AgentInfo{}, } } diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 7c2c422a65b..eab16a8177d 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -7,9 +7,9 @@ package configuration import ( "path/filepath" - "github.com/elastic/elastic-agent/internal/pkg/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/retry" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" ) @@ -22,7 +22,6 @@ type SettingsConfig struct { DownloadConfig *artifact.Config `yaml:"download" config:"download" json:"download"` ProcessConfig *process.Config `yaml:"process" config:"process" json:"process"` GRPC *GRPCConfig `yaml:"grpc" config:"grpc" json:"grpc"` - RetryConfig *retry.Config `yaml:"retry" config:"retry" json:"retry"` MonitoringConfig *monitoringCfg.MonitoringConfig `yaml:"monitoring" config:"monitoring" json:"monitoring"` LoggingConfig *logger.Config `yaml:"logging,omitempty" config:"logging,omitempty" json:"logging,omitempty"` @@ -35,7 +34,6 @@ type SettingsConfig struct { func DefaultSettingsConfig() *SettingsConfig { return &SettingsConfig{ ProcessConfig: process.DefaultConfig(), - RetryConfig: retry.DefaultConfig(), DownloadConfig: artifact.DefaultConfig(), LoggingConfig: logger.DefaultLoggingConfig(), MonitoringConfig: monitoringCfg.DefaultConfig(), diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 620a5b7b024..15cefc09f7f 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -9,7 +9,6 @@ import ( "encoding/json" "fmt" "net" - "sync" "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" @@ -19,14 +18,11 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/control" "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) -const agentName = "elastic-agent" - // Server is the daemon side of the control protocol. type Server struct { cproto.UnimplementedElasticAgentControlServer @@ -37,17 +33,6 @@ type Server struct { listener net.Listener server *grpc.Server tracer *apm.Tracer - lock sync.RWMutex -} - -type specer interface { - Specs() map[string]program.Spec -} - -type specInfo struct { - spec program.Spec - app string - rk string } // New creates a new control protocol server. 
diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index b43db32892d..d1ee5e371ff 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -16,27 +16,17 @@ import ( "github.com/kardianos/service" "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) -const ( - inputsKey = "inputs" - outputsKey = "outputs" -) - // Uninstall uninstalls persistently Elastic Agent on the system. func Uninstall(cfgFile string) error { // uninstall the current service @@ -56,7 +46,7 @@ func Uninstall(cfgFile string) error { } _ = svc.Uninstall() - if err := uninstallPrograms(context.Background(), cfgFile); err != nil { + if err := uninstallComponents(context.Background(), cfgFile); err != nil { return err } @@ -121,99 +111,74 @@ func delayedRemoval(path string) { } -func uninstallPrograms(ctx context.Context, cfgFile string) error { +func uninstallComponents(ctx context.Context, cfgFile string) error { log, err := logger.NewWithLogpLevel("", logp.ErrorLevel, false) if err != nil { return err } - cfg, err := operations.LoadFullAgentConfig(cfgFile, false) + platform, err := component.LoadPlatformDetail() if err != nil { - return err + return fmt.Errorf("failed to gather system information: %w", err) } - cfg, err = applyDynamics(ctx, log, cfg) + specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) if err != nil { - return err + return fmt.Errorf("failed to detect inputs and outputs: %w", err) } - pp, err := programsFromConfig(cfg) + cfg, err := operations.LoadFullAgentConfig(cfgFile, false) if err != nil { return err } - // nothing to remove - if len(pp) == 0 { - return nil + cfg, err = applyDynamics(ctx, log, cfg) + if err != nil { + return err } - uninstaller, err := uninstall.NewUninstaller() + comps, err := serviceComponentsFromConfig(specs, cfg) if err != nil { return err } - currentVersion := release.Version() - if release.Snapshot() { - currentVersion = fmt.Sprintf("%s-SNAPSHOT", currentVersion) + // nothing to remove + if len(comps) == 0 { + return nil } - artifactConfig := artifact.DefaultConfig() - for _, p := range pp { - descriptor := app.NewDescriptor(p.Spec, currentVersion, artifactConfig, nil) - if err := uninstaller.Uninstall(ctx, p.Spec, currentVersion, descriptor.Directory()); err != nil { - os.Stderr.WriteString(fmt.Sprintf("failed to uninstall '%s': %v\n", p.Spec.Name, err)) + // remove each service component + for _, comp := range comps { + if err := uninstallComponent(ctx, comp); err != nil { + os.Stderr.WriteString(fmt.Sprintf("failed to uninstall component %q: %s\n", comp.ID, err)) } } 
 	return nil
 }
 
-func programsFromConfig(cfg *config.Config) ([]program.Program, error) {
+func uninstallComponent(_ context.Context, _ component.Component) error {
+	// TODO(blakerouse): Perform uninstall of the service component once the service runtime is written.
+	return errors.New("failed to uninstall component; not implemented")
+}
+
+func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Config) ([]component.Component, error) {
 	mm, err := cfg.ToMapStr()
 	if err != nil {
 		return nil, errors.New("failed to create a map from config", err)
 	}
-
-	// if no input is defined nothing to remove
-	if _, found := mm[inputsKey]; !found {
-		return nil, nil
-	}
-
-	// if no output is defined nothing to remove
-	if _, found := mm[outputsKey]; !found {
-		return nil, nil
-	}
-
-	ast, err := transpiler.NewAST(mm)
-	if err != nil {
-		return nil, errors.New("failed to create a ast from config", err)
-	}
-
-	agentInfo, err := info.NewAgentInfo(false)
+	allComps, err := specs.ToComponents(mm)
 	if err != nil {
-		return nil, errors.New("failed to get an agent info", err)
+		return nil, fmt.Errorf("failed to render components: %w", err)
	}
-
-	ppMap, err := program.Programs(agentInfo, ast)
-	if err != nil {
-		return nil, errors.New("failed to get programs from config", err)
-	}
-
-	var pp []program.Program
-	check := make(map[string]bool)
-
-	for _, v := range ppMap {
-		for _, p := range v {
-			if _, found := check[p.Spec.CommandName()]; found {
-				continue
-			}
-
-			pp = append(pp, p)
-			check[p.Spec.CommandName()] = true
+	var serviceComps []component.Component
+	for _, comp := range allComps {
+		if comp.Err == nil && comp.Spec.Spec.Service != nil {
+			// non-error and service-based component
+			serviceComps = append(serviceComps, comp)
 		}
 	}
-
-	return pp, nil
+	return serviceComps, nil
 }
 
 func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) (*config.Config, error) {
diff --git a/internal/pkg/agent/operation/common_test.go b/internal/pkg/agent/operation/common_test.go
deleted file mode 100644
index aadc65b5db9..00000000000
--- a/internal/pkg/agent/operation/common_test.go
+++ /dev/null
@@ -1,177 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
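
The common_test.go removal that begins here deletes a harness that stitched a full operator together from dummy downloader, verifier, installer, and uninstaller implementations, then polled until the app reached the expected state. The polling helper is the reusable piece; below is a self-contained sketch of the same poll-until-deadline pattern, decoupled from *testing.T (the names and signature are mine, not the repo's):

package main

import (
	"fmt"
	"time"
)

// waitFor polls check every interval until it succeeds or the deadline
// passes. Returning the last check error keeps timeout failures diagnosable,
// the same property the deleted helper had with its fixed 15-second deadline.
func waitFor(timeout, interval time.Duration, check func() error) error {
	deadline := time.Now().Add(timeout)
	for {
		err := check()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("condition not met within %s: %w", timeout, err)
		}
		time.Sleep(interval)
	}
}

func main() {
	start := time.Now()
	err := waitFor(time.Second, 10*time.Millisecond, func() error {
		if time.Since(start) < 50*time.Millisecond {
			return fmt.Errorf("not ready yet")
		}
		return nil
	})
	fmt.Println("result:", err) // result: <nil>
}
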
- -package operation - -import ( - "context" - "os" - "path/filepath" - "runtime" - "testing" - "time" - - "go.elastic.co/apm/apmtest" - - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/retry" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -var downloadPath = getAbsPath("tests/downloads") -var installPath = getAbsPath("tests/scripts") - -func getTestOperator(t *testing.T, downloadPath string, installPath string, p *app.Descriptor) *Operator { - operatorCfg := &configuration.SettingsConfig{ - RetryConfig: &retry.Config{ - Enabled: true, - RetriesCount: 2, - Delay: 3 * time.Second, - MaxDelay: 10 * time.Second, - }, - ProcessConfig: &process.Config{ - FailureTimeout: 1, // restart instantly - }, - DownloadConfig: &artifact.Config{ - TargetDirectory: downloadPath, - InstallPath: installPath, - }, - LoggingConfig: logger.DefaultLoggingConfig(), - } - - l := getLogger() - agentInfo, _ := info.NewAgentInfo(true) - - installer := &DummyInstallerChecker{} - uninstaller := &DummyUninstaller{} - fetcher := &DummyDownloader{} - verifier := &DummyVerifier{} - - stateResolver, err := stateresolver.NewStateResolver(l) - if err != nil { - t.Fatal(err) - } - srv, err := server.New(l, "localhost:0", &ApplicationStatusHandler{}, apmtest.DiscardTracer) - if err != nil { - t.Fatal(err) - } - err = srv.Start() - if err != nil { - t.Fatal(err) - } - - operator, err := NewOperator(context.Background(), l, agentInfo, "p1", operatorCfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, noop.NewMonitor(), status.NewController(l)) - if err != nil { - t.Fatal(err) - } - - operator.config.DownloadConfig.OperatingSystem = "darwin" - operator.config.DownloadConfig.Architecture = "64" - - // make the download path so the `operation_verify` can ensure the path exists - downloadConfig := operator.config.DownloadConfig - fullPath, err := artifact.GetArtifactPath(p.Spec(), p.Version(), downloadConfig.OS(), downloadConfig.Arch(), downloadConfig.TargetDirectory) - if err != nil { - t.Fatal(err) - } - createFile(t, fullPath) - - return operator -} - -func getLogger() *logger.Logger { - loggerCfg := logger.DefaultLoggingConfig() - loggerCfg.Level = logp.ErrorLevel - l, _ := logger.NewFromConfig("", loggerCfg, false) - return l -} - -func getProgram(binary, version string) *app.Descriptor { - spec := program.SupportedMap[binary] - downloadCfg := &artifact.Config{ - InstallPath: installPath, - OperatingSystem: "darwin", - Architecture: "64", - } - return app.NewDescriptorWithPath(installPath, spec, version, downloadCfg, nil) -} - -func getAbsPath(path string) string { - _, filename, _, _ := runtime.Caller(0) - return 
filepath.Join(filepath.Dir(filename), path) -} - -func createFile(t *testing.T, path string) { - _, err := os.Stat(path) - if os.IsNotExist(err) { - file, err := os.Create(path) - if err != nil { - t.Fatal(err) - } - defer file.Close() - } -} - -func waitFor(t *testing.T, check func() error) { - started := time.Now() - for { - err := check() - if err == nil { - return - } - if time.Since(started) >= 15*time.Second { - t.Fatalf("check timed out after 15 second: %s", err) - } - time.Sleep(10 * time.Millisecond) - } -} - -type DummyDownloader struct{} - -func (*DummyDownloader) Download(_ context.Context, _ program.Spec, _ string) (string, error) { - return "", nil -} - -var _ download.Downloader = &DummyDownloader{} - -type DummyVerifier struct{} - -func (*DummyVerifier) Verify(_ program.Spec, _ string) error { - return nil -} - -var _ download.Verifier = &DummyVerifier{} - -type DummyInstallerChecker struct{} - -func (*DummyInstallerChecker) Check(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} - -func (*DummyInstallerChecker) Install(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} - -var _ install.InstallerChecker = &DummyInstallerChecker{} - -type DummyUninstaller struct{} - -func (*DummyUninstaller) Uninstall(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} - -var _ uninstall.Uninstaller = &DummyUninstaller{} diff --git a/internal/pkg/agent/operation/monitoring.go b/internal/pkg/agent/operation/monitoring.go deleted file mode 100644 index 11e080d7fd3..00000000000 --- a/internal/pkg/agent/operation/monitoring.go +++ /dev/null @@ -1,679 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
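
The monitoring.go removal that begins here deletes code that generated complete Filebeat and Metricbeat sidecar configurations in Go. Its core repeated pattern is a processor chain that routes each event into a data stream and stamps the agent identity; a trimmed sketch of that shape follows (values are placeholders, and the real chain also adds version, snapshot, and drop_fields entries):

package main

import (
	"encoding/json"
	"os"
)

// dataStreamProcessors mirrors the shape of the processor chain the deleted
// monitoring code attached to every generated input: route the event into a
// data stream, then stamp dataset and agent identity onto it.
func dataStreamProcessors(dataset, namespace, agentID string) []map[string]interface{} {
	return []map[string]interface{}{
		{"add_fields": map[string]interface{}{
			"target": "data_stream",
			"fields": map[string]interface{}{
				"type":      "logs",
				"dataset":   dataset,
				"namespace": namespace,
			},
		}},
		{"add_fields": map[string]interface{}{
			"target": "event",
			"fields": map[string]interface{}{"dataset": dataset},
		}},
		{"add_fields": map[string]interface{}{
			"target": "agent",
			"fields": map[string]interface{}{"id": agentID},
		}},
	}
}

func main() {
	enc := json.NewEncoder(os.Stdout)
	enc.SetIndent("", "  ")
	_ = enc.Encode(dataStreamProcessors("elastic_agent.filebeat", "default", "0000-agent-id"))
}
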
- -package operation - -import ( - "fmt" - "path/filepath" - "strings" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" -) - -const ( - monitoringName = "FLEET_MONITORING" - outputKey = "output" - logsProcessName = "filebeat" - metricsProcessName = "metricbeat" - artifactPrefix = "beats" - agentName = "elastic-agent" -) - -func (o *Operator) handleStartSidecar(s configrequest.Step) (result error) { - // if monitoring is disabled and running stop it - if !o.monitor.IsMonitoringEnabled() { - if o.isMonitoring != 0 { - o.logger.Info("operator.handleStartSidecar: monitoring is running and disabled, proceeding to stop") - return o.handleStopSidecar(s) - } - - o.logger.Info("operator.handleStartSidecar: monitoring is not running and disabled, no action taken") - return nil - } - - for _, step := range o.getMonitoringSteps(s) { - p, cfg, err := getProgramFromStepWithTags(step, o.config.DownloadConfig, monitoringTags()) - if err != nil { - return errors.New(err, - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()), - "operator.handleStartSidecar failed to create program") - } - - // best effort on starting monitoring, if no hosts provided stop and spare resources - if step.ID == configrequest.StepRemove { - if err := o.stop(p); err != nil { - result = multierror.Append(err, err) - } else { - o.markStopMonitoring(step.ProgramSpec.CommandName()) - } - } else { - if err := o.start(p, cfg); err != nil { - result = multierror.Append(err, err) - } else { - o.markStartMonitoring(step.ProgramSpec.CommandName()) - } - } - } - - return result -} - -func (o *Operator) handleStopSidecar(s configrequest.Step) (result error) { - for _, step := range o.generateMonitoringSteps(s.Version, "", nil) { - p, _, err := getProgramFromStepWithTags(step, o.config.DownloadConfig, monitoringTags()) - if err != nil { - return errors.New(err, - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()), - "operator.handleStopSidecar failed to create program") - } - - o.logger.Debugf("stopping program %v", p) - if err := o.stop(p); err != nil { - result = multierror.Append(err, err) - } else { - o.markStopMonitoring(step.ProgramSpec.CommandName()) - } - } - - return result -} - -func monitoringTags() map[app.Tag]string { - return map[app.Tag]string{ - app.TagSidecar: "true", - } -} - -func (o *Operator) getMonitoringSteps(step configrequest.Step) []configrequest.Step { - // get output - config, err := getConfigFromStep(step) - if err != nil { - o.logger.Error("operator.getMonitoringSteps: getting config from step failed: %v", err) - return nil - } - - outputIface, found := config[outputKey] - if !found { - o.logger.Errorf("operator.getMonitoringSteps: monitoring configuration not found for sidecar of type %s", step.ProgramSpec.CommandName()) - return nil - } - - outputMap, ok := outputIface.(map[string]interface{}) - if !ok { - o.logger.Error("operator.getMonitoringSteps: monitoring config is not a map") - return nil - } - - if len(outputMap) == 0 { - o.logger.Errorf("operator.getMonitoringSteps: monitoring is missing an output configuration for sidecar of type: %s", 
step.ProgramSpec.CommandName()) - return nil - } - - // Guards against parser issues upstream, this should not be possible but - // since we are folding all the child options as a map we should make sure we have - //a unique output. - if len(outputMap) > 1 { - o.logger.Errorf("operator.getMonitoringSteps: monitoring has too many outputs configuration for sidecar of type: %s", step.ProgramSpec.CommandName()) - return nil - } - - // Aggregate output configuration independently of the received output key. - output := make(map[string]interface{}) - - for _, v := range outputMap { - child, ok := v.(map[string]interface{}) - if !ok { - o.logger.Error("operator.getMonitoringSteps: monitoring config is not a map") - return nil - } - for c, j := range child { - output[c] = j - } - } - - t, ok := output["type"] - if !ok { - o.logger.Errorf("operator.getMonitoringSteps: unknown monitoring output for sidecar of type: %s", step.ProgramSpec.CommandName()) - return nil - } - - outputType, ok := t.(string) - if !ok { - o.logger.Errorf("operator.getMonitoringSteps: unexpected monitoring output type: %+v for sidecar of type: %s", t, step.ProgramSpec.CommandName()) - return nil - } - - return o.generateMonitoringSteps(step.Version, outputType, output) -} - -func (o *Operator) generateMonitoringSteps(version, outputType string, output interface{}) []configrequest.Step { - var steps []configrequest.Step - watchLogs := o.monitor.WatchLogs() - watchMetrics := o.monitor.WatchMetrics() - monitoringNamespace := o.monitor.MonitoringNamespace() - - // generate only when monitoring is running (for config refresh) or - // state changes (turning on/off) - if watchLogs != o.isMonitoringLogs() || watchLogs { - fbConfig, any := o.getMonitoringFilebeatConfig(outputType, output, monitoringNamespace) - stepID := configrequest.StepRun - if !watchLogs || !any { - stepID = configrequest.StepRemove - } - filebeatStep := configrequest.Step{ - ID: stepID, - Version: version, - ProgramSpec: loadSpecFromSupported(logsProcessName), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: fbConfig, - }, - } - - steps = append(steps, filebeatStep) - } - if watchMetrics != o.isMonitoringMetrics() || watchMetrics { - mbConfig, any := o.getMonitoringMetricbeatConfig(outputType, output, monitoringNamespace) - stepID := configrequest.StepRun - if !watchMetrics || !any { - stepID = configrequest.StepRemove - } - - metricbeatStep := configrequest.Step{ - ID: stepID, - Version: version, - ProgramSpec: loadSpecFromSupported(metricsProcessName), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: mbConfig, - }, - } - - steps = append(steps, metricbeatStep) - } - - return steps -} - -func loadSpecFromSupported(processName string) program.Spec { - if loadedSpec, found := program.SupportedMap[strings.ToLower(processName)]; found { - return loadedSpec - } - - return program.Spec{ - Name: processName, - Cmd: processName, - Artifact: fmt.Sprintf("%s/%s", artifactPrefix, processName), - } -} - -func (o *Operator) getMonitoringFilebeatConfig(outputType string, output interface{}, monitoringNamespace string) (map[string]interface{}, bool) { - inputs := []interface{}{ - map[string]interface{}{ - "type": "filestream", - "close": map[string]interface{}{ - "on_state_change": map[string]interface{}{ - "inactive": "5m", - }, - }, - "parsers": []map[string]interface{}{ - { - "ndjson": map[string]interface{}{ - "overwrite_keys": true, - "message_key": "message", - }, - }, - }, - "paths": []string{ - filepath.Join(paths.Home(), "logs", 
"elastic-agent-*.ndjson"), - filepath.Join(paths.Home(), "logs", "elastic-agent-watcher-*.ndjson"), - }, - "index": fmt.Sprintf("logs-elastic_agent-%s", monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": "elastic_agent", - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": "elastic_agent", - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "drop_fields": map[string]interface{}{ - "fields": []string{ - "ecs.version", //coming from logger, already added by libbeat - }, - "ignore_missing": true, - }, - }, - }, - }, - } - logPaths := o.getLogFilePaths() - if len(logPaths) > 0 { - for name, paths := range logPaths { - inputs = append(inputs, map[string]interface{}{ - "type": "filestream", - "close": map[string]interface{}{ - "on_state_change": map[string]interface{}{ - "inactive": "5m", - }, - }, - "parsers": []map[string]interface{}{ - { - "ndjson": map[string]interface{}{ - "overwrite_keys": true, - "message_key": "message", - }, - }, - }, - "paths": paths, - "index": fmt.Sprintf("logs-elastic_agent.%s-%s", name, monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": fmt.Sprintf("elastic_agent.%s", name), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", name), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "drop_fields": map[string]interface{}{ - "fields": []string{ - "ecs.version", //coming from logger, already added by libbeat - }, - "ignore_missing": true, - }, - }, - }, - }) - } - } - - result := map[string]interface{}{ - "filebeat": map[string]interface{}{ - "inputs": inputs, - }, - "output": map[string]interface{}{ - outputType: output, - }, - } - - return result, true -} - -func (o *Operator) getMonitoringMetricbeatConfig(outputType string, output interface{}, monitoringNamespace string) (map[string]interface{}, bool) { - hosts := o.getMetricbeatEndpoints() - if len(hosts) == 0 { - return nil, false - } - var modules []interface{} - fixedAgentName := strings.ReplaceAll(agentName, "-", "_") - - for name, endpoints := range hosts { - modules = append(modules, map[string]interface{}{ - "module": "beat", - "metricsets": []string{"stats", "state"}, - "period": "10s", - "hosts": endpoints, - "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", name, monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - 
"target": "data_stream", - "fields": map[string]interface{}{ - "type": "metrics", - "dataset": fmt.Sprintf("elastic_agent.%s", name), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", name), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - }, - }, map[string]interface{}{ - "module": "http", - "metricsets": []string{"json"}, - "namespace": "agent", - "period": "10s", - "path": "/stats", - "hosts": endpoints, - "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "metrics", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - "process": name, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "copy_fields": map[string]interface{}{ - "fields": normalizeHTTPCopyRules(name), - "ignore_missing": true, - }, - }, - { - "drop_fields": map[string]interface{}{ - "fields": []string{ - "http", - }, - "ignore_missing": true, - }, - }, - }, - }) - } - - modules = append(modules, map[string]interface{}{ - "module": "http", - "metricsets": []string{"json"}, - "namespace": "agent", - "period": "10s", - "path": "/stats", - "hosts": []string{beats.AgentPrefixedMonitoringEndpoint(o.config.DownloadConfig.OS(), o.config.MonitoringConfig.HTTP)}, - "index": fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace), - "processors": []map[string]interface{}{ - { - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "metrics", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - "namespace": monitoringNamespace, - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName), - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - "version": o.agentInfo.Version(), - "snapshot": o.agentInfo.Snapshot(), - "process": "elastic-agent", - }, - }, - }, - { - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": o.agentInfo.AgentID(), - }, - }, - }, - { - "copy_fields": map[string]interface{}{ - "fields": normalizeHTTPCopyRules(fixedAgentName), - "ignore_missing": true, - }, - }, - { - "drop_fields": map[string]interface{}{ - "fields": 
[]string{ - "http", - }, - "ignore_missing": true, - }, - }, - }, - }) - - result := map[string]interface{}{ - "metricbeat": map[string]interface{}{ - "modules": modules, - }, - "output": map[string]interface{}{ - outputType: output, - }, - } - - return result, true -} - -func (o *Operator) getLogFilePaths() map[string][]string { - paths := map[string][]string{} - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for _, a := range o.apps { - logPath := a.Monitor().LogPath(a.Spec(), o.pipelineID) - if logPath != "" { - paths[strings.ReplaceAll(a.Name(), "-", "_")] = []string{ - logPath, - fmt.Sprintf("%s*", logPath), - } - } - } - - return paths -} - -func (o *Operator) getMetricbeatEndpoints() map[string][]string { - endpoints := map[string][]string{} - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for _, a := range o.apps { - metricEndpoint := a.Monitor().MetricsPathPrefixed(a.Spec(), o.pipelineID) - if metricEndpoint != "" { - safeName := strings.ReplaceAll(a.Name(), "-", "_") - // prevent duplicates - var found bool - for _, ep := range endpoints[safeName] { - if ep == metricEndpoint { - found = true - break - } - } - - if !found { - endpoints[safeName] = append(endpoints[safeName], metricEndpoint) - } - } - } - - return endpoints -} - -func (o *Operator) markStopMonitoring(process string) { - switch process { - case logsProcessName: - o.isMonitoring ^= isMonitoringLogsFlag - case metricsProcessName: - o.isMonitoring ^= isMonitoringMetricsFlag - } -} - -func (o *Operator) markStartMonitoring(process string) { - switch process { - case logsProcessName: - o.isMonitoring |= isMonitoringLogsFlag - case metricsProcessName: - o.isMonitoring |= isMonitoringMetricsFlag - } -} - -func (o *Operator) isMonitoringLogs() bool { - return (o.isMonitoring & isMonitoringLogsFlag) != 0 -} - -func (o *Operator) isMonitoringMetrics() bool { - return (o.isMonitoring & isMonitoringMetricsFlag) != 0 -} - -func normalizeHTTPCopyRules(name string) []map[string]interface{} { - fromToMap := []map[string]interface{}{ - // I should be able to see the CPU Usage on the running machine. Am using too much CPU? - { - "from": "http.agent.beat.cpu", - "to": "system.process.cpu", - }, - // I should be able to see the Memory usage of Elastic Agent. Is the Elastic Agent using too much memory? - { - "from": "http.agent.beat.memstats.memory_sys", - "to": "system.process.memory.size", - }, - // I should be able to see the system memory. Am I running out of memory? - // TODO: with APM agent: total and free - - // I should be able to see Disk usage on the running machine. Am I running out of disk space? - // TODO: with APM agent - - // I should be able to see fd usage. Am I keep too many files open? - { - "from": "http.agent.beat.handles", - "to": "system.process.fd", - }, - // Cgroup reporting - { - "from": "http.agent.beat.cgroup", - "to": "system.process.cgroup", - }, - } - - spec, found := program.SupportedMap[name] - if !found { - return fromToMap - } - - for _, exportedMetric := range spec.ExportedMetrics { - fromToMap = append(fromToMap, map[string]interface{}{ - "from": fmt.Sprintf("http.agent.%s", exportedMetric), - "to": exportedMetric, - }) - } - - return fromToMap -} diff --git a/internal/pkg/agent/operation/monitoring_test.go b/internal/pkg/agent/operation/monitoring_test.go deleted file mode 100644 index c64ede2a8f1..00000000000 --- a/internal/pkg/agent/operation/monitoring_test.go +++ /dev/null @@ -1,268 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/require" - "go.elastic.co/apm/apmtest" - - "github.com/elastic/elastic-agent/internal/pkg/testutils" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" - "github.com/elastic/elastic-agent/internal/pkg/core/retry" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -func TestExportedMetrics(t *testing.T) { - programName := "testing" - expectedMetricsName := "metric_name" - program.SupportedMap[programName] = program.Spec{ - ExportedMetrics: []string{expectedMetricsName}, - } - - exportedMetrics := normalizeHTTPCopyRules(programName) - - exportedMetricFound := false - for _, kv := range exportedMetrics { - from, found := kv["from"] - if !found { - continue - } - to, found := kv["to"] - if !found { - continue - } - - if to != expectedMetricsName { - continue - } - if from != fmt.Sprintf("http.agent.%s", expectedMetricsName) { - continue - } - exportedMetricFound = true - break - } - - require.True(t, exportedMetricFound, "exported metric not found") - delete(program.SupportedMap, programName) -} - -func TestGenerateSteps(t *testing.T) { - testutils.InitStorage(t) - - const sampleOutput = "sample-output" - const outputType = "logstash" - - type testCase struct { - Name string - Config *monitoringConfig.MonitoringConfig - ExpectedSteps int - FilebeatStep bool - MetricbeatStep bool - } - - testCases := []testCase{ - {"NO monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: false, MonitorMetrics: false}, 0, false, false}, - {"FB monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: true, MonitorMetrics: false}, 1, true, false}, - {"MB monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: false, MonitorMetrics: true}, 1, false, true}, - {"ALL monitoring", &monitoringConfig.MonitoringConfig{MonitorLogs: true, MonitorMetrics: true}, 2, true, true}, - } - - for _, tc := range testCases { - t.Run(tc.Name, func(t *testing.T) { - m := &testMonitor{monitorLogs: tc.Config.MonitorLogs, monitorMetrics: tc.Config.MonitorMetrics} - operator := getMonitorableTestOperator(t, "tests/scripts", m, tc.Config) - steps := operator.generateMonitoringSteps("8.0", outputType, sampleOutput) - if actualSteps := len(steps); actualSteps != tc.ExpectedSteps { - t.Fatalf("invalid number of steps, expected %v, got %v", tc.ExpectedSteps, actualSteps) - } - - var fbFound, mbFound bool - for _, s := range steps { - // Filebeat step check - if s.ProgramSpec.CommandName() == "filebeat" { 
- fbFound = true - checkStep(t, "filebeat", outputType, sampleOutput, s) - } - - // Metricbeat step check - if s.ProgramSpec.CommandName() == "metricbeat" { - mbFound = true - checkStep(t, "metricbeat", outputType, sampleOutput, s) - } - } - - if tc.FilebeatStep != fbFound { - t.Fatalf("Steps for filebeat do not match. Was expected: %v, Was found: %v", tc.FilebeatStep, fbFound) - } - - if tc.MetricbeatStep != mbFound { - t.Fatalf("Steps for metricbeat do not match. Was expected: %v, Was found: %v", tc.MetricbeatStep, mbFound) - } - }) - } -} - -func checkStep(t *testing.T, stepName string, outputType string, expectedOutput interface{}, s configrequest.Step) { - if meta := s.Meta[configrequest.MetaConfigKey]; meta != nil { - mapstr, ok := meta.(map[string]interface{}) - if !ok { - t.Fatalf("no meta config for %s step", stepName) - } - - esOut, ok := mapstr["output"].(map[string]interface{}) - if !ok { - t.Fatalf("output not found for %s step", stepName) - } - - if actualOutput := esOut[outputType]; actualOutput != expectedOutput { - t.Fatalf("output for %s step does not match. expected: %v, got %v", stepName, expectedOutput, actualOutput) - } - } -} - -func getMonitorableTestOperator(t *testing.T, installPath string, m monitoring.Monitor, mcfg *monitoringConfig.MonitoringConfig) *Operator { - cfg := &configuration.SettingsConfig{ - RetryConfig: &retry.Config{ - Enabled: true, - RetriesCount: 2, - Delay: 3 * time.Second, - MaxDelay: 10 * time.Second, - }, - ProcessConfig: &process.Config{}, - DownloadConfig: &artifact.Config{ - InstallPath: installPath, - OperatingSystem: "darwin", - }, - MonitoringConfig: mcfg, - } - - l := getLogger() - agentInfo, _ := info.NewAgentInfo(true) - - installer := &DummyInstallerChecker{} - uninstaller := &DummyUninstaller{} - fetcher := &DummyDownloader{} - verifier := &DummyVerifier{} - - stateResolver, err := stateresolver.NewStateResolver(l) - if err != nil { - t.Fatal(err) - } - srv, err := server.New(l, "localhost:0", &ApplicationStatusHandler{}, apmtest.DiscardTracer) - if err != nil { - t.Fatal(err) - } - - ctx := context.Background() - operator, err := NewOperator(ctx, l, agentInfo, "p1", cfg, fetcher, verifier, installer, uninstaller, stateResolver, srv, nil, m, status.NewController(l)) - if err != nil { - t.Fatal(err) - } - - operator.apps["dummy"] = &testMonitorableApp{monitor: m} - - return operator -} - -type testMonitorableApp struct { - monitor monitoring.Monitor -} - -func (*testMonitorableApp) Name() string { return "" } -func (*testMonitorableApp) Started() bool { return false } -func (*testMonitorableApp) Start(_ context.Context, _ app.Taggable, cfg map[string]interface{}) error { - return nil -} -func (*testMonitorableApp) Stop() {} -func (*testMonitorableApp) Shutdown() {} -func (*testMonitorableApp) Configure(_ context.Context, config map[string]interface{}) error { - return nil -} -func (*testMonitorableApp) Spec() program.Spec { return program.Spec{} } -func (*testMonitorableApp) State() state.State { return state.State{} } -func (*testMonitorableApp) SetState(_ state.Status, _ string, _ map[string]interface{}) {} -func (a *testMonitorableApp) Monitor() monitoring.Monitor { return a.monitor } -func (a *testMonitorableApp) OnStatusChange(_ *server.ApplicationState, _ proto.StateObserved_Status, _ string, _ map[string]interface{}) { -} - -type testMonitor struct { - monitorLogs bool - monitorMetrics bool -} - -// EnrichArgs enriches arguments provided to application, in order to enable -// monitoring -func (b *testMonitor) EnrichArgs(_ 
program.Spec, _ string, args []string, _ bool) []string { - return args -} - -// Cleanup cleans up all drops. -func (b *testMonitor) Cleanup(program.Spec, string) error { return nil } - -// Close closes the monitor. -func (b *testMonitor) Close() {} - -// Prepare executes steps in order for monitoring to work correctly -func (b *testMonitor) Prepare(program.Spec, string, int, int) error { return nil } - -const testPath = "path" - -// LogPath describes a path where application stores logs. Empty if -// application is not monitorable -func (b *testMonitor) LogPath(program.Spec, string) string { - if !b.monitorLogs { - return "" - } - return testPath -} - -// MetricsPath describes a location where application exposes metrics -// collectable by metricbeat. -func (b *testMonitor) MetricsPath(program.Spec, string) string { - if !b.monitorMetrics { - return "" - } - return testPath -} - -// MetricsPathPrefixed return metrics path prefixed with http+ prefix. -func (b *testMonitor) MetricsPathPrefixed(program.Spec, string) string { - return "http+path" -} - -// Reload reloads state based on configuration. -func (b *testMonitor) Reload(cfg *config.Config) error { return nil } - -// IsMonitoringEnabled returns true if monitoring is configured. -func (b *testMonitor) IsMonitoringEnabled() bool { return b.monitorLogs || b.monitorMetrics } - -// MonitoringNamespace returns monitoring namespace configured. -func (b *testMonitor) MonitoringNamespace() string { return "default" } - -// WatchLogs return true if monitoring is configured and monitoring logs is enabled. -func (b *testMonitor) WatchLogs() bool { return b.monitorLogs } - -// WatchMetrics return true if monitoring is configured and monitoring metrics is enabled. -func (b *testMonitor) WatchMetrics() bool { return b.monitorMetrics } diff --git a/internal/pkg/agent/operation/operation.go b/internal/pkg/agent/operation/operation.go deleted file mode 100644 index 0419058cc44..00000000000 --- a/internal/pkg/agent/operation/operation.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// operation is an operation definition -// each operation needs to implement this interface in order -// to ease up rollbacks -type operation interface { - // Name is human readable name which identifies an operation - Name() string - // Check checks whether operation needs to be run - // In case prerequisites (such as invalid cert or tweaked binary) are not met, it returns error - // examples: - // - Start does not need to run if process is running - // - Fetch does not need to run if package is already present - Check(ctx context.Context, application Application) (bool, error) - // Run runs the operation - Run(ctx context.Context, application Application) error -} - -// Application is an application capable of being started, stopped and configured. 
-type Application interface { - Name() string - Started() bool - Start(ctx context.Context, p app.Taggable, cfg map[string]interface{}) error - Stop() - Shutdown() - Configure(ctx context.Context, config map[string]interface{}) error - Monitor() monitoring.Monitor - State() state.State - Spec() program.Spec - SetState(status state.Status, msg string, payload map[string]interface{}) - OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) -} - -// Descriptor defines a program which needs to be run. -// Is passed around operator operations. -type Descriptor interface { - Spec() program.Spec - ServicePort() int - BinaryName() string - Version() string - ID() string - Directory() string - Tags() map[app.Tag]string -} - -// ApplicationStatusHandler expects that only Application is registered in the server and updates the -// current state of the application from the OnStatusChange callback from inside the server. -// -// In the case that an application is reported as failed by the server it will then restart the application, unless -// it expects that the application should be stopping. -type ApplicationStatusHandler struct{} - -// OnStatusChange is the handler called by the GRPC server code. -// -// It updates the status of the application and handles restarting the application is needed. -func (*ApplicationStatusHandler) OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) { - if state.IsStateFiltered(msg, payload) { - return - } - app, ok := s.App().(Application) - - if !ok { - panic(errors.New("only Application can be registered when using the ApplicationStatusHandler", errors.TypeUnexpected)) - } - app.OnStatusChange(s, status, msg, payload) -} diff --git a/internal/pkg/agent/operation/operation_config.go b/internal/pkg/agent/operation/operation_config.go deleted file mode 100644 index fec587f3540..00000000000 --- a/internal/pkg/agent/operation/operation_config.go +++ /dev/null @@ -1,67 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
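
The operation_config.go file being deleted here is one instance of the v1 operation contract defined in operation.go above: Check decides whether Run is still needed, letting a flow skip idempotent steps. A minimal stand-alone version of that gate, with toy types in place of the real Application plumbing:

package main

import (
	"context"
	"fmt"
)

// op mirrors the shape of the deleted operation interface: Check reports
// whether Run still needs to happen, so repeated flows become idempotent.
type op interface {
	Name() string
	Check(ctx context.Context) (bool, error)
	Run(ctx context.Context) error
}

// startOp is a toy operation: it only needs to run while not yet started.
type startOp struct{ started bool }

func (s *startOp) Name() string                        { return "start" }
func (s *startOp) Check(context.Context) (bool, error) { return !s.started, nil }
func (s *startOp) Run(context.Context) error           { s.started = true; return nil }

// runFlow executes each operation, consulting Check first, the same way the
// deleted operator's runFlow did.
func runFlow(ctx context.Context, ops ...op) error {
	for _, o := range ops {
		need, err := o.Check(ctx)
		if err != nil {
			return err
		}
		if !need {
			fmt.Printf("skipping %s\n", o.Name())
			continue
		}
		if err := o.Run(ctx); err != nil {
			return fmt.Errorf("%s failed: %w", o.Name(), err)
		}
		fmt.Printf("ran %s\n", o.Name())
	}
	return nil
}

func main() {
	s := &startOp{}
	_ = runFlow(context.Background(), s) // ran start
	_ = runFlow(context.Background(), s) // skipping start
}
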
- -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin/process" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -var ( - // ErrClientNotFound is an error when client is not found - ErrClientNotFound = errors.New("client not found, check if process is running") - // ErrClientNotConfigurable happens when stored client does not implement Config func - ErrClientNotConfigurable = errors.New("client does not provide configuration") -) - -// Configures running process by sending a configuration to its -// grpc endpoint -type operationConfig struct { - logger *logger.Logger - operatorConfig *configuration.SettingsConfig - cfg map[string]interface{} -} - -func newOperationConfig( - logger *logger.Logger, - operatorConfig *configuration.SettingsConfig, - cfg map[string]interface{}) *operationConfig { - return &operationConfig{ - logger: logger, - operatorConfig: operatorConfig, - cfg: cfg, - } -} - -// Name is human readable name identifying an operation -func (o *operationConfig) Name() string { - return "operation-config" -} - -// Check checks whether config needs to be run. -// -// Always returns true. -func (o *operationConfig) Check(_ context.Context, _ Application) (bool, error) { return true, nil } - -// Run runs the operation -func (o *operationConfig) Run(ctx context.Context, application Application) (err error) { - defer func() { - if err != nil { - // application failed to apply config but is running. - s := state.Degraded - if errors.Is(err, process.ErrAppNotRunning) { - s = state.Failed - } - - application.SetState(s, err.Error(), nil) - } - }() - return application.Configure(ctx, o.cfg) -} diff --git a/internal/pkg/agent/operation/operation_retryable.go b/internal/pkg/agent/operation/operation_retryable.go deleted file mode 100644 index b30fd68563c..00000000000 --- a/internal/pkg/agent/operation/operation_retryable.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "fmt" - "strings" - - "github.com/elastic/elastic-agent/internal/pkg/core/retry" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// retryableOperations consists of multiple operations which are -// retryable as a whole. 
-// if nth operation fails all preceding are retried as well -type retryableOperations struct { - logger *logger.Logger - operations []operation - retryConfig *retry.Config -} - -func newRetryableOperations( - logger *logger.Logger, - retryConfig *retry.Config, - operations ...operation) *retryableOperations { - - return &retryableOperations{ - logger: logger, - retryConfig: retryConfig, - operations: operations, - } -} - -// Name is human readable name identifying an operation -func (o *retryableOperations) Name() string { - names := make([]string, 0, len(o.operations)) - for _, op := range o.operations { - names = append(names, op.Name()) - } - return fmt.Sprintf("retryable block: %s", strings.Join(names, " ")) -} - -// Check checks whether operation needs to be run -// examples: -// - Start does not need to run if process is running -// - Fetch does not need to run if package is already present -func (o *retryableOperations) Check(ctx context.Context, application Application) (bool, error) { - for _, op := range o.operations { - // finish early if at least one operation needs to be run or errored out - if run, err := op.Check(ctx, application); err != nil || run { - return run, err - } - } - - return false, nil -} - -// Run runs the operation -func (o *retryableOperations) Run(ctx context.Context, application Application) (err error) { - return retry.Do(ctx, o.retryConfig, o.runOnce(application)) -} - -// Run runs the operation -func (o *retryableOperations) runOnce(application Application) func(context.Context) error { - return func(ctx context.Context) error { - for _, op := range o.operations { - if ctx.Err() != nil { - return ctx.Err() - } - - shouldRun, err := op.Check(ctx, application) - if err != nil { - return err - } - - if !shouldRun { - continue - } - - o.logger.Debugf("running operation '%s' of the block '%s'", op.Name(), o.Name()) - if err := op.Run(ctx, application); err != nil { - o.logger.Errorf("operation %s failed, err: %v", op.Name(), err) - return err - } - } - - return nil - } -} - -// check interface -var _ operation = &retryableOperations{} diff --git a/internal/pkg/agent/operation/operation_start.go b/internal/pkg/agent/operation/operation_start.go deleted file mode 100644 index 38ee7167766..00000000000 --- a/internal/pkg/agent/operation/operation_start.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
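
One note on the retryable block above before the start operation below: it re-ran its whole operation chain through the internal retry helper, which was configured with a retries count, delay, and max delay. A rough stand-alone analogue that keeps only the attempt count and a fixed delay (simplified; the real helper carried more configuration):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// retryDo is a simplified analogue of the deleted internal retry helper: run
// fn up to attempts times, sleeping delay between tries and honoring ctx
// cancellation between attempts.
func retryDo(ctx context.Context, attempts int, delay time.Duration, fn func(context.Context) error) error {
	var err error
	for i := 0; i < attempts; i++ {
		if ctx.Err() != nil {
			return ctx.Err()
		}
		if err = fn(ctx); err == nil {
			return nil
		}
		time.Sleep(delay)
	}
	return fmt.Errorf("gave up after %d attempts: %w", attempts, err)
}

func main() {
	calls := 0
	err := retryDo(context.Background(), 3, 10*time.Millisecond, func(context.Context) error {
		calls++
		if calls < 3 {
			return errors.New("transient")
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}
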
- -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// operationStart start installed process -// skips if process is already running -type operationStart struct { - logger *logger.Logger - program Descriptor - operatorConfig *configuration.SettingsConfig - cfg map[string]interface{} -} - -func newOperationStart( - logger *logger.Logger, - program Descriptor, - operatorConfig *configuration.SettingsConfig, - cfg map[string]interface{}) *operationStart { - // TODO: make configurable - - return &operationStart{ - logger: logger, - program: program, - operatorConfig: operatorConfig, - cfg: cfg, - } -} - -// Name is human readable name identifying an operation -func (o *operationStart) Name() string { - return "operation-start" -} - -// Check checks whether application needs to be started. -// -// Only starts the application when in stopped state, any other state -// and the application is handled by the life cycle inside of the `Application` -// implementation. -func (o *operationStart) Check(_ context.Context, application Application) (bool, error) { - if application.Started() { - return false, nil - } - return true, nil -} - -// Run runs the operation -func (o *operationStart) Run(ctx context.Context, application Application) (err error) { - defer func() { - if err != nil { - application.SetState(state.Failed, err.Error(), nil) - } - }() - - return application.Start(ctx, o.program, o.cfg) -} diff --git a/internal/pkg/agent/operation/operation_stop.go b/internal/pkg/agent/operation/operation_stop.go deleted file mode 100644 index cb33010e1af..00000000000 --- a/internal/pkg/agent/operation/operation_stop.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// operationStop stops the running process -// skips if process is already skipped -type operationStop struct { - logger *logger.Logger - operatorConfig *configuration.SettingsConfig -} - -func newOperationStop( - logger *logger.Logger, - operatorConfig *configuration.SettingsConfig) *operationStop { - return &operationStop{ - logger: logger, - operatorConfig: operatorConfig, - } -} - -// Name is human readable name identifying an operation -func (o *operationStop) Name() string { - return "operation-stop" -} - -// Check checks whether application needs to be stopped. -// -// If the application state is not stopped then stop should be performed. 
-func (o *operationStop) Check(_ context.Context, application Application) (bool, error) { - if application.State().Status != state.Stopped { - return true, nil - } - return false, nil -} - -// Run runs the operation -func (o *operationStop) Run(ctx context.Context, application Application) (err error) { - application.Stop() - return nil -} diff --git a/internal/pkg/agent/operation/operator.go b/internal/pkg/agent/operation/operator.go deleted file mode 100644 index 50bf725e193..00000000000 --- a/internal/pkg/agent/operation/operator.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package operation - -import ( - "context" - "fmt" - "os" - "strings" - "sync" - "time" - - "go.elastic.co/apm" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/stateresolver" - "github.com/elastic/elastic-agent/internal/pkg/artifact/download" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/noop" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin/process" - "github.com/elastic/elastic-agent/internal/pkg/core/plugin/service" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -const ( - isMonitoringMetricsFlag = 1 << 0 - isMonitoringLogsFlag = 1 << 1 -) - -type waiter interface { - Wait() -} - -// Operator runs Start/Stop/Update operations -// it is responsible for detecting reconnect to existing processes -// based on backed up configuration -// Enables running sidecars for processes. -// TODO: implement retry strategies -type Operator struct { - bgContext context.Context - pipelineID string - logger *logger.Logger - agentInfo *info.AgentInfo - config *configuration.SettingsConfig - handlers map[string]handleFunc - stateResolver *stateresolver.StateResolver - srv *server.Server - reporter state.Reporter - monitor monitoring.Monitor - isMonitoring int - - apps map[string]Application - appsLock sync.Mutex - - installer install.InstallerChecker - uninstaller uninstall.Uninstaller - downloader download.Downloader - verifier download.Verifier - statusController status.Controller - statusReporter status.Reporter -} - -// NewOperator creates a new operator, this operator holds -// a collection of running processes, back it up -// Based on backed up collection it prepares clients, watchers... 
on init -func NewOperator( - ctx context.Context, - logger *logger.Logger, - agentInfo *info.AgentInfo, - pipelineID string, - config *configuration.SettingsConfig, - fetcher download.Downloader, - verifier download.Verifier, - installer install.InstallerChecker, - uninstaller uninstall.Uninstaller, - stateResolver *stateresolver.StateResolver, - srv *server.Server, - reporter state.Reporter, - monitor monitoring.Monitor, - statusController status.Controller) (*Operator, error) { - if config.DownloadConfig == nil { - return nil, fmt.Errorf("artifacts configuration not provided") - } - - operator := &Operator{ - bgContext: ctx, - config: config, - pipelineID: pipelineID, - logger: logger, - agentInfo: agentInfo, - downloader: fetcher, - verifier: verifier, - installer: installer, - uninstaller: uninstaller, - stateResolver: stateResolver, - srv: srv, - apps: make(map[string]Application), - reporter: reporter, - monitor: monitor, - statusController: statusController, - statusReporter: statusController.RegisterComponent("operator-" + pipelineID), - } - - operator.initHandlerMap() - - os.MkdirAll(config.DownloadConfig.TargetDirectory, 0755) - os.MkdirAll(config.DownloadConfig.InstallPath, 0755) - - return operator, nil -} - -// State describes the current state of the system. -// Reports all known applications and theirs states. Whether they are running -// or not, and if they are information about process is also present. -func (o *Operator) State() map[string]state.State { - result := make(map[string]state.State) - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for k, v := range o.apps { - result[k] = v.State() - } - - return result -} - -// Specs returns all program specifications -func (o *Operator) Specs() map[string]program.Spec { - r := make(map[string]program.Spec) - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - for _, app := range o.apps { - // use app.Name() instead of the (map) key so we can easy find the "_monitoring" processes - r[app.Name()] = app.Spec() - } - - return r -} - -// Close stops all programs handled by operator and clears state -func (o *Operator) Close() error { - o.monitor.Close() - o.statusReporter.Unregister() - - return o.HandleConfig(context.Background(), configrequest.New("", time.Now(), nil)) -} - -// HandleConfig handles configuration for a pipeline and performs actions to achieve this configuration. 
-func (o *Operator) HandleConfig(ctx context.Context, cfg configrequest.Request) (err error) { - span, ctx := apm.StartSpan(ctx, "route", "app.internal") - defer func() { - if !errors.Is(err, context.Canceled) { - apm.CaptureError(ctx, err).Send() - } - span.End() - }() - - _, stateID, steps, ack, err := o.stateResolver.Resolve(cfg) - if err != nil { - if !errors.Is(err, context.Canceled) { - // error is not filtered and should be reported - o.statusReporter.Update(state.Failed, err.Error(), nil) - err = errors.New(err, errors.TypeConfig, fmt.Sprintf("operator: failed to resolve configuration %s, error: %v", cfg, err)) - } - - return err - } - o.statusController.UpdateStateID(stateID) - - for _, step := range steps { - if !strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) { - if _, isSupported := program.SupportedMap[step.ProgramSpec.CommandName()]; !isSupported { - // mark failed, new config cannot be run - msg := fmt.Sprintf("program '%s' is not supported", step.ProgramSpec.CommandName()) - o.statusReporter.Update(state.Failed, msg, nil) - return errors.New(msg, - errors.TypeApplication, - errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName())) - } - } - - handler, found := o.handlers[step.ID] - if !found { - msg := fmt.Sprintf("operator: received unexpected event '%s'", step.ID) - o.statusReporter.Update(state.Failed, msg, nil) - return errors.New(msg, errors.TypeConfig) - } - - if err := handler(step); err != nil { - msg := fmt.Sprintf("operator: failed to execute step %s, error: %v", step.ID, err) - o.statusReporter.Update(state.Failed, msg, nil) - return errors.New(err, errors.TypeConfig, msg) - } - } - - // Ack the resolver should state for next call. - o.statusReporter.Update(state.Healthy, "", nil) - ack() - - return nil -} - -// Shutdown handles shutting down the running apps for Agent shutdown. 
-func (o *Operator) Shutdown() { - // wait for installer and downloader - if awaitable, ok := o.installer.(waiter); ok { - o.logger.Infof("waiting for installer of pipeline '%s' to finish", o.pipelineID) - awaitable.Wait() - o.logger.Debugf("pipeline installer '%s' done", o.pipelineID) - } - - o.appsLock.Lock() - defer o.appsLock.Unlock() - - wg := sync.WaitGroup{} - wg.Add(len(o.apps)) - - started := time.Now() - - for _, a := range o.apps { - go func(a Application) { - started := time.Now() - a.Shutdown() - wg.Done() - o.logger.Debugf("took %s to shutdown %s", - time.Now().Sub(started), a.Name()) - }(a) - } - wg.Wait() - o.logger.Debugf("took %s to shutdown %d apps", - time.Now().Sub(started), len(o.apps)) -} - -// Start starts a new process based on a configuration -// specific configuration of new process is passed -func (o *Operator) start(p Descriptor, cfg map[string]interface{}) (err error) { - flow := []operation{ - newOperationStart(o.logger, p, o.config, cfg), - newOperationConfig(o.logger, o.config, cfg), - } - - return o.runFlow(p, flow) -} - -// Stop stops the running process, if process is already stopped it does not return an error -func (o *Operator) stop(p Descriptor) (err error) { - flow := []operation{ - newOperationStop(o.logger, o.config), - } - - return o.runFlow(p, flow) -} - -// PushConfig tries to push config to a running process -func (o *Operator) pushConfig(p Descriptor, cfg map[string]interface{}) error { - flow := []operation{ - newOperationConfig(o.logger, o.config, cfg), - } - - return o.runFlow(p, flow) -} - -func (o *Operator) runFlow(p Descriptor, operations []operation) error { - if len(operations) == 0 { - o.logger.Infof("operator received event with no operations for program '%s'", p.ID()) - return nil - } - - app, err := o.getApp(p) - if err != nil { - return err - } - - for _, op := range operations { - if err := o.bgContext.Err(); err != nil { - return err - } - - shouldRun, err := op.Check(o.bgContext, app) - if err != nil { - return err - } - - if !shouldRun { - o.logger.Infof("operation '%s' skipped for %s.%s", op.Name(), p.BinaryName(), p.Version()) - continue - } - - o.logger.Debugf("running operation '%s' for %s.%s", op.Name(), p.BinaryName(), p.Version()) - if err := op.Run(o.bgContext, app); err != nil { - return err - } - } - - // when application is stopped remove from the operator - if app.State().Status == state.Stopped { - o.deleteApp(p) - } - - return nil -} - -func (o *Operator) getApp(p Descriptor) (Application, error) { - o.appsLock.Lock() - defer o.appsLock.Unlock() - - id := p.ID() - - o.logger.Debugf("operator is looking for %s in app collection: %v", p.ID(), o.apps) - if a, ok := o.apps[id]; ok { - return a, nil - } - - desc, ok := p.(*app.Descriptor) - if !ok { - return nil, fmt.Errorf("descriptor is not an app.Descriptor") - } - - // TODO: (michal) join args into more compact options version - var a Application - var err error - - monitor := o.monitor - appName := p.BinaryName() - if app.IsSidecar(p) { - // make watchers unmonitorable - monitor = noop.NewMonitor() - appName += "_monitoring" - } - - if p.ServicePort() == 0 { - // Applications without service ports defined are ran as through the process application type. 
-		a, err = process.NewApplication(
-			o.bgContext,
-			p.ID(),
-			appName,
-			o.pipelineID,
-			o.config.LoggingConfig.Level.String(),
-			desc,
-			o.srv,
-			o.config,
-			o.logger,
-			o.reporter,
-			monitor,
-			o.statusController)
-	} else {
-		// Service port is defined application is ran with service application type, with it fetching
-		// the connection credentials through the defined service port.
-		a, err = service.NewApplication(
-			o.bgContext,
-			p.ID(),
-			appName,
-			o.pipelineID,
-			o.config.LoggingConfig.Level.String(),
-			p.ServicePort(),
-			desc,
-			o.srv,
-			o.config,
-			o.logger,
-			o.reporter,
-			monitor,
-			o.statusController)
-	}
-
-	if err != nil {
-		return nil, err
-	}
-
-	o.apps[id] = a
-	return a, nil
-}
-
-func (o *Operator) deleteApp(p Descriptor) {
-	o.appsLock.Lock()
-	defer o.appsLock.Unlock()
-
-	id := p.ID()
-
-	o.logger.Debugf("operator is removing %s from app collection: %v", p.ID(), o.apps)
-	delete(o.apps, id)
-}
diff --git a/internal/pkg/agent/operation/operator_handlers.go b/internal/pkg/agent/operation/operator_handlers.go
deleted file mode 100644
index 36f10b3d70e..00000000000
--- a/internal/pkg/agent/operation/operator_handlers.go
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package operation
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/configrequest"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/artifact"
-	"github.com/elastic/elastic-agent/internal/pkg/core/app"
-	"github.com/elastic/elastic-agent/internal/pkg/release"
-)
-
-type handleFunc func(step configrequest.Step) error
-
-func (o *Operator) initHandlerMap() {
-	hm := make(map[string]handleFunc)
-
-	hm[configrequest.StepRun] = o.handleRun
-	hm[configrequest.StepRemove] = o.handleRemove
-
-	o.handlers = hm
-}
-
-func (o *Operator) handleRun(step configrequest.Step) error {
-	if strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) {
-		return o.handleStartSidecar(step)
-	}
-
-	p, cfg, err := getProgramFromStep(step, o.config.DownloadConfig)
-	if err != nil {
-		return errors.New(err,
-			"operator.handleStart failed to create program",
-			errors.TypeApplication,
-			errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()))
-	}
-
-	return o.start(p, cfg)
-}
-
-func (o *Operator) handleRemove(step configrequest.Step) error {
-	o.logger.Debugf("stopping process %s: %s", step.ProgramSpec.CommandName(), step.ID)
-	if strings.EqualFold(step.ProgramSpec.CommandName(), monitoringName) {
-		return o.handleStopSidecar(step)
-	}
-
-	p, _, err := getProgramFromStep(step, o.config.DownloadConfig)
-	if err != nil {
-		return errors.New(err,
-			"operator.handleRemove failed to stop program",
-			errors.TypeApplication,
-			errors.M(errors.MetaKeyAppName, step.ProgramSpec.CommandName()))
-	}
-
-	return o.stop(p)
-}
-
-func getProgramFromStep(step configrequest.Step, artifactConfig *artifact.Config) (Descriptor, map[string]interface{}, error) {
-	return getProgramFromStepWithTags(step, artifactConfig, nil)
-}
-
-func getProgramFromStepWithTags(step configrequest.Step, artifactConfig *artifact.Config, tags map[app.Tag]string) (Descriptor, map[string]interface{}, error) {
-	config, err := getConfigFromStep(step)
-	if err != nil {
-		return nil, nil, err
-	}
-
-	version := step.Version
-	if release.Snapshot() {
-		version = fmt.Sprintf("%s-SNAPSHOT", version)
-	}
-
-	p := app.NewDescriptor(step.ProgramSpec, version, artifactConfig, tags)
-	return p, config, nil
-}
-
-func getConfigFromStep(step configrequest.Step) (map[string]interface{}, error) {
-	metConfig, hasConfig := step.Meta[configrequest.MetaConfigKey]
-
-	if !hasConfig && needsMetaConfig(step) {
-		return nil, fmt.Errorf("step: %s, no config in metadata", step.ID)
-	}
-
-	var config map[string]interface{}
-	if hasConfig {
-		var ok bool
-		config, ok = metConfig.(map[string]interface{})
-		if !ok {
-			return nil, errors.New(errors.TypeConfig,
-				fmt.Sprintf("step: %s, program config is in invalid format", step.ID))
-		}
-	}
-
-	return config, nil
-}
-
-func needsMetaConfig(step configrequest.Step) bool {
-	return step.ID == configrequest.StepRun
-}
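A note on the step contract the deleted handlers relied on: a StepRun step was only usable if its metadata carried the program configuration under configrequest.MetaConfigKey, while a StepRemove step could omit it. A minimal sketch of that contract, from inside the operation package (the step literal is hypothetical; field and key names follow the deleted code above):

    step := configrequest.Step{
        ID:      configrequest.StepRun,
        Version: "8.0.0",
        Meta: map[string]interface{}{
            // The program's config travels as a plain map under MetaConfigKey.
            configrequest.MetaConfigKey: map[string]interface{}{"inputs": []interface{}{}},
        },
    }
    cfg, err := getConfigFromStep(step) // errors only when a run step carries no config in metadata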
diff --git a/internal/pkg/agent/operation/operator_test.go b/internal/pkg/agent/operation/operator_test.go
deleted file mode 100644
index 8400918a023..00000000000
--- a/internal/pkg/agent/operation/operator_test.go
+++ /dev/null
@@ -1,489 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package operation
-
-import (
-	"fmt"
-	"math/rand"
-	"net"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-
-	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
-	"github.com/elastic/elastic-agent/internal/pkg/core/state"
-)
-
-func TestMain(m *testing.M) {
-	// init supported with test cases
-	port, err := getFreePort()
-	if err != nil {
-		panic(err)
-	}
-
-	configurableSpec := program.Spec{
-		Name: "configurable",
-		Cmd:  "configurable",
-		Args: []string{},
-	}
-
-	serviceSpec := program.Spec{
-		ServicePort: port,
-		Name:        "serviceable",
-		Cmd:         "serviceable",
-		Args:        []string{fmt.Sprintf("%d", port)},
-	}
-
-	program.Supported = append(program.Supported, configurableSpec, serviceSpec)
-	program.SupportedMap["configurable"] = configurableSpec
-	program.SupportedMap["serviceable"] = serviceSpec
-
-	if err := isAvailable("configurable"); err != nil {
-		panic(err)
-	}
-	if err := isAvailable("serviceable"); err != nil {
-		panic(err)
-	}
-
-	os.Exit(m.Run())
-}
-
-func TestNotSupported(t *testing.T) {
-	p := getProgram("notsupported", "1.0")
-
-	operator := getTestOperator(t, downloadPath, installPath, p)
-	err := operator.start(p, nil)
-	if err == nil {
-		t.Fatal("was expecting error but got none")
-	}
-}
-
-func TestConfigurableRun(t *testing.T) {
-	p := getProgram("configurable", "1.0")
-
-	operator := getTestOperator(t, downloadPath, installPath, p)
-	if err := operator.start(p, nil); err != nil {
-		t.Fatal(err)
-	}
-	defer operator.stop(p) // failure catch, to ensure no sub-process stays running
-
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status != state.Healthy {
-			return fmt.Errorf("process never went to running")
-		}
-		return nil
-	})
-
-	// try to configure
-	cfg := make(map[string]interface{})
-	tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32()))
-	cfg["TestFile"] = tstFilePath
-	if err := operator.pushConfig(p, cfg); err != nil {
-		t.Fatalf("failed to config: %v", err)
-	}
-
-	waitFor(t, func() error {
-		if s, err := os.Stat(tstFilePath); err != nil || s == nil {
-			return fmt.Errorf("failed to create a file using Config call %s", tstFilePath)
-		}
-		return nil
-	})
-
-	// wait to finish configuring
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if ok && item.Status == state.Configuring {
-			return fmt.Errorf("process still configuring")
-		}
-		return nil
-	})
-
-	items := operator.State()
-	item0, ok := items[p.ID()]
-	if !ok || item0.Status != state.Healthy {
-		t.Fatalf("Process no longer running after config %#v", items)
-	}
-	pid := item0.ProcessInfo.PID
-
-	// stop the process
-	if err := operator.stop(p); err != nil {
-		t.Fatalf("Failed to stop process with PID %d: %v", pid, err)
-	}
-
-	waitFor(t, func() error {
-		items := operator.State()
-		_, ok := items[p.ID()]
-		if ok {
-			return fmt.Errorf("state for process, should be removed")
-		}
-		return nil
-	})
-
-	// check process stopped
-	proc, err := os.FindProcess(pid)
-	if err != nil && proc != nil {
-		t.Fatal("Process found")
-	}
-}
-
-func TestConfigurableFailed(t *testing.T) {
-	p := getProgram("configurable", "1.0")
-
-	operator := getTestOperator(t, downloadPath, installPath, p)
-	if err := operator.start(p, nil); err != nil {
-		t.Fatal(err)
-	}
-	defer operator.stop(p) // failure catch, to ensure no sub-process stays running
-
-	var pid int
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status != state.Healthy {
-			return fmt.Errorf("process never went to running")
-		}
-		pid = item.ProcessInfo.PID
-		return nil
-	})
-	items := operator.State()
-	item, ok := items[p.ID()]
-	if !ok {
-		t.Fatalf("no state for process")
-	}
-	assert.Equal(t, map[string]interface{}{
-		"status":  float64(proto.StateObserved_HEALTHY),
-		"message": "Running",
-	}, item.Payload)
-
-	// try to configure (with failed status)
-	cfg := make(map[string]interface{})
-	tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32()))
-	cfg["TestFile"] = tstFilePath
-	cfg["Status"] = proto.StateObserved_FAILED
-	if err := operator.pushConfig(p, cfg); err != nil {
-		t.Fatalf("failed to config: %v", err)
-	}
-
-	// should still create the file
-	waitFor(t, func() error {
-		if s, err := os.Stat(tstFilePath); err != nil || s == nil {
-			return fmt.Errorf("failed to create a file using Config call %s", tstFilePath)
-		}
-		return nil
-	})
-
-	// wait for not running status
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status == state.Healthy {
-			return fmt.Errorf("process never left running")
-		}
-		return nil
-	})
-
-	// don't send status anymore
-	delete(cfg, "Status")
-	if err := operator.pushConfig(p, cfg); err != nil {
-		t.Fatalf("failed to config: %v", err)
-	}
-
-	// check that it restarted (has a new PID)
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.ProcessInfo == nil {
-			return fmt.Errorf("in restart loop")
-		}
-		if pid == item.ProcessInfo.PID {
-			return fmt.Errorf("process never restarted")
-		}
-		pid = item.ProcessInfo.PID
-		return nil
-	})
-
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status != state.Healthy {
-			return fmt.Errorf("process never went to back to running")
-		}
-		return nil
-	})
-
-	// stop the process
-	if err := operator.stop(p); err != nil {
-		t.Fatalf("Failed to stop process with PID %d: %v", pid, err)
-	}
-
-	// check process stopped
-	proc, err := os.FindProcess(pid)
-	if err != nil && proc != nil {
-		t.Fatal("Process found")
-	}
-}
-
-func TestConfigurableCrash(t *testing.T) {
-	p := getProgram("configurable", "1.0")
-
-	operator := getTestOperator(t, downloadPath, installPath, p)
-	if err := operator.start(p, nil); err != nil {
-		t.Fatal(err)
-	}
-	defer operator.stop(p) // failure catch, to ensure no sub-process stays running
-
-	var pid int
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status != state.Healthy {
-			return fmt.Errorf("process never went to running")
-		}
-		pid = item.ProcessInfo.PID
-		return nil
-	})
-
-	// try to configure (with failed status)
-	cfg := make(map[string]interface{})
-	tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32()))
-	cfg["TestFile"] = tstFilePath
-	cfg["Crash"] = true
-	if err := operator.pushConfig(p, cfg); err != nil {
-		t.Fatalf("failed to config: %v", err)
-	}
-
-	// should still create the file
-	waitFor(t, func() error {
-		if s, err := os.Stat(tstFilePath); err != nil || s == nil {
-			return fmt.Errorf("failed to create a file using Config call %s", tstFilePath)
-		}
-		return nil
-	})
-
-	// wait for not running status
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status == state.Healthy {
-			return fmt.Errorf("process never left running")
-		}
-		return nil
-	})
-
-	// don't send crash anymore
-	delete(cfg, "Crash")
-	if err := operator.pushConfig(p, cfg); err != nil {
-		t.Fatalf("failed to config: %v", err)
-	}
-
-	// check that it restarted (has a new PID)
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.ProcessInfo == nil {
-			return fmt.Errorf("in restart loop")
-		}
-		if pid == item.ProcessInfo.PID {
-			return fmt.Errorf("process never restarted")
-		}
-		pid = item.ProcessInfo.PID
-		return nil
-	})
-
-	// let the process get back to ready
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status != state.Healthy {
-			return fmt.Errorf("process never went to back to running")
-		}
-		return nil
-	})
-
-	// stop the process
-	if err := operator.stop(p); err != nil {
-		t.Fatalf("Failed to stop process with PID %d: %v", pid, err)
-	}
-
-	// check process stopped
-	proc, err := os.FindProcess(pid)
-	if err != nil && proc != nil {
-		t.Fatal("Process found")
-	}
-}
-
-func TestConfigurableStartStop(t *testing.T) {
-	p := getProgram("configurable", "1.0")
-
-	operator := getTestOperator(t, downloadPath, installPath, p)
-	defer operator.stop(p) // failure catch, to ensure no sub-process stays running
-
-	// start and stop it 3 times
-	for i := 0; i < 3; i++ {
-		if err := operator.start(p, nil); err != nil {
-			t.Fatal(err)
-		}
-
-		waitFor(t, func() error {
-			items := operator.State()
-			item, ok := items[p.ID()]
-			if !ok {
-				return fmt.Errorf("no state for process")
-			}
-			if item.Status != state.Healthy {
-				return fmt.Errorf("process never went to running")
-			}
-			return nil
-		})
-
-		// stop the process
-		if err := operator.stop(p); err != nil {
-			t.Fatalf("Failed to stop process: %v", err)
-		}
-
-		waitFor(t, func() error {
-			items := operator.State()
-			_, ok := items[p.ID()]
-			if ok {
-				return fmt.Errorf("state for process, should be removed")
-			}
-			return nil
-		})
-	}
-}
-
-func TestConfigurableService(t *testing.T) {
-	t.Skip("Flaky test: https://github.com/elastic/beats/issues/23607")
-	p := getProgram("serviceable", "1.0")
-
-	operator := getTestOperator(t, downloadPath, installPath, p)
-	if err := operator.start(p, nil); err != nil {
-		t.Fatal(err)
-	}
-	defer operator.stop(p) // failure catch, to ensure no sub-process stays running
-
-	// emulating a service, so we need to start the binary here in the test
-	spec := p.ProcessSpec()
-	cmd := exec.Command(spec.BinaryPath, fmt.Sprintf("%d", p.ServicePort()))
-	cmd.Env = append(cmd.Env, os.Environ()...)
-	cmd.Dir = filepath.Dir(spec.BinaryPath)
-	cmd.Stdout = os.Stdout
-	cmd.Stderr = os.Stderr
-	if err := cmd.Start(); err != nil {
-		t.Fatal(err)
-	}
-
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if !ok {
-			return fmt.Errorf("no state for process")
-		}
-		if item.Status != state.Healthy {
-			return fmt.Errorf("process never went to running")
-		}
-		return nil
-	})
-
-	// try to configure
-	cfg := make(map[string]interface{})
-	tstFilePath := filepath.Join(os.TempDir(), fmt.Sprintf("tmp%d", rand.Uint32()))
-	cfg["TestFile"] = tstFilePath
-	if err := operator.pushConfig(p, cfg); err != nil {
-		t.Fatalf("failed to config: %v", err)
-	}
-
-	waitFor(t, func() error {
-		if s, err := os.Stat(tstFilePath); err != nil || s == nil {
-			return fmt.Errorf("failed to create a file using Config call %s", tstFilePath)
-		}
-		return nil
-	})
-
-	// wait to finish configuring
-	waitFor(t, func() error {
-		items := operator.State()
-		item, ok := items[p.ID()]
-		if ok && item.Status == state.Configuring {
-			return fmt.Errorf("process still configuring")
-		}
-		return nil
-	})
-
-	items := operator.State()
-	item0, ok := items[p.ID()]
-	if !ok || item0.Status != state.Healthy {
-		t.Fatalf("Process no longer running after config %#v", items)
-	}
-
-	// stop the process
-	if err := operator.stop(p); err != nil {
-		t.Fatalf("Failed to stop service: %v", err)
-	}
-
-	if err := cmd.Wait(); err != nil {
-		t.Fatalf("Process failed: %v", err)
-	}
-}
-
-func isAvailable(name string) error {
-	p := getProgram(name, "version")
-	spec := p.ProcessSpec()
-	path := spec.BinaryPath
-
-	if s, err := os.Stat(path); err != nil || s == nil {
-		return fmt.Errorf("binary not available %s: %v", spec.BinaryPath, err)
-	}
-	return nil
-}
-
-// getFreePort finds a free port.
-func getFreePort() (int, error) {
-	addr, err := net.ResolveTCPAddr("tcp", "localhost:0")
-	if err != nil {
-		return 0, err
-	}
-	l, err := net.ListenTCP("tcp", addr)
-	if err != nil {
-		return 0, err
-	}
-	defer l.Close()
-	return l.Addr().(*net.TCPAddr).Port, nil
-}
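getFreePort above uses the standard bind-to-port-zero idiom: ask the kernel for an ephemeral port, close the listener, and hand the port number out. The port is only guaranteed free until something else binds it, which is acceptable for tests. A self-contained sketch of the same idiom (illustrative, standard library only):

    package main

    import (
        "fmt"
        "net"
    )

    func main() {
        l, err := net.Listen("tcp", "127.0.0.1:0") // port 0 lets the OS pick a free port
        if err != nil {
            panic(err)
        }
        port := l.Addr().(*net.TCPAddr).Port
        l.Close() // anyone may grab the port after this point
        fmt.Println("free port:", port)
    }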
diff --git a/internal/pkg/agent/operation/tests/downloads/-1.0-darwin-x86_64.tar.gz b/internal/pkg/agent/operation/tests/downloads/-1.0-darwin-x86_64.tar.gz
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/internal/pkg/agent/operation/tests/downloads/configurable-1.0-darwin-x86_64.tar.gz b/internal/pkg/agent/operation/tests/downloads/configurable-1.0-darwin-x86_64.tar.gz
deleted file mode 100644
index e69de29bb2d..00000000000
diff --git a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md b/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md
deleted file mode 100644
index 309d9b655d8..00000000000
--- a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Testing program emulating tool which is configurable using GRPC communication channel when running as a sub-process.
diff --git a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go b/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go
deleted file mode 100644
index 1e6c88106b6..00000000000
--- a/internal/pkg/agent/operation/tests/scripts/configurable-1.0-darwin-x86_64/main.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package main
-
-import (
-	"context"
-	"fmt"
-	"os"
-	"path/filepath"
-
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent-client/v7/pkg/client"
-	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
-)
-
-func main() {
-	f, _ := os.OpenFile(filepath.Join(os.TempDir(), "testing.out"), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
-	f.WriteString("starting \n")
-	ctx, cancel := context.WithCancel(context.Background())
-	s := &configServer{
-		f:      f,
-		ctx:    ctx,
-		cancel: cancel,
-	}
-	client, err := client.NewFromReader(os.Stdin, s)
-	if err != nil {
-		f.WriteString(err.Error())
-		panic(err)
-	}
-	s.client = client
-	err = client.Start(ctx)
-	if err != nil {
-		f.WriteString(err.Error())
-		panic(err)
-	}
-	<-ctx.Done()
-	f.WriteString("finished \n")
-}
-
-type configServer struct {
-	f      *os.File
-	ctx    context.Context
-	cancel context.CancelFunc
-	client client.Client
-}
-
-func (s *configServer) OnConfig(cfgString string) {
-	s.client.Status(proto.StateObserved_CONFIGURING, "Writing config file", nil)
-
-	testCfg := &TestConfig{}
-	if err := yaml.Unmarshal([]byte(cfgString), &testCfg); err != nil {
-		s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to unmarshall config: %s", err), nil)
-		return
-	}
-
-	if testCfg.TestFile != "" {
-		tf, err := os.Create(testCfg.TestFile)
-		if err != nil {
-			s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to create file %s: %s", testCfg.TestFile, err), nil)
-			return
-		}
-
-		err = tf.Close()
-		if err != nil {
-			s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to close file %s: %s", testCfg.TestFile, err), nil)
-			return
-		}
-	}
-
-	if testCfg.Crash {
-		os.Exit(2)
-	}
-
-	if testCfg.Status != nil {
-		s.client.Status(*testCfg.Status, "Custom status", map[string]interface{}{
-			"status":  *testCfg.Status,
-			"message": "Custom status",
-		})
-	} else {
-		s.client.Status(proto.StateObserved_HEALTHY, "Running", map[string]interface{}{
-			"status":  proto.StateObserved_HEALTHY,
-			"message": "Running",
-		})
-	}
-}
-
-func (s *configServer) OnStop() {
-	s.client.Status(proto.StateObserved_STOPPING, "Stopping", nil)
-	s.cancel()
-}
-
-func (s *configServer) OnError(err error) {
-	s.f.WriteString(err.Error())
-}
-
-// TestConfig is a configuration for testing Config calls
-type TestConfig struct {
-	TestFile string                      `config:"TestFile" yaml:"TestFile"`
-	Status   *proto.StateObserved_Status `config:"Status" yaml:"Status"`
-	Crash    bool                        `config:"Crash" yaml:"Crash"`
-}
diff --git a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md b/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md
deleted file mode 100644
index da8cc52049c..00000000000
--- a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Testing program emulating tool which is configurable using GRPC communication channel when running as an external service.
diff --git a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go b/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go
deleted file mode 100644
index 99ceab143f1..00000000000
--- a/internal/pkg/agent/operation/tests/scripts/serviceable-1.0-darwin-x86_64/main.go
+++ /dev/null
@@ -1,142 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package main
-
-import (
-	"context"
-	"crypto/tls"
-	"crypto/x509"
-	"fmt"
-	"net"
-	"os"
-	"path/filepath"
-	"strconv"
-
-	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials"
-	protobuf "google.golang.org/protobuf/proto"
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent-client/v7/pkg/client"
-	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
-)
-
-func main() {
-	srvPort, err := strconv.Atoi(os.Args[1])
-	if err != nil {
-		panic(err)
-	}
-	f, _ := os.OpenFile(filepath.Join(os.TempDir(), "testing.out"), os.O_APPEND|os.O_CREATE|os.O_RDWR, 0666)
-	_, _ = f.WriteString("starting \n")
-	ctx, cancel := context.WithCancel(context.Background())
-	s := &configServer{
-		f:      f,
-		ctx:    ctx,
-		cancel: cancel,
-	}
-	_, _ = f.WriteString(fmt.Sprintf("reading creds from port: %d\n", srvPort))
-	client, err := clientFromNet(srvPort, s)
-	if err != nil {
-		_, _ = f.WriteString(err.Error())
-		panic(err)
-	}
-	s.client = client
-	err = client.Start(ctx)
-	if err != nil {
-		_, _ = f.WriteString(err.Error())
-		panic(err)
-	}
-	<-ctx.Done()
-	_, _ = f.WriteString("finished \n")
-}
-
-type configServer struct {
-	f      *os.File
-	ctx    context.Context
-	cancel context.CancelFunc
-	client client.Client
-}
-
-func (s *configServer) OnConfig(cfgString string) {
-	_ = s.client.Status(proto.StateObserved_CONFIGURING, "Writing config file", nil)
-
-	testCfg := &TestConfig{}
-	if err := yaml.Unmarshal([]byte(cfgString), &testCfg); err != nil {
-		_ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to unmarshall config: %s", err), nil)
-		return
-	}
-
-	if testCfg.TestFile != "" {
-		tf, err := os.Create(testCfg.TestFile)
-		if err != nil {
-			_ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to create file %s: %s", testCfg.TestFile, err), nil)
-			return
-		}
-
-		err = tf.Close()
-		if err != nil {
-			_ = s.client.Status(proto.StateObserved_FAILED, fmt.Sprintf("Failed to close file %s: %s", testCfg.TestFile, err), nil)
-			return
-		}
-	}
-
-	_ = s.client.Status(proto.StateObserved_HEALTHY, "Running", map[string]interface{}{
-		"status":  proto.StateObserved_HEALTHY,
-		"message": "Running",
-	})
-}
-
-func (s *configServer) OnStop() {
-	_ = s.client.Status(proto.StateObserved_STOPPING, "Stopping", nil)
-	s.cancel()
-}
-
-func (s *configServer) OnError(err error) {
-	_, _ = s.f.WriteString(err.Error())
-}
-
-// TestConfig is a configuration for testing Config calls
-type TestConfig struct {
-	TestFile string `config:"TestFile" yaml:"TestFile"`
-}
-
-func getCreds(port int) (*proto.ConnInfo, error) {
-	c, err := net.Dial("tcp", fmt.Sprintf("127.0.0.1:%d", port))
-	if err != nil {
-		return nil, err
-	}
-	defer c.Close()
-	buf := make([]byte, 1024*1024)
-	n, err := c.Read(buf)
-	if err != nil {
-		return nil, err
-	}
-	var connInfo proto.ConnInfo
-	err = protobuf.Unmarshal(buf[:n], &connInfo)
-	if err != nil {
-		return nil, err
-	}
-	return &connInfo, nil
-}
-
-func clientFromNet(port int, impl client.StateInterface, actions ...client.Action) (client.Client, error) {
-	connInfo, err := getCreds(port)
-	if err != nil {
-		return nil, err
-	}
-	cert, err := tls.X509KeyPair(connInfo.PeerCert, connInfo.PeerKey)
-	if err != nil {
-		return nil, err
-	}
-	caCertPool := x509.NewCertPool()
-	caCertPool.AppendCertsFromPEM(connInfo.CaCert)
-	trans := credentials.NewTLS(&tls.Config{
-		ServerName:   connInfo.ServerName,
-		Certificates: []tls.Certificate{cert},
-		RootCAs:      caCertPool,
-		MinVersion:   tls.VersionTLS12,
-	})
-	return client.New(connInfo.Addr, connInfo.Token, impl, actions, grpc.WithTransportCredentials(trans)), nil
-}
diff --git a/internal/pkg/agent/program/program.go b/internal/pkg/agent/program/program.go
deleted file mode 100644
index 08f30a81609..00000000000
--- a/internal/pkg/agent/program/program.go
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package program
-
-import (
-	"fmt"
-	"strings"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
-	"github.com/elastic/elastic-agent/internal/pkg/eql"
-)
-
-// Program represents a program that must be started or must run .
-type Program struct {
-	Spec   Spec
-	Config *transpiler.AST
-}
-
-// Cmd return the execution command to run.
-func (p *Program) Cmd() string {
-	return p.Spec.Command()
-}
-
-// Checksum return the checksum of the current instance of the program.
-func (p *Program) Checksum() string {
-	return p.Config.HashStr()
-}
-
-// Identifier returns the Program unique identifier.
-func (p *Program) Identifier() string {
-	return strings.ToLower(p.Spec.Name)
-}
-
-// Configuration return the program configuration in a map[string]iface format.
-func (p *Program) Configuration() map[string]interface{} {
-	m, err := p.Config.Map()
-	if err != nil {
-		// TODO, that should not panic, refactor to remove any panic.
-		// Will refactor to never return an error at this stage.
-		panic(err)
-	}
-	return m
-}
-
-// Programs take a Tree representation of the main configuration and apply all the different
-// programs rules and generate individual configuration from the rules.
-func Programs(agentInfo transpiler.AgentInfo, singleConfig *transpiler.AST) (map[string][]Program, error) {
-	grouped, err := groupByOutputs(singleConfig)
-	if err != nil {
-		return nil, errors.New(err, errors.TypeConfig, "fail to extract program configuration")
-	}
-
-	groupedPrograms := make(map[string][]Program)
-	for k, config := range grouped {
-		programs, err := DetectPrograms(agentInfo, config)
-		if err != nil {
-			return nil, errors.New(err, errors.TypeConfig, "fail to generate program configuration")
-		}
-		groupedPrograms[k] = programs
-	}
-
-	return groupedPrograms, nil
-}
-
-// DetectPrograms returns the list of programs detected from the provided configuration.
-func DetectPrograms(agentInfo transpiler.AgentInfo, singleConfig *transpiler.AST) ([]Program, error) {
-	programs := make([]Program, 0)
-	for _, spec := range Supported {
-		specificAST := singleConfig.Clone()
-		ok, err := DetectProgram(spec.Rules, spec.When, spec.Constraints, agentInfo, specificAST)
-		if err != nil {
-			return nil, err
-		}
-		if !ok {
-			continue
-		}
-		program := Program{
-			Spec:   spec,
-			Config: specificAST,
-		}
-		programs = append(programs, program)
-	}
-
-	return programs, nil
-}
-
-// DetectProgram returns true or false if this program exists in the AST.
-//
-// Note `ast` is modified to match what the program expects. Should clone the AST before passing to
-// this function if you want to still have the original.
-func DetectProgram(rules *transpiler.RuleList, when string, constraints string, info transpiler.AgentInfo, ast *transpiler.AST) (bool, error) {
-	if len(constraints) > 0 {
-		constraints, err := eql.New(constraints)
-		if err != nil {
-			return false, err
-		}
-		ok, err := constraints.Eval(ast)
-		if err != nil {
-			return false, err
-		}
-		if !ok {
-			return false, nil
-		}
-	}
-
-	err := rules.Apply(info, ast)
-	if err != nil {
-		return false, err
-	}
-
-	if len(when) == 0 {
-		return false, ErrMissingWhen
-	}
-
-	expression, err := eql.New(when)
-	if err != nil {
-		return false, err
-	}
-
-	return expression.Eval(ast)
-}
-
-// KnownProgramNames returns a list of runnable programs by the elastic-agent.
-func KnownProgramNames() []string {
-	names := make([]string, len(Supported))
-	for idx, program := range Supported {
-		names[idx] = program.Name
-	}
-
-	return names
-}
-
-func groupByOutputs(single *transpiler.AST) (map[string]*transpiler.AST, error) {
-	const (
-		outputsKey = "outputs"
-		outputKey  = "output"
-		inputsKey  = "inputs"
-		typeKey    = "type"
-	)
-
-	if _, found := transpiler.Select(single, outputsKey); !found {
-		return nil, errors.New("invalid configuration missing outputs configuration")
-	}
-
-	// Normalize using an intermediate map.
-	normMap, err := single.Map()
-	if err != nil {
-		return nil, errors.New(err, "could not read configuration")
-	}
-
-	// Recreates multiple configuration grouped by the name of the outputs.
-	// Each configuration will be started into his own operator with the same name as the output.
-	grouped := make(map[string]*outputType)
-
-	m, ok := normMap[outputsKey]
-	if !ok {
-		return nil, errors.New("fail to received a list of configured outputs")
-	}
-
-	out, ok := m.(map[string]interface{})
-	if !ok {
-		return nil, errors.New(fmt.Errorf(
-			"invalid outputs configuration received, expecting a map not a %T",
-			m,
-		))
-	}
-
-	for k, v := range out {
-		outputsOptions, ok := v.(map[string]interface{})
-		if !ok {
-			return nil, errors.New("invalid type for output configuration block")
-		}
-
-		t, ok := outputsOptions[typeKey]
-		if !ok {
-			return nil, fmt.Errorf("missing output type named output %s", k)
-		}
-
-		n, ok := t.(string)
-		if !ok {
-			return nil, fmt.Errorf("invalid type received %T and expecting a string", t)
-		}
-
-		delete(outputsOptions, typeKey)
-
-		enabled, err := isEnabled(outputsOptions)
-		if err != nil {
-			return nil, err
-		}
-
-		// Propagate global configuration to each individual configuration.
-		clone := cloneMap(normMap)
-		delete(clone, outputsKey)
-		clone[outputKey] = map[string]interface{}{n: v}
-		clone[inputsKey] = make([]map[string]interface{}, 0)
-
-		grouped[k] = &outputType{
-			enabled: enabled,
-			config:  clone,
-		}
-	}
-
-	s, ok := normMap[inputsKey]
-	if !ok {
-		s = make([]interface{}, 0)
-	}
-
-	list, ok := s.([]interface{})
-	if !ok {
-		return nil, errors.New("fail to receive a list of configured streams")
-	}
-
-	for _, item := range list {
-		stream, ok := item.(map[string]interface{})
-		if !ok {
-			return nil, fmt.Errorf(
-				"invalid type for stream expecting a map of options and received %T",
-				item,
-			)
-		}
-		targetName := findOutputName(stream)
-
-		// Do we have configuration for that specific outputs if not we fail to load the configuration.
-		config, ok := grouped[targetName]
-		if !ok {
-			return nil, fmt.Errorf("unknown configuration output with name %s", targetName)
-		}
-
-		streams := config.config[inputsKey].([]map[string]interface{})
-		streams = append(streams, stream)
-
-		config.config[inputsKey] = streams
-		grouped[targetName] = config
-	}
-
-	transpiled := make(map[string]*transpiler.AST)
-
-	for name, group := range grouped {
-		if !group.enabled {
-			continue
-		}
-		if len(group.config[inputsKey].([]map[string]interface{})) == 0 {
-			continue
-		}
-
-		ast, err := transpiler.NewAST(group.config)
-		if err != nil {
-			return nil, errors.New(err, "fail to generate configuration for output name %s", name)
-		}
-
-		transpiled[name] = ast
-	}
-
-	return transpiled, nil
-}
-
-func isEnabled(m map[string]interface{}) (bool, error) {
-	const (
-		enabledKey = "enabled"
-	)
-
-	enabled, ok := m[enabledKey]
-	if !ok {
-		return true, nil
-	}
-	switch e := enabled.(type) {
-	case bool:
-		return e, nil
-	}
-	return false, fmt.Errorf("invalid type received for enabled %T and expecting a boolean", enabled)
-}
-
-func findOutputName(m map[string]interface{}) string {
-	const (
-		defaultOutputName = "default"
-		useOutputKey      = "use_output"
-	)
-
-	output, ok := m[useOutputKey]
-	if !ok {
-		return defaultOutputName
-	}
-
-	return output.(string)
-}
-
-func cloneMap(m map[string]interface{}) map[string]interface{} {
-	newMap := make(map[string]interface{})
-	for k, v := range m {
-		sV, ok := v.(map[string]interface{})
-		if ok {
-			newMap[k] = cloneMap(sV)
-			continue
-		}
-		newMap[k] = v
-	}
-
-	return newMap
-}
-
-type outputType struct {
-	enabled bool
-	config  map[string]interface{}
-}
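The behavior of the deleted groupByOutputs is easiest to see on a concrete policy; the deleted TestGroupBy below walks the same logic in full. Given an illustrative configuration such as (not taken from the repository):

    outputs:
      default:
        type: elasticsearch
        hosts: ["https://localhost:9200"]
      monitoring:
        type: logstash
        hosts: ["localhost:5044"]
    inputs:
      - type: log
        paths: ["/var/log/app.log"]   # no use_output, routed to "default"
      - type: system/metrics
        use_output: monitoring        # routed to "monitoring"

each named output becomes its own group: the type key is stripped and the block is re-rooted under a singular output key (for example output.logstash for the "monitoring" group), the remaining global settings are cloned into each group, and every input is attached to the group named by its use_output, defaulting to "default". Groups that are disabled or end up with no inputs are dropped.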
diff --git a/internal/pkg/agent/program/program_test.go b/internal/pkg/agent/program/program_test.go
deleted file mode 100644
index 4d12e40cc8e..00000000000
--- a/internal/pkg/agent/program/program_test.go
+++ /dev/null
@@ -1,614 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package program
-
-import (
-	"flag"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"strings"
-	"testing"
-
-	"github.com/google/go-cmp/cmp"
-	"github.com/pkg/errors"
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	yaml "gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/internal/yamltest"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
-)
-
-var (
-	generateFlag = flag.Bool("generate", false, "Write golden files")
-)
-
-func TestGroupBy(t *testing.T) {
-	t.Run("only named output", func(t *testing.T) {
-		sConfig := map[string]interface{}{
-			"outputs": map[string]interface{}{
-				"special": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-				"infosec1": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-
-			"inputs": []map[string]interface{}{
-				{
-					"type":       "log",
-					"use_output": "special",
-					"streams":    map[string]interface{}{"paths": "/var/log/hello.log"},
-				},
-				{
-					"type":       "system/metrics",
-					"use_output": "special",
-				},
-				{
-					"type":       "log",
-					"streams":    map[string]interface{}{"paths": "/var/log/infosec.log"},
-					"use_output": "infosec1",
-				},
-			},
-		}
-
-		ast, err := transpiler.NewAST(sConfig)
-		require.NoError(t, err)
-
-		grouped, err := groupByOutputs(ast)
-		require.NoError(t, err)
-		require.Equal(t, 2, len(grouped))
-
-		c1 := transpiler.MustNewAST(map[string]interface{}{
-			"output": map[string]interface{}{
-				"elasticsearch": map[string]interface{}{
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-			},
-			"inputs": []map[string]interface{}{
-				{
-					"type":       "log",
-					"streams":    map[string]interface{}{"paths": "/var/log/hello.log"},
-					"use_output": "special",
-				},
-				{
-					"type":       "system/metrics",
-					"use_output": "special",
-				},
-			},
-		})
-
-		c2, _ := transpiler.NewAST(map[string]interface{}{
-			"output": map[string]interface{}{
-				"elasticsearch": map[string]interface{}{
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-			"inputs": []map[string]interface{}{
-				{
-					"type":       "log",
-					"streams":    map[string]interface{}{"paths": "/var/log/infosec.log"},
-					"use_output": "infosec1",
-				},
-			},
-		})
-
-		defaultConfig, ok := grouped["special"]
-		require.True(t, ok)
-		require.Equal(t, c1.Hash(), defaultConfig.Hash())
-
-		infosec1Config, ok := grouped["infosec1"]
-
-		require.True(t, ok)
-		require.Equal(t, c2.Hash(), infosec1Config.Hash())
-	})
-
-	t.Run("fail when the referenced named output doesn't exist", func(t *testing.T) {
-		sConfig := map[string]interface{}{
-			"monitoring": map[string]interface{}{
-				"elasticsearch": map[string]interface{}{
-					"hosts": "localhost",
-				},
-			},
-			"outputs": map[string]interface{}{
-				"default": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-				"infosec1": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-
-			"inputs": []map[string]interface{}{
-				{
-					"type":       "log",
-					"streams":    map[string]interface{}{"paths": "/var/log/hello.log"},
-					"use_output": "special",
-				},
-				{
-					"type":       "system/metrics",
-					"use_output": "special",
-				},
-				{
-					"type":       "log",
-					"streams":    map[string]interface{}{"paths": "/var/log/infosec.log"},
-					"use_output": "donotexist",
-				},
-			},
-		}
-
-		ast, err := transpiler.NewAST(sConfig)
-		require.NoError(t, err)
-
-		_, err = groupByOutputs(ast)
-		require.Error(t, err)
-	})
-
-	t.Run("only default output", func(t *testing.T) {
-		sConfig := map[string]interface{}{
-			"outputs": map[string]interface{}{
-				"default": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-				"infosec1": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-			"inputs": []map[string]interface{}{
-				{
-					"type":    "log",
-					"streams": map[string]interface{}{"paths": "/var/log/hello.log"},
-				},
-
-				{
-					"type": "system/metrics",
-				},
-				{
-					"type":    "log",
-					"streams": map[string]interface{}{"paths": "/var/log/infosec.log"},
-				},
-			},
-		}
-
-		ast, err := transpiler.NewAST(sConfig)
-		require.NoError(t, err)
-
-		grouped, err := groupByOutputs(ast)
-		require.NoError(t, err)
-		require.Equal(t, 1, len(grouped))
-
-		c1 := transpiler.MustNewAST(map[string]interface{}{
-			"output": map[string]interface{}{
-				"elasticsearch": map[string]interface{}{
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-			},
-			"inputs": []map[string]interface{}{
-				{
-					"type":    "log",
-					"streams": map[string]interface{}{"paths": "/var/log/hello.log"},
-				},
-
-				{
-					"type": "system/metrics",
-				},
-
-				{
-					"type":    "log",
-					"streams": map[string]interface{}{"paths": "/var/log/infosec.log"},
-				},
-			},
-		})
-
-		defaultConfig, ok := grouped["default"]
-		require.True(t, ok)
-		require.Equal(t, c1.Hash(), defaultConfig.Hash())
-
-		_, ok = grouped["infosec1"]
-
-		require.False(t, ok)
-	})
-
-	t.Run("default and named output", func(t *testing.T) {
-		sConfig := map[string]interface{}{
-			"outputs": map[string]interface{}{
-				"default": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-				"infosec1": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-			"inputs": []map[string]interface{}{
-				{
-					"type":    "log",
-					"streams": map[string]interface{}{"paths": "/var/log/hello.log"},
-				},
-
-				{
-					"type": "system/metrics",
-				},
-
-				{
-					"type":       "log",
-					"streams":    map[string]interface{}{"paths": "/var/log/infosec.log"},
-					"use_output": "infosec1",
-				},
-			},
-		}
-
-		ast, err := transpiler.NewAST(sConfig)
-		require.NoError(t, err)
-
-		grouped, err := groupByOutputs(ast)
-		require.NoError(t, err)
-		require.Equal(t, 2, len(grouped))
-
-		c1 := transpiler.MustNewAST(map[string]interface{}{
-			"output": map[string]interface{}{
-				"elasticsearch": map[string]interface{}{
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-			},
-			"inputs": []map[string]interface{}{
-				{
-					"type":    "log",
-					"streams": map[string]interface{}{"paths": "/var/log/hello.log"},
-				},
-
-				{
-					"type": "system/metrics",
-				},
-			},
-		})
-
-		c2, _ := transpiler.NewAST(map[string]interface{}{
-			"output": map[string]interface{}{
-				"elasticsearch": map[string]interface{}{
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-			"inputs": []map[string]interface{}{
-				{
-					"type":       "log",
-					"streams":    map[string]interface{}{"paths": "/var/log/infosec.log"},
-					"use_output": "infosec1",
-				},
-			},
-		})
-
-		defaultConfig, ok := grouped["default"]
-		require.True(t, ok)
-		require.Equal(t, c1.Hash(), defaultConfig.Hash())
-
-		infosec1Config, ok := grouped["infosec1"]
-
-		require.True(t, ok)
-		require.Equal(t, c2.Hash(), infosec1Config.Hash())
-	})
-
-	t.Run("streams is an empty list", func(t *testing.T) {
-		sConfig := map[string]interface{}{
-			"outputs": map[string]interface{}{
-				"default": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-				"infosec1": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-			"datasources": []map[string]interface{}{},
-		}
-
-		ast, err := transpiler.NewAST(sConfig)
-		require.NoError(t, err)
-
-		grouped, err := groupByOutputs(ast)
-		require.NoError(t, err)
-		require.Equal(t, 0, len(grouped))
-	})
-
-	t.Run("no streams are defined", func(t *testing.T) {
-		sConfig := map[string]interface{}{
-			"outputs": map[string]interface{}{
-				"default": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "xxx",
-					"username": "myusername",
-					"password": "mypassword",
-				},
-				"infosec1": map[string]interface{}{
-					"type":     "elasticsearch",
-					"hosts":    "yyy",
-					"username": "anotherusername",
-					"password": "anotherpassword",
-				},
-			},
-		}
-
-		ast, err := transpiler.NewAST(sConfig)
-		require.NoError(t, err)
-
-		grouped, err := groupByOutputs(ast)
-		require.NoError(t, err)
-		require.Equal(t, 0, len(grouped))
-	})
-}
-
-func TestConfiguration(t *testing.T) {
-	defer os.Remove("fleet.yml")
-
-	testcases := map[string]struct {
-		programs map[string][]string
-		err      bool
-	}{
-		"namespace": {
-			programs: map[string][]string{
-				"default": {"filebeat", "fleet-server", "heartbeat", "metricbeat", "endpoint", "packetbeat"},
-			},
-		},
-		"logstash_config": {
-			programs: map[string][]string{
-				"default":       {"filebeat", "fleet-server", "heartbeat", "metricbeat", "endpoint", "packetbeat"},
-				"elasticsearch": {"filebeat"},
-			},
-		},
-		"single_config": {
-			programs: map[string][]string{
-				"default": {"filebeat", "fleet-server", "heartbeat", "metricbeat", "endpoint", "packetbeat"},
-			},
-		},
-		"audit_config": {
-			programs: map[string][]string{
-				"default": {"auditbeat"},
-			},
-		},
-		"fleet_server": {
-			programs: map[string][]string{
-				"default": {"fleet-server"},
-			},
-		},
-		"synthetics_config": {
-			programs: map[string][]string{
-				"default": {"heartbeat"},
-			},
-		},
-		"enabled_true": {
-			programs: map[string][]string{
-				"default": {"filebeat"},
-			},
-		},
-		"enabled_false": {
-			programs: map[string][]string{
-				"default": {},
-			},
-		},
-		"enabled_output_true": {
-			programs: map[string][]string{
-				"default": {"filebeat"},
-			},
-		},
-		"enabled_output_false": {
-			programs: map[string][]string{},
-		},
-		"endpoint_basic": {
-			programs: map[string][]string{
-				"default": {"endpoint"},
-			},
-		},
-		"endpoint_no_fleet": {
-			programs: map[string][]string{
-				"default": {},
-			},
-		},
-		"endpoint_unknown_output": {
-			programs: map[string][]string{
-				"default": {},
-			},
-		},
-		"endpoint_arm": {
-			programs: map[string][]string{
-				"default": {},
-			},
-		},
-	}
-
-	for name, test := range testcases {
-		t.Run(name, func(t *testing.T) {
-			singleConfig, err := ioutil.ReadFile(filepath.Join("testdata", name+".yml"))
-			require.NoError(t, err)
-
-			var m map[string]interface{}
-			err = yaml.Unmarshal(singleConfig, &m)
-			require.NoError(t, err)
-
-			ast, err := transpiler.NewAST(m)
-			require.NoError(t, err)
-
-			programs, err := Programs(&fakeAgentInfo{}, ast)
-			if test.err {
-				require.Error(t, err)
-				return
-			}
-			require.NoError(t, err)
-			require.Equal(t, len(test.programs), len(programs))
-
-			if len(programs) > 0 {
-				_, containsDefault := programs["default"]
-				require.True(t, containsDefault)
-			}
-
-			for progKey, progs := range programs {
-				testPrograms, isExpectedProgram := test.programs[progKey]
-				require.True(t, isExpectedProgram)
-				require.Equal(t, len(testPrograms), len(progs))
-
-				for _, program := range progs {
-					filename := name + "-" + program.Spec.CommandName()
-					if progKey != "default" {
-						filename += "-" + progKey
-					}
-					programConfig, err := ioutil.ReadFile(filepath.Join(
-						"testdata",
-						filename+".yml",
-					))
-
-					require.NoError(t, err)
-					var m map[string]interface{}
-					err = yamltest.FromYAML(programConfig, &m)
-					require.NoError(t, errors.Wrap(err, program.Cmd()))
-
-					compareMap := &transpiler.MapVisitor{}
-					program.Config.Accept(compareMap)
-
-					if !assert.True(t, cmp.Equal(m, compareMap.Content)) {
-						diff := cmp.Diff(m, compareMap.Content)
-						if diff != "" {
-							t.Errorf("%s-%s mismatch (-want +got):\n%s", name, program.Spec.Name, diff)
-						}
-					}
-				}
-			}
-
-		})
-	}
-}
-
-func TestUseCases(t *testing.T) {
-	defer os.Remove("fleet.yml")
-
-	useCasesPath := filepath.Join("testdata", "usecases")
-	useCases, err := filepath.Glob(filepath.Join(useCasesPath, "*.yml"))
-	require.NoError(t, err)
-
-	generatedFilesDir := filepath.Join(useCasesPath, "generated")
-
-	// Cleanup all generated files to make sure not having any left overs
-	if *generateFlag {
-		err := os.RemoveAll(generatedFilesDir)
-		require.NoError(t, err)
-	}
-
-	for _, usecase := range useCases {
-		t.Run(usecase, func(t *testing.T) {
-
-			useCaseName := strings.TrimSuffix(filepath.Base(usecase), ".yml")
-			singleConfig, err := ioutil.ReadFile(usecase)
-			require.NoError(t, err)
-
-			var m map[string]interface{}
-			err = yaml.Unmarshal(singleConfig, &m)
-			require.NoError(t, err)
-
-			ast, err := transpiler.NewAST(m)
-			require.NoError(t, err)
-
-			programs, err := Programs(&fakeAgentInfo{}, ast)
-			require.NoError(t, err)
-
-			require.Equal(t, 1, len(programs))
-
-			defPrograms, ok := programs["default"]
-			require.True(t, ok)
-
-			for _, program := range defPrograms {
-				generatedPath := filepath.Join(
-					useCasesPath, "generated",
-					useCaseName+"."+program.Spec.CommandName()+".golden.yml",
-				)
-
-				compareMap := &transpiler.MapVisitor{}
-				program.Config.Accept(compareMap)
-
-				// Generate new golden file for programm
-				if *generateFlag {
-					d, err := yaml.Marshal(&compareMap.Content)
-					require.NoError(t, err)
-
-					err = os.MkdirAll(generatedFilesDir, 0755)
-					require.NoError(t, err)
-					err = ioutil.WriteFile(generatedPath, d, 0644)
-					require.NoError(t, err)
-				}
-
-				programConfig, err := ioutil.ReadFile(generatedPath)
-				require.NoError(t, err)
-
-				var m map[string]interface{}
-				err = yamltest.FromYAML(programConfig, &m)
-				require.NoError(t, errors.Wrap(err, program.Cmd()))
-
-				if !assert.True(t, cmp.Equal(m, compareMap.Content)) {
-					diff := cmp.Diff(m, compareMap.Content)
-					if diff != "" {
-						t.Errorf("%s-%s mismatch (-want +got):\n%s", usecase, program.Spec.Name, diff)
-					}
-				}
-			}
-		})
-	}
-}
-
-type fakeAgentInfo struct{}
-
-func (*fakeAgentInfo) AgentID() string {
-	return "agent-id"
-}
-
-func (*fakeAgentInfo) Version() string {
-	return "8.0.0"
-}
-
-func (*fakeAgentInfo) Snapshot() bool {
-	return false
-}
-
-func (*fakeAgentInfo) Headers() map[string]string {
-	return map[string]string{
-		"h1": "test-header",
-	}
-}
diff --git a/internal/pkg/agent/program/spec.go b/internal/pkg/agent/program/spec.go
deleted file mode 100644
index 0b3a8eeb347..00000000000
--- a/internal/pkg/agent/program/spec.go
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package program
-
-import (
-	"fmt"
-	"io/ioutil"
-	"path/filepath"
-	"runtime"
-	"strings"
-
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
-)
-
-// ErrMissingWhen is returned when no boolean expression is defined for a program.
-var ErrMissingWhen = errors.New("program must define a 'When' expression")
-
-// Spec represents a specific program specification, it contains information about how to run the
-// program and also the rules to apply to the single configuration to create a specific program
-// configuration.
-//
-// NOTE: Current spec are build at compile time, we want to revisit that to allow other program
-// to register their spec in a secure way.
-type Spec struct {
-	Name                  string               `yaml:"name"`
-	ServicePort           int                  `yaml:"service,omitempty"`
-	Cmd                   string               `yaml:"cmd"`
-	Args                  []string             `yaml:"args"`
-	Artifact              string               `yaml:"artifact"`
-	ActionInputTypes      []string             `yaml:"action_input_types,omitempty"`
-	LogPaths              map[string]string    `yaml:"log_paths,omitempty"`
-	MetricEndpoints       map[string]string    `yaml:"metric_endpoints,omitempty"`
-	Rules                 *transpiler.RuleList `yaml:"rules"`
-	CheckInstallSteps     *transpiler.StepList `yaml:"check_install"`
-	PostInstallSteps      *transpiler.StepList `yaml:"post_install"`
-	PreUninstallSteps     *transpiler.StepList `yaml:"pre_uninstall"`
-	When                  string               `yaml:"when"`
-	Constraints           string               `yaml:"constraints"`
-	RestartOnOutputChange bool                 `yaml:"restart_on_output_change,omitempty"`
-	ExportedMetrics       []string             `yaml:"exported_metrics,omitempty"`
-}
-
-func (s *Spec) Command() string {
-	name := strings.ToLower(s.Cmd)
-	if runtime.GOOS == "windows" && !strings.HasSuffix(name, ".exe") {
-		return name + ".exe"
-	}
-
-	return name
-}
-
-func (s *Spec) CommandName() string {
-	return strings.ToLower(s.Cmd)
-}
-
-// ReadSpecs reads all the specs that match the provided globbing path.
-func ReadSpecs(path string) ([]Spec, error) {
-	var specs []Spec
-	files, err := filepath.Glob(path)
-	if err != nil {
-		return []Spec{}, errors.New(err, "could not include spec", errors.TypeConfig)
-	}
-
-	for _, f := range files {
-		b, err := ioutil.ReadFile(f)
-		if err != nil {
-			return []Spec{}, errors.New(err, fmt.Sprintf("could not read spec %s", f), errors.TypeConfig)
-		}
-
-		spec := Spec{}
-		if err := yaml.Unmarshal(b, &spec); err != nil {
-			return []Spec{}, errors.New(err, fmt.Sprintf("could not unmarshal YAML for file %s", f), errors.TypeConfig)
-		}
-		specs = append(specs, spec)
-	}
-
-	return specs, nil
-}
-
-// NewSpecFromBytes create a Spec from a bytes.
-func NewSpecFromBytes(b []byte) (Spec, error) {
-	spec := Spec{}
-	if err := yaml.Unmarshal(b, &spec); err != nil {
-		return Spec{}, errors.New(err, "could not unmarshal YAML", errors.TypeConfig)
-	}
-	return spec, nil
-}
-
-// MustReadSpecs read specs and panic on errors.
-func MustReadSpecs(path string) []Spec {
-	s, err := ReadSpecs(path)
-	if err != nil {
-		panic(err)
-	}
-	return s
-}
-
-// FindSpecByName find a spec by name and return it or false if we cannot find it.
-func FindSpecByName(name string) (Spec, bool) {
-	for _, candidate := range Supported {
-		if name == candidate.Name {
-			return candidate, true
-		}
-	}
-	return Spec{}, false
-}
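The deleted v1 Spec is plain YAML, and NewSpecFromBytes was the smallest entry point into it. A sketch of how a spec was parsed, callable only from inside the internal program package (the YAML content is illustrative but uses only fields defined on the struct above):

    // Hypothetical spec content; real v1 specs also carried rules and a when expression.
    data := []byte("name: Filebeat\ncmd: filebeat\nargs: [\"-E\", \"management.enabled=true\"]\nartifact: beats/filebeat\nwhen: length(${fileinput}) > 0\n")
    spec, err := NewSpecFromBytes(data)
    if err != nil {
        panic(err)
    }
    fmt.Println(spec.CommandName()) // "filebeat"; Command() appends ".exe" on Windows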
diff --git a/internal/pkg/agent/program/spec_test.go b/internal/pkg/agent/program/spec_test.go
deleted file mode 100644
index 110dd92eb36..00000000000
--- a/internal/pkg/agent/program/spec_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package program
-
-import (
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"regexp"
-	"strings"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
-)
-
-func TestSerialization(t *testing.T) {
-	spec := Spec{
-		Name:     "hello",
-		Cmd:      "hellocmd",
-		Args:     []string{"-c", "first"},
-		Artifact: "nested/hellocmd",
-		Rules: transpiler.NewRuleList(
-			transpiler.Copy("inputs", "filebeat"),
-			transpiler.Filter("filebeat", "output", "keystore"),
-			transpiler.Rename("filebeat", "notfilebeat"),
-			transpiler.Translate("type", map[string]interface{}{
-				"event/file":  "log",
-				"event/stdin": "stdin",
-			}),
-			transpiler.TranslateWithRegexp("type", regexp.MustCompile("^metric/(.+)"), "$1/hello"),
-			transpiler.Map("inputs",
-				transpiler.Translate("type", map[string]interface{}{
-					"event/file": "log",
-				})),
-			transpiler.FilterValues(
-				"inputs",
-				"type",
-				"log",
-			),
-		),
-		CheckInstallSteps: transpiler.NewStepList(
-			transpiler.ExecFile(25, "app", "verify", "--installed"),
-		),
-		PostInstallSteps: transpiler.NewStepList(
-			transpiler.DeleteFile("d-1", true),
-			transpiler.MoveFile("m-1", "m-2", false),
-		),
-		PreUninstallSteps: transpiler.NewStepList(
-			transpiler.ExecFile(30, "app", "uninstall", "--force"),
-		),
-		When:        "1 == 1",
-		Constraints: "2 == 2",
-	}
-	yml := `name: hello
-cmd: hellocmd
-args:
-- -c
-- first
-artifact: nested/hellocmd
-rules:
-- copy:
-    from: inputs
-    to: filebeat
-- filter:
-    selectors:
-    - filebeat
-    - output
-    - keystore
-- rename:
-    from: filebeat
-    to: notfilebeat
-- translate:
-    path: type
-    mapper:
-      event/file: log
-      event/stdin: stdin
-- translate_with_regexp:
-    path: type
-    re: ^metric/(.+)
-    with: $1/hello
-- map:
-    path: inputs
-    rules:
-    - translate:
-        path: type
-        mapper:
-          event/file: log
-- filter_values:
-    selector: inputs
-    key: type
-    values:
-    - log
-check_install:
-- exec_file:
-    path: app
-    args:
-    - verify
-    - --installed
-    timeout: 25
-post_install:
-- delete_file:
-    path: d-1
-    fail_on_missing: true
-- move_file:
-    path: m-1
-    target: m-2
-    fail_on_missing: false
-pre_uninstall:
-- exec_file:
-    path: app
-    args:
-    - uninstall
-    - --force
-    timeout: 30
-when: 1 == 1
-constraints: 2 == 2
-`
-	t.Run("serialization", func(t *testing.T) {
-		b, err := yaml.Marshal(spec)
-		require.NoError(t, err)
-		assert.Equal(t, string(b), yml)
-	})
-
-	t.Run("deserialization", func(t *testing.T) {
-		s := Spec{}
-		err := yaml.Unmarshal([]byte(yml), &s)
-		require.NoError(t, err)
-		assert.Equal(t, spec, s)
-	})
-}
-
-func TestExport(t *testing.T) {
-	dir, err := ioutil.TempDir("", "test_export")
-	require.NoError(t, err)
-	defer os.RemoveAll(dir)
-
-	for _, spec := range Supported {
-		b, err := yaml.Marshal(spec)
-		require.NoError(t, err)
-		err = ioutil.WriteFile(filepath.Join(dir, strings.ToLower(spec.Name)+".yml"), b, 0666)
-		require.NoError(t, err)
-	}
-}
diff --git a/internal/pkg/agent/program/supported.go b/internal/pkg/agent/program/supported.go
deleted file mode 100644
index 4bd218a4ee9..00000000000
--- a/internal/pkg/agent/program/supported.go
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-// Code generated by elastic-agent/internals/dev-tools/buildspec/buildspec.go - DO NOT EDIT.
-
-package program
-
-import (
-	"strings"
-
-	"github.com/elastic/elastic-agent/pkg/packer"
-)
-
-var Supported []Spec
-var SupportedMap map[string]Spec
-
-func init() {
-	// Packed Files
-	// internal/spec/apm-server.yml
-	// internal/spec/auditbeat.yml
-	// internal/spec/cloudbeat.yml
-	// internal/spec/endpoint.yml
-	// internal/spec/filebeat.yml
-	// internal/spec/fleet-server.yml
-	// internal/spec/heartbeat.yml
-	// internal/spec/metricbeat.yml
-	// internal/spec/osquerybeat.yml
-	// internal/spec/packetbeat.yml
-	unpacked := packer.MustUnpack("eJzMe1uTo7iW7vv5Gf16zpnNJZ3dTMR+MGRxs5Mq4zISekOSE2wL7E4b2zAx/31C4mLAZFZWdvWe/VARlVjosrQu3/rW4r9+22Sn9WsWsX8cD2vyj+iQ/v/j+vW8fv2PImW//edvODVP6Ps+XgS6Nw88RjLESHzYYrB4dCzzgpdyiaCrIOjMQuhKEUBJqI7+lpFyH4PLPnYM5+QvnaNjuKcQTBKkBCcEJtI8DfIQuEcEFhq1XRktnaOxmcbORjadzSV2UrqFqs5I6jGcLTTXPumrL/J3P3CBH7gvvqTZi3J/fX7SNSc+UCMNHoilFdQKdlCRGbXdQ6g+PzrmceYY000I9dMc1mfaOEeDSTOSBUcEnx/5uvOlvsWqPoGqf4bK9UDUhXjuGNPYsZiEgPToWOiIQCC1z23//HWjH3Cmy9R+nolnxjTGyuQlVLQcpddDJZ/JGatT/vvJseSEPO3bscQypehpH6P0yhBc3J539tY8my/1AgH5TNPgJVKCydd43/5W/dNfEdzx+9iGSlASWUuIxcTYT81ju6ySKcvRpTtGikkanLCKGFRObP39dp7mn5h3o/P7zul0L95BKXuAqieRNEjw9328VqVaJuiAbZ8RpikhuMq9c9sew1awpZZWjMm6XkdaQ53d3kEJtgNGyt6+TkJPF+1ejtQKitvZ9RKBKwtV/0yyO7nfrVvNp8nU1uXqfDfZdO7y5Fgsj9JgS01tj4C5Q9Atv270318WBzWygvzrRj8iMMmoFe9d+1Sv42mz5fT/Ok/TOASTnWMlCZFObL2Md2ulXtOWjo5BGbbMklpsS5QgIam3d4tL7KouQxYr3eLC95BFiplGypdsbkwzbGkZUf2EKHE2W+z/+dv/G3qFnG5OeB2dhk4BBrsI+hsETMlIvRN62sdh75lZoI7BzcHzZi4M7zZmngbHEHpSBJ5PIaDcsPM1kI/O5lK9s1qJd4gSSBRO8whcT11HgVLzSJTVZj7tOZYSQV8mxkRCQL5gy5TQcsJwam6wFey+Aa4MHhuugYGZiz2AIKdGZzz0Cwru1jhihWYRmGTz9MpoGhy/AZ+FWZAN50WK90KsoAi54j9Ju7VkBv4XtlrsAj0wNfu7RJ++br9cnm1puEZCgX+glstCuMgrB8KyeSqfkS2cQY7AJAm5cj3JaQiuJVo6M7g4JCTzDyg1t5TfSRok1H4+9+4mc5m49Mx/4c4Jq4FE7ECCircPwSTj9yuc8GLfcYrjBmtsnuMFuB6dL6aMLCaJ9YzaQIzKEEmhbxCgB7zR1Qj6e8eg1dmMP7J5vB8xeO+FKkyKTK1AgLK1Pa0NsHFE/oRYq8r4bJd93UwzmFbrz4uHV7d2TkQJjgh4EladR254/Izksp85ha4jyy8dgx5w6rP10z4WhlDIvyPLlMJAK6ntshBIufgbeoza0mmt+Am1zBei+gUC5mmeThIMgpJY5hZBKeMBwLGCJFTiOAKTC4WLnMstApM/+T6gwnJkBQ+Nk6E2u3BZi7W7+7J9FRd6giz/BaWMYfFML7DiMaJ63CFxJ3TG6SpGqVY4VqBUQVTsr0RwUTsKLUdLPcWqw+fehdBP2vtbTsTf3Lbmxt1v4t7mhp7gdBEPZSJ8APQuIfBYdX+tU6vHyC/Eds8i6Cha61TvAyHLiRIU1NQSlPmMvC2XJ6xMFARdqblHIEt5cw9EPlH+nFp/1HL3S/670FmYSITbgqkdIyjugPuFstlTE3Cb92o9O1ArOH3d6M15mucSLjtjxdk7AdzWE2rFXV0bC95b50v1e6XHQelY6Ew2ut6xd263WQQeYmEHxjSr7HdxdouH2IeIkYxJEQ/EXE+47Da6FFkml0/ZrmML+UgILOIQPsfUSphj1f5jqechkPn9NUGNyyknyjWhVvCG/fVBirinRSfY3dlinM+KXdZ9V8z7tI/dpy+zDngQe+kBDwsVWJH6tm7c7gADbUfBlXVkmjrWl7i1V0M/kEIviRVsI4AOQgaKltM0KLhfqmUldMZVGrvxeTDGrgB4MnWLh3aPGJivKNASkrlJH+C0utLaU++81h+PjnWbv/7txGWFgaagQBM+rftODWKG833knTZu/ew6re0v9j8GhZYAeUKe/J4joOXtPZguIzA4kHQVhzwuWN4Zp+iACl3Chb7Fiswc29+TVJORElc6+cXMuP9yjCTHhX7EipdgQ08jcGWk2OU/cZYqrphawXUDKuiAraC207dAZQfobe/8QA2ga5upZUNT80hBUHZkwsGk0Hk07T4T679EgGMRWkTQ7wBvKSYqK7nuGPF+63wxd8h+jqlNz45l7pCppREIjvxZ15eSQgDNxq/HDW6KwKjPbwEhVCoZz1O2mxsjzzMvD6FeRpYmjf/OfY53wMrDIKZVv4vYlv7x
6NgnrZK3dyZ2c0d6SlLtdAfYq4Spi3NmfO6hP0CZe+Z4o48d+z62srMWG81qvNID9Tc84LZ3N1/212/tvcZEre5xnYfiXVbdVx3zDL0F8o0/qfxsDcwNrvOTNAJUJqkpbGUQMweJwH5GlYThLY9/QY5Ufz8z/N/7Z2/O+ZcSA5EMkJTP+yySgZ9KDAjb53QsMVCuZwoGiUHv2SAx+LcC+Qf6cXA8ANr9M26pxc6YaZXCm1oFhKsMlBvHEavBToDl6X7IPKQRTFqQWSuFUMq3gKRTA22ePWKFcofXH98490C7kFTbIuiVHBTfGA0th4pw1McIeFJlqB7jMggBkhB02oB3B87+pRl7X26DjLpiF0ztgFN0boypt79th2kZcQaN8feDQJv1nyn0OaifGRndI/Dw6FhXhlMqRQY3uNohqdLBeXqIn2vwHFlmuVSCCTfiRpdelpfYVZrk1ysRMItQiYXhfdog1xk97DfZ0B59MNkRi+vhIl4pwZZC90Dt3SxU5F0FGPwLVlhODblEwJNJyqT1QLfRbY4TsgLFELqBDqGyenSeQvXrUzwLgSecG6zlANUK3H4Vzi7Ika2feSA00usZydolhP6+ul8OQFw14vI0nPN3i21IahbrpWY2Tmsu3d6fq54UQp/NlesZFVpn/9Kfcz53wXVVP0ZgIvPEztloZ2Ivzj64JkT1D2Ghmbd3tJIK36EdsULO3XPONhP+bMN1gAp701RS7mNn9/wIzeuCpFpGUvPkfKlABjSv7X7F/5s1zCvh90atgECLn/1KRtdJvT0C3quQn+on2Lo81gkMC2VNgKFh0uKkHblAj4VqIECGU4+rmdc2qDk8iKcsXS+d27ONdOK65bQBcbohqs8DUtE8oxY7IaDJXBeey+mMWFop7CzzJA7y6jt+QMDjCesRfR8kRIY7rmfNPqwqJrTB1nDbubv7mi/l9k7qcSW1fEYyp/PMOc1hcEGqmyBrNXjuMqJoMkk9RoqODN6QY3/85DGC002TCEZAZtyev26myvPTdEZsl0E1yCMw4Tp1xE/72Xyps7UVbCsgumrAkND9r5vppqsH5GabzRoJSWkfZBqujNNWPzZdMDm8x3H5jOz7hwTPDUhBdeCX3/PtdZLQTRKHwJnLp9WLqZAX13MJQfdlOPYNMued5EcfxqITT4gpWIkzYWBehvbUY91tV8ZWb68/TIgEmZEGRS/h2N7LqmuTfab/LvbcscpiHxAdiMLOOB4Hi9Wc/gAc/vpY1Y9JBw50S2q7Cbcj7itxqkkOX091ZZz5BwxWeQjdbWRL8bfvUuwqZoG/h5JbVOu79qmgYCJ0dJ6iBAN2XMN6rGCxk4QapDqP4f9OsiAXvmk5OYXgcCZZPbYk2Ww5nd3FyJcNW49gVp/7LFAlAHVcFP62whmHyg43Ou5VpDKPUTu4zFN2xB/Ang2+5Qkfsa4JtVZjBO6g6iUnOOUJliySvM54iWTBPb61tAzxWFVMjggihp/kHQKujIofVtOs5epqfoCUvsnFmNSJDckbMoYUI3IQ1TizoBZLwwpbCoxMCq0r4wJBHtddCSocQ9Q6b2rbyDJzxHFGbbs/8leiOpMeeELPE+8dgnHjd4cY+W8nLudL/RTC6cDvD5PgOz9TVR4V84hNTcJyRVz2/E2VOA/IyR9WG5tcga0tjxF7wXOwTUuiFuJ+2/urdLxHOm/gYrDHseoo98fQZ/w+esTF5bP7b2W9QQCdSbp6lyQYEDg9wrY966WOzbZfhCKf4mfwX8S+b8TQ4L68IRn8d5LEIlaFYFKK3LZPODb76udMXHbAvJBeFbN+zv1ChcvE/7vvcH2gKc+Ha4wp7IP0K7hNgUH1JGSxXJzZ8g94sI7wOdniTC3vwvcX9vSuO0+wI0/7mAL/MthLhcMtbRspAccUO6x4r938s5pHYP8CAf9AZK3Elqbyc33d6NWzy/3556o3ITxnKPeCeOvI4SPEokyU4OWOFPobydX3ii6fXr9T9PnLpGivCDfM16U4hLScgypfp4ophUrc/w0+dzCO+Uihy+ag0o/IDm64vc75UarJOPWLddcHKRMpBCy/6YcUN7pze59uiSFfKHBLCm7jItuXiKl18Wcz757a/iXKvDO++eQ/sRoUODWP6DbuNQToNbztp2M3UoyhKXXmzBH0U66nt339UXrboCIfu9iuo7/zZd/O+N9r6PXW4bZ204dgR25z8Vzh4fYbPWCeH9/mrgj22if+dZK85eze1ImmqNyN0RWBLWJ1yv1mk3OEinZpCy1NMcgaI31F/l3xbj+Fu2+Y74Ok70+Ttp018kZmL8td/G0zvTgc1xj6PoTeHMHd3rVPNb/kaxxbo4avUD0mMLNBjo5BK/0oiMBzrnJKUHpKRJcH922CG/H2brEbwb5svT6Nd3j5VQ4Sr5r8peZt69zk1OY1N+6W5+IV32bIJ6z4zGHSoAus6o5qu6zigyjK3DhQdFi3uEtqCge9bpy7fPMHeeCobxov2HRzr1Mnb/s16w9t4Z09NDbxrj+vY0Krn/U+m71ARRQeRjlX0XlX6Cm2AkaNSdOVlzdz3Zpw2twuhm1Rt8UaN+6z4qgbfyCKW3hUPsI2cKsHWdNtN7lghWO7nShajazV4OP82WjHNuu2vDSygjSEwZHa41zzPXd8t489Vj1pwPHeyUl0vI13dOWN3sxTLv8qPkBVP5Ns8aO1S6Jc7rr+Gh8x307zoU52+YqOrKr9tvvTuzbWXf+O/x69r7vOvzpeV0XGu3eruZMzUVu96OQ7b2PdN/f5Dp7p2gGFHmtzxOmnOhFHcdlfmmM5EfkIUfUkVFafOtebzUefO+MtL4k/1eU40K3px2ojjZ02ObgVJMgKhB8SsX20dlLZxyA2/uJOyJGYmKyj17HuxqUVJCTrFzGj3rNOIPxoAfMTZNDPtEt/qDvxLkhzcOKXcyXhIEP6PBnzfuHyLqDKmugEhKrLkBI8dFugxwuCLk9M10TljjZh4mKLP/LZZUSRBwD1PVL5fWDbMczmbm6O9r7Q2XRRZQEbGsEYyP07CpN9x9wBr/+KgqWQDa2SoqUAq4Joo5ZZRgY5GPE/74uW6fr0uiEjFvgdBBJJ2bbWyPqjg7qRX6mp2vEPC5rWgAO2pB9TrA2Nm/kMQ/0oSos/bj/4yY8Xrmek8PSL5FhQIBcNWcGGAjKcNwtl7YKgu+Xzflv6v39fBavVjj19gJo9IegXEfDqHpGm72rC4bmgGMfOZbzpEfqyJmnAb7KgpnbGTGtSlZdQSRKcUm6dleZnLZ05DvE/0g9b331N07VW1HgWqHZoycZyPtrCUKUGN11Sn38VnfgO1XtHHQ7gT53uBlqCresLtbQXbLGSPu3v+h47a3bgw93Zc6xoPWoNwWSLoC4JSJ7VkAVWPfkRWFS96o2tGEKfOq0ibskjx2Cv49R083FMv1/5s+e43WEapLc0PHigtptwnRByMjVBxd7KkgPYqbbnnfX3OHg++IBntPT5VuQaKXeSdPXomA/5rNAa2yzd6fu96f/bJdCBvt/pKZc7zjzhJ6FKD9RKXkgaZAg
ml36Jor6D+/7dgts43MT/WD1dRZT/tnl4nS3vZVTNw9eIHx3D7yKCClJXsaI7d9NneEeN9/oDb5TdKQQnBhWzIKk5GdXj1k8MYHilK+2ef7K3t/PeYvZjWP8WrfCRd/rfLPwS6vjzc4ykSB84gx2UKNA6KcYv7T/txU3x7lC3WvqxFxPzsdjQlkJ7tuzc06hZxyYHuKaRy7t0ZE8GCQuBz/ic1flHPyYb4IN/gx7S/fHPfP1ajME/1btSEBTrflX+TFRTRtCdDCvzP1GV/3no9zMdpB+qxruf7zKtP+fCQ/m8232nlQQGjGS72ee6zJr3mVCdD3eX2f4Bg+BM4eLRefpyMToM8BvJYpdhbz7NGsC26xnVkO1j3a06h6uiUwFbTMgwAoLd464j5aH97e6Av6861t7fB6shH2xlH3TXdlzIuEv4JaxKqGjiU6wQsJzaz4+Dzhwx7mU5Tb4tp9lzGSvuZcQVHCKyW49RMSvL3EZKIPUSQZsHkBNrmvbbRLAgJ79KPX+QBPIxd2Pf7SUXNZZCNqtay/t9Of2xbyZ/2Vsfa5L+mT9Px/xF2qP/gdeblMclBN4rGuH+xrW5uzfng9z9kD/+V2n3T309Pfvtv//P/wQAAP//cJQCCQ==") - SupportedMap = make(map[string]Spec) - - for f, v := range unpacked { - s, err := NewSpecFromBytes(v) - if err != nil { - panic("Cannot read spec from " + f + ": " + err.Error()) - } - Supported = append(Supported, s) - SupportedMap[strings.ToLower(s.Cmd)] = s - } -} diff --git a/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml b/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml deleted file mode 100644 index 7ea962de31a..00000000000 --- a/internal/pkg/agent/program/testdata/audit_config-auditbeat.yml +++ /dev/null @@ -1,164 +0,0 @@ -auditbeat: - modules: - - audit_rules: | - # Things that affect identity. - -w /etc/group -p wa -k identity - backlog_limit: 8192 - backpressure_strategy: auto - failure_mode: silent - id: audit/auditd-auditd_manager.auditd-6d35f37c-2243-4229-9fba-ccf39305b536 - include_raw_message: true - include_warnings: false - index: logs-auditd_manager.auditd-default - module: auditd - processors: - - drop_event: - when.equals.source.ip: 127.0.0.1 - - add_fields: - fields: - dataset: auditd_manager.auditd - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: auditd_manager.auditd - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - rate_limit: 0 - resolve_ids: true - socket_type: multicast - tags: - - auditd_manager-auditd - - id: fim_1 - index: logs-auditd_manager.file_integrity-default - module: file_integrity - paths: - - /bin - - /usr/bin - - /sbin - - /usr/sbin - - /etc - processors: - - add_fields: - fields: - dataset: auditd_manager.file_integrity - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: auditd_manager.file_integrity - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - id: fim_2 - index: logs-auditd_manager.file_integrity-default - module: file_integrity - paths: - - /bin - - /usr/bin - - /sbin - - /usr/sbin - - /etc - processors: - - add_fields: - fields: - dataset: auditd_manager.file_integrity - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: auditd_manager.file_integrity - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - datasets: - - socket - id: id-auditd-system-socket - index: logs-audit_system.socket-default - module: system - processors: - - drop_event: - when.equals.source.ip: 127.0.0.1 - - add_fields: - fields: - dataset: audit_system.socket - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: audit_system.socket - target: event - - add_fields: - fields: - id: 
agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - datasets: - - process - id: id-auditd-system-process - index: logs-audit_system.process-default - module: system - processors: - - add_fields: - fields: - dataset: audit_system.process - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: audit_system.process - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/audit_config.yml b/internal/pkg/agent/program/testdata/audit_config.yml deleted file mode 100644 index a6d906d4cda..00000000000 --- a/internal/pkg/agent/program/testdata/audit_config.yml +++ /dev/null @@ -1,104 +0,0 @@ -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme -inputs: - - type: not_audit - this_is_ignored: ~ - streams: - - type: foo - - id: audit/auditd-auditd-6d35f37c-2243-4229-9fba-ccf39305b536 - name: auditd_manager-1 - revision: 1 - type: audit/auditd - use_output: default - meta: - package: - name: auditd_manager - version: 0.0.1 - data_stream: - namespace: default - streams: - - id: >- - audit/auditd-auditd_manager.auditd-6d35f37c-2243-4229-9fba-ccf39305b536 - type: audit/auditd - data_stream: - dataset: auditd_manager.auditd - type: logs - condition: '${host.platform} == ''linux''' - include_raw_message: true - socket_type: multicast - resolve_ids: true - failure_mode: silent - audit_rules: | - # Things that affect identity. 
-          -w /etc/group -p wa -k identity
-        backlog_limit: 8192
-        rate_limit: 0
-        include_warnings: false
-        backpressure_strategy: auto
-        tags:
-          - auditd_manager-auditd
-        processors:
-          - drop_event:
-              when.equals.source.ip: '127.0.0.1'
-  - id: audit/auditd-file_integrity-6d35f37c-2243-4229-9fba-ccf39305b536
-    name: auditd_manager-1
-    revision: 1
-    type: audit/file_integrity
-    use_output: default
-    meta:
-      package:
-        name: auditd_manager
-        version: 0.0.1
-    data_stream:
-      namespace: default
-    streams:
-      - id: fim_1
-        type: audit/file_integrity
-        data_stream:
-          dataset: auditd_manager.file_integrity
-          type: logs
-        paths:
-          - /bin
-          - /usr/bin
-          - /sbin
-          - /usr/sbin
-          - /etc
-      - id: fim_2
-        type: audit/file_integrity
-        data_stream:
-          dataset: auditd_manager.file_integrity
-          type: logs
-        paths:
-          - /bin
-          - /usr/bin
-          - /sbin
-          - /usr/sbin
-          - /etc
-  - id: audit/system-system-6d35f37c-2243-4229-9fba-ccf39305b536
-    type: audit/system
-    data_stream:
-      namespace: default
-    streams:
-      - id: id-auditd-system-socket
-        type: audit/system
-        dataset: socket
-        data_stream:
-          dataset: audit_system.socket
-          type: logs
-        processors:
-          - drop_event:
-              when.equals.source.ip: '127.0.0.1'
-      - id: id-auditd-system-process
-        type: audit/system
-        dataset: process
-        data_stream:
-          dataset: audit_system.process
-          type: logs
-management:
-  host: "localhost"
-config:
-  reload: 123
diff --git a/internal/pkg/agent/program/testdata/enabled_false.yml b/internal/pkg/agent/program/testdata/enabled_false.yml
deleted file mode 100644
index 34b7388e1e1..00000000000
--- a/internal/pkg/agent/program/testdata/enabled_false.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-inputs:
-  - type: event/file
-    streams:
-      - enabled: false
-        paths:
-          - var/log/hello1.log
-          - var/log/hello2.log
-management:
-  host: "localhost"
-config:
-  reload: 123
-outputs:
-  default:
-    type: elasticsearch
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
diff --git a/internal/pkg/agent/program/testdata/enabled_output_false.yml b/internal/pkg/agent/program/testdata/enabled_output_false.yml
deleted file mode 100644
index f0b57a01897..00000000000
--- a/internal/pkg/agent/program/testdata/enabled_output_false.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-inputs:
-  - type: event/file
-    streams:
-      - paths:
-          - /var/log/hello1.log
-          - /var/log/hello2.log
-management:
-  host: "localhost"
-config:
-  reload: 123
-outputs:
-  default:
-    type: elasticsearch
-    enabled: false
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
diff --git a/internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml b/internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml
deleted file mode 100644
index d9b4dc079f5..00000000000
--- a/internal/pkg/agent/program/testdata/enabled_output_true-filebeat.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-filebeat:
-  inputs:
-    - type: log
-      paths:
-        - /var/log/hello1.log
-        - /var/log/hello2.log
-      index: logs-generic-default
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: logs
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  elasticsearch:
-    enabled: true
-    hosts:
-      - 127.0.0.1:9200
-      - 127.0.0.1:9300
-    headers:
-      h1: test-header
-    username: elastic
-    password: changeme
diff --git a/internal/pkg/agent/program/testdata/enabled_output_true.yml b/internal/pkg/agent/program/testdata/enabled_output_true.yml
deleted file mode 100644
index 9601388c536..00000000000
--- a/internal/pkg/agent/program/testdata/enabled_output_true.yml
+++ /dev/null
@@ -1,17 +0,0 @@
-inputs:
-  - type: event/file
-    streams:
-      - paths:
-          - /var/log/hello1.log
-          - /var/log/hello2.log
-management:
-  host: "localhost"
-config:
-  reload: 123
-outputs:
-  default:
-    type: elasticsearch
-    enabled: true
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
diff --git a/internal/pkg/agent/program/testdata/enabled_true-filebeat.yml b/internal/pkg/agent/program/testdata/enabled_true-filebeat.yml
deleted file mode 100644
index f579dcba416..00000000000
--- a/internal/pkg/agent/program/testdata/enabled_true-filebeat.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-filebeat:
-  inputs:
-    - type: log
-      enabled: true
-      paths:
-        - /var/log/hello1.log
-        - /var/log/hello2.log
-      index: logs-generic-default
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: logs
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  elasticsearch:
-    hosts:
-      - 127.0.0.1:9200
-      - 127.0.0.1:9300
-    headers:
-      h1: test-header
-    username: elastic
-    password: changeme
diff --git a/internal/pkg/agent/program/testdata/enabled_true.yml b/internal/pkg/agent/program/testdata/enabled_true.yml
deleted file mode 100644
index 6afc7f37ab1..00000000000
--- a/internal/pkg/agent/program/testdata/enabled_true.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Production Website DB Servers
-fleet:
-  kibana_url: https://kibana.mydomain.com:5601
-  ca_hash: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-  checkin_interval: 5m
-inputs:
-  - type: event/file
-    streams:
-      - enabled: true
-        paths:
-          - /var/log/hello1.log
-          - /var/log/hello2.log
-management:
-  host: "localhost"
-config:
-  reload: 123
-outputs:
-  default:
-    type: elasticsearch
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
diff --git a/internal/pkg/agent/program/testdata/endpoint_arm.yml b/internal/pkg/agent/program/testdata/endpoint_arm.yml
deleted file mode 100644
index 5353cd43d9c..00000000000
--- a/internal/pkg/agent/program/testdata/endpoint_arm.yml
+++ /dev/null
@@ -1,117 +0,0 @@
-revision: 5
-name: Endpoint Host
-fleet:
-  agent:
-    id: fleet-agent-id
-  host:
-    id: host-agent-id
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  kibana:
-    protocol: https
-    host: localhost:5601
-    timeout: 30s
-
-outputs:
-  default:
-    type: elasticsearch
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-runtime:
-  arch: arm64
-  family: redhat
-  major: "7"
diff --git a/internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml b/internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml
deleted file mode 100644
index dfbec8016ba..00000000000
--- a/internal/pkg/agent/program/testdata/endpoint_basic-endpoint-security.yml
+++ /dev/null
@@ -1,113 +0,0 @@
-revision: 5
-fleet:
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  elasticsearch:
-    hosts:
-      - "127.0.0.1:9200"
-      - "127.0.0.1:9300"
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
diff --git a/internal/pkg/agent/program/testdata/endpoint_basic.yml b/internal/pkg/agent/program/testdata/endpoint_basic.yml
deleted file mode 100644
index 9f438cd46fd..00000000000
--- a/internal/pkg/agent/program/testdata/endpoint_basic.yml
+++ /dev/null
@@ -1,115 +0,0 @@
-revision: 5
-name: Endpoint Host
-fleet:
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-outputs:
-  default:
-    type: elasticsearch
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-runtime:
-  arch: x86_64
diff --git a/internal/pkg/agent/program/testdata/endpoint_no_fleet.yml b/internal/pkg/agent/program/testdata/endpoint_no_fleet.yml
deleted file mode 100644
index de7ccd2a11c..00000000000
--- a/internal/pkg/agent/program/testdata/endpoint_no_fleet.yml
+++ /dev/null
@@ -1,102 +0,0 @@
-name: Endpoint Host
-revision: 5
-outputs:
-  default:
-    type: elasticsearch
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
diff --git a/internal/pkg/agent/program/testdata/endpoint_unknown_output.yml b/internal/pkg/agent/program/testdata/endpoint_unknown_output.yml
deleted file mode 100644
index 48e362849be..00000000000
--- a/internal/pkg/agent/program/testdata/endpoint_unknown_output.yml
+++ /dev/null
@@ -1,107 +0,0 @@
-name: Endpoint Host
-revision: 5
-fleet:
-  agent:
-    id: fleet-agent-id
-  api:
-    access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  kibana:
-    protocol: https
-    host: localhost:5601
-    timeout: 30s
-
-outputs:
-  default:
-    type: unknown
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
diff --git a/internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml b/internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml
deleted file mode 100644
index 7a0fad5c9df..00000000000
--- a/internal/pkg/agent/program/testdata/fleet_server-fleet-server.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-fleet:
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  elasticsearch:
-    hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ]
-    headers:
-      h1: test-header
-    username: fleet
-    password: fleetpassword
-
-inputs:
-  - id: fleet-server-id
-    type: fleet-server
-    name: fleet_server-1
-    revision: 6
-    server:
-      host: 0.0.0.0
-      port: 8220
-      ssl:
-        verification_mode: none
-      limits:
-        max_connections: 40
-      runtime:
-        gc_percent: 50
-      timeouts:
-        read: 5m
-    policy:
-      id: copy-policy-id
diff --git a/internal/pkg/agent/program/testdata/fleet_server.yml b/internal/pkg/agent/program/testdata/fleet_server.yml
deleted file mode 100644
index a816197917e..00000000000
--- a/internal/pkg/agent/program/testdata/fleet_server.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-name: Fleet Server Only
-fleet:
-  enabled: true
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-  server:
-    host: 127.0.0.1
-    port: 8822
-    ssl:
-      verification_mode: none
-    policy:
-      id: copy-policy-id
-    output:
-      elasticsearch:
-        hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ]
-        username: fleet
-        password: fleetpassword
-
-outputs:
-  default:
-    type: elasticsearch
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-inputs:
-  - id: fleet-server-id
-    type: fleet-server
-    use_output: default
-    data_stream:
-      namespace: default
-    name: fleet_server-1
-    revision: 6
-    server:
-      host: 0.0.0.0
-      port: 8220
-      limits:
-        max_connections: 40
-      runtime:
-        gc_percent: 50
-      timeouts:
-        read: 5m
diff --git a/internal/pkg/agent/program/testdata/journal_config.yml b/internal/pkg/agent/program/testdata/journal_config.yml
deleted file mode 100644
index 732ebab6fb2..00000000000
--- a/internal/pkg/agent/program/testdata/journal_config.yml
+++ /dev/null
@@ -1,21 +0,0 @@
-streams:
-  - type: log/journal
-    paths: []
-    backoff: 1s
-    max_backoff: 20s
-    seek: cursor
-    cursor_seek_fallback: head
-    include_matches: []
-    save_remote_hostname: false
-  - type: log/file
-    ignore_older: 123s
-management:
-  host: "localhost"
-config:
-  reload: 123
-outputs:
-  default:
-    type: elasticsearch
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
diff --git a/internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml b/internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml
deleted file mode 100644
index 37b47f631a1..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config-endpoint-security.yml
+++ /dev/null
@@ -1,114 +0,0 @@
-fleet:
-  enabled: true
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  logstash:
-    ssl.certificate: abcert
-    ssl.key: abckey
-    hosts:
-      - 127.0.0.1:5044
-    ssl.certificate_authorities:
-      - abc1
-      - abc2
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  use_input: default
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
diff --git a/internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml b/internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml
deleted file mode 100644
index 39dab9091ed..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config-filebeat-elasticsearch.yml
+++ /dev/null
@@ -1,43 +0,0 @@
-filebeat:
-  inputs:
-    - type: log
-      paths:
-        - /var/log/hello3.log
-        - /var/log/hello4.log
-      index: testtype-generic-default
-      vars:
-        var: value
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: testtype
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  elasticsearch:
-    hosts:
-      - 127.0.0.1:9201
-      - 127.0.0.1:9301
-    headers:
-      h1: test-header
-    username: elastic
-    password: changeme
-    bulk_max_size: 23
-    worker: 10
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
diff --git a/internal/pkg/agent/program/testdata/logstash_config-filebeat.yml b/internal/pkg/agent/program/testdata/logstash_config-filebeat.yml
deleted file mode 100644
index ee44efd259e..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config-filebeat.yml
+++ /dev/null
@@ -1,39 +0,0 @@
-filebeat:
-  inputs:
-    - type: log
-      paths:
-        - /var/log/hello1.log
-        - /var/log/hello2.log
-      index: logs-generic-default
-      vars:
-        var: value
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: logs
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  logstash:
-    ssl.certificate: abcert
-    ssl.key: abckey
-    hosts:
-      - 127.0.0.1:5044
-    ssl.certificate_authorities:
-      - abc1
-      - abc2
diff --git a/internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml b/internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml
deleted file mode 100644
index b306cf40277..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config-fleet-server.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-fleet:
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  elasticsearch:
-    hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ]
-    headers:
-      h1: test-header
-    username: fleet
-    password: fleetpassword
-
-inputs:
-  - id: fleet-server-id
-    type: fleet-server
diff --git a/internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml b/internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml
deleted file mode 100644
index fbddfbe022e..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config-heartbeat.yml
+++ /dev/null
@@ -1,29 +0,0 @@
-inputs:
-- type: synthetics/http
-  id: unique-http-id
-  name: my-http
-  schedule: '*/5 * * * * * *'
-  host: "http://localhost:80/service/status"
-  timeout: 16s
-  wait: 1s
-  data_stream.namespace: default
-  processors:
-    - add_fields:
-        target: 'elastic_agent'
-        fields:
-          id: agent-id
-          version: 8.0.0
-          snapshot: false
-    - add_fields:
-        target: 'agent'
-        fields:
-          id: agent-id
-output:
-  logstash:
-    ssl.certificate: abcert
-    ssl.key: abckey
-    hosts:
-      - 127.0.0.1:5044
-    ssl.certificate_authorities:
-      - abc1
-      - abc2
diff --git a/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml b/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml
deleted file mode 100644
index a2d429c8108..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config-metricbeat.yml
+++ /dev/null
@@ -1,89 +0,0 @@
-metricbeat:
-  modules:
-    - module: docker
-      metricsets: [status]
-      index: metrics-docker.status-default
-      hosts: ["http://127.0.0.1:8080"]
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: metrics
-              dataset: docker.status
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: docker.status
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-    - module: docker
-      metricsets: [info]
-      index: metrics-generic-default
-      hosts: ["http://127.0.0.1:8080"]
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: metrics
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-    - module: apache
-      metricsets: [info]
-      index: metrics-generic-testing
-      hosts: ["http://apache.remote"]
-      processors:
-        - add_fields:
-            fields:
-              should_be: first
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: metrics
-              dataset: generic
-              namespace: testing
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  logstash:
-    ssl.certificate: abcert
-    ssl.key: abckey
-    hosts:
-      - 127.0.0.1:5044
-    ssl.certificate_authorities:
-      - abc1
-      - abc2
diff --git a/internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml b/internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml
deleted file mode 100644
index 2258ea5aa8d..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config-packetbeat.yml
+++ /dev/null
@@ -1,34 +0,0 @@
-inputs:
-- type: packet
-  processors:
-    - add_fields:
-        target: 'elastic_agent'
-        fields:
-          id: agent-id
-          version: 8.0.0
-          snapshot: false
-    - add_fields:
-        target: 'agent'
-        fields:
-          id: agent-id
-  streams:
-    - type: flow
-      timeout: 10s
-      period: 10s
-      keep_null: false
-      data_stream:
-        dataset: packet.flow
-        type: logs
-    - type: icmp
-      data_stream:
-        dataset: packet.icmp
-        type: logs
-output:
-  logstash:
-    ssl.certificate: abcert
-    ssl.key: abckey
-    hosts:
-      - 127.0.0.1:5044
-    ssl.certificate_authorities:
-      - abc1
-      - abc2
diff --git a/internal/pkg/agent/program/testdata/logstash_config.yml b/internal/pkg/agent/program/testdata/logstash_config.yml
deleted file mode 100644
index ab2a8b744a0..00000000000
--- a/internal/pkg/agent/program/testdata/logstash_config.yml
+++ /dev/null
@@ -1,212 +0,0 @@
-name: Production Website DB Servers
-fleet:
-  enabled: true
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-  server:
-    output:
-      elasticsearch:
-        hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ]
-        username: fleet
-        password: fleetpassword
-
-outputs:
-  default:
-    type: logstash
-    ssl.certificate: abcert
-    ssl.key: abckey
-    hosts: [127.0.0.1:5044]
-    ssl.certificate_authorities:
-      - abc1
-      - abc2
-
-  elasticsearch:
-    type: elasticsearch
-    hosts: [127.0.0.1:9201, 127.0.0.1:9301]
-    bulk_max_size: 23
-    worker: 10
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-  monitoring:
-    type: elasticsearch
-    api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-    hosts: ["monitoring:9200"]
-    ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M="
-
-inputs:
-- id: fleet-server-id
-  type: fleet-server
-  use_output: default
-  data_stream:
-    type: default
-- type: docker/metrics
-  use_output: default
-  streams:
-    - metricset: status
-      processors:
-        - null
-      data_stream:
-        dataset: docker.status
-    - metricset: info
-      data_stream:
-        dataset: ""
-  hosts: ["http://127.0.0.1:8080"]
-- type: logfile
-  use_output: default
-  streams:
-    - paths:
-        - /var/log/hello1.log
-        - /var/log/hello2.log
-      vars:
-        var: value
-- type: logfile
-  data_stream:
-    type: testtype
-  use_output: elasticsearch
-  streams:
-    - paths:
-        - /var/log/hello3.log
-        - /var/log/hello4.log
-      vars:
-        var: value
-- id: apache-metrics-id
-  type: apache/metrics
-  data_stream:
-    namespace: testing
-  use_output: default
-  processors:
-    - add_fields:
-        fields:
-          should_be: first
-  streams:
-    - enabled: true
-      metricset: info
-      hosts: ["http://apache.remote"]
-  hosts: ["http://apache.local"]
-- type: synthetics/http
-  id: unique-http-id
-  name: my-http
-  schedule: '*/5 * * * * * *'
-  host: "http://localhost:80/service/status"
-  timeout: 16s
-  wait: 1s
-- type: packet
-  streams:
-    - type: flow
-      timeout: 10s
-      period: 10s
-      keep_null: false
-      data_stream:
-        dataset: packet.flow
-        type: logs
-    - type: icmp
-      data_stream:
-        dataset: packet.icmp
-        type: logs
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  use_input: default
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-
-agent.monitoring:
-  use_output: monitoring
-
-agent:
-  reload: 123
diff --git a/internal/pkg/agent/program/testdata/namespace-endpoint-security.yml b/internal/pkg/agent/program/testdata/namespace-endpoint-security.yml
deleted file mode 100644
index 7e9f04dc411..00000000000
--- a/internal/pkg/agent/program/testdata/namespace-endpoint-security.yml
+++ /dev/null
@@ -1,114 +0,0 @@
-fleet:
-  enabled: true
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  elasticsearch:
-    hosts:
-      - "127.0.0.1:9200"
-      - "127.0.0.1:9300"
-    namespace: test_namespace
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
diff --git a/internal/pkg/agent/program/testdata/namespace-filebeat.yml b/internal/pkg/agent/program/testdata/namespace-filebeat.yml
deleted file mode 100644
index eafedb688c9..00000000000
--- a/internal/pkg/agent/program/testdata/namespace-filebeat.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-filebeat:
-  inputs:
-    - type: log
-      paths:
-        - /var/log/hello1.log
-        - /var/log/hello2.log
-      index: logs-generic-default
-      vars:
-        var: value
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: logs
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-    - type: log
-      paths:
-        - /var/log/hello3.log
-        - /var/log/hello4.log
-      index: testtype-generic-default
-      vars:
-        var: value
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: testtype
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  elasticsearch:
-    hosts:
-      - 127.0.0.1:9200
-      - 127.0.0.1:9300
-    headers:
-      h1: test-header
-
-    namespace: test_namespace
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
diff --git a/internal/pkg/agent/program/testdata/namespace-fleet-server.yml b/internal/pkg/agent/program/testdata/namespace-fleet-server.yml
deleted file mode 100644
index b306cf40277..00000000000
--- a/internal/pkg/agent/program/testdata/namespace-fleet-server.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-fleet:
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  elasticsearch:
-    hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ]
-    headers:
-      h1: test-header
-    username: fleet
-    password: fleetpassword
-
-inputs:
-  - id: fleet-server-id
-    type: fleet-server
diff --git a/internal/pkg/agent/program/testdata/namespace-heartbeat.yml b/internal/pkg/agent/program/testdata/namespace-heartbeat.yml
deleted file mode 100644
index f34b204f5fa..00000000000
--- a/internal/pkg/agent/program/testdata/namespace-heartbeat.yml
+++ /dev/null
@@ -1,30 +0,0 @@
-inputs:
-- type: synthetics/http
-  id: unique-http-id
-  name: my-http
-  schedule: '*/5 * * * * * *'
-  host: "http://localhost:80/service/status"
-  timeout: 16s
-  wait: 1s
-  data_stream.namespace: default
-  processors:
-    - add_fields:
-        target: 'elastic_agent'
-        fields:
-          id: agent-id
-          version: 8.0.0
-          snapshot: false
-    - add_fields:
-        target: 'agent'
-        fields:
-          id: agent-id
-output:
-  elasticsearch:
-    hosts:
-      - 127.0.0.1:9200
-      - 127.0.0.1:9300
-    namespace: test_namespace
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
diff --git a/internal/pkg/agent/program/testdata/namespace-metricbeat.yml b/internal/pkg/agent/program/testdata/namespace-metricbeat.yml
deleted file mode 100644
index d0d4c24f058..00000000000
--- a/internal/pkg/agent/program/testdata/namespace-metricbeat.yml
+++ /dev/null
@@ -1,91 +0,0 @@
-metricbeat:
-  modules:
-    - module: docker
-      metricsets: [status]
-      index: metrics-docker.status-default
-      hosts: ["http://127.0.0.1:8080"]
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: metrics
-              dataset: docker.status
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: docker.status
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-    - module: docker
-      metricsets: [info]
-      index: metrics-generic-default
-      hosts: ["http://127.0.0.1:8080"]
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: metrics
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-    - module: apache
-      metricsets: [info]
-      index: metrics-generic-testing
-      hosts: ["http://apache.remote"]
-      processors:
-        - add_fields:
-            fields:
-              should_be: first
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: metrics
-              dataset: generic
-              namespace: testing
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  elasticsearch:
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    headers:
-      h1: test-header
-
-    namespace: test_namespace
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
diff --git a/internal/pkg/agent/program/testdata/namespace-packetbeat.yml b/internal/pkg/agent/program/testdata/namespace-packetbeat.yml
deleted file mode 100644
index d71499bdef4..00000000000
--- a/internal/pkg/agent/program/testdata/namespace-packetbeat.yml
+++ /dev/null
@@ -1,35 +0,0 @@
-inputs:
-- type: packet
-  processors:
-    - add_fields:
-        target: 'elastic_agent'
-        fields:
-          id: agent-id
-          version: 8.0.0
-          snapshot: false
-    - add_fields:
-        target: 'agent'
-        fields:
-          id: agent-id
-  streams:
-    - type: flow
-      timeout: 10s
-      period: 10s
-      keep_null: false
-      data_stream:
-        dataset: packet.flow
-        type: logs
-    - type: icmp
-      data_stream:
-        dataset: packet.icmp
-        type: logs
-output:
-  elasticsearch:
-    hosts:
-      - 127.0.0.1:9200
-      - 127.0.0.1:9300
-    namespace: test_namespace
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
diff --git a/internal/pkg/agent/program/testdata/namespace.yml b/internal/pkg/agent/program/testdata/namespace.yml
deleted file mode 100644
index c2f83a9abf0..00000000000
--- a/internal/pkg/agent/program/testdata/namespace.yml
+++ /dev/null
@@ -1,201 +0,0 @@
-name: Production Website DB Servers
-fleet:
-  enabled: true
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-  server:
-    output:
-      elasticsearch:
-        hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ]
-        username: fleet
-        password: fleetpassword
-
-outputs:
-  default:
-    type: elasticsearch
-    namespace: test_namespace
-    hosts: [127.0.0.1:9200, 127.0.0.1:9300]
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-  monitoring:
-    type: elasticsearch
-    api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-    hosts: ["monitoring:9200"]
-    ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M="
-
-inputs:
-- id: fleet-server-id
-  type: fleet-server
-  use_output: default
-  data_stream:
-    type: default
-- type: docker/metrics
-  use_output: default
-  streams:
-    - metricset: status
-      processors:
-        - null
-      data_stream:
-        dataset: docker.status
-    - metricset: info
-      data_stream:
-        dataset: ""
-  hosts: ["http://127.0.0.1:8080"]
-- type: logfile
-  use_output: default
-  streams:
-    - paths:
-        - /var/log/hello1.log
-        - /var/log/hello2.log
-      vars:
-        var: value
-- type: logfile
-  data_stream:
-    type: testtype
-  use_output: default
-  streams:
-    - paths:
-        - /var/log/hello3.log
-        - /var/log/hello4.log
-      vars:
-        var: value
-- id: apache-metrics-id
-  type: apache/metrics
-  data_stream:
-    namespace: testing
-  use_output: default
-  processors:
-    - add_fields:
-        fields:
-          should_be: first
-  streams:
-    - enabled: true
-      metricset: info
-      hosts: ["http://apache.remote"]
-  hosts: ["http://apache.local"]
-- type: synthetics/http
-  id: unique-http-id
-  name: my-http
-  schedule: '*/5 * * * * * *'
-  host: "http://localhost:80/service/status"
-  timeout: 16s
-  wait: 1s
-- type: packet
-  streams:
-    - type: flow
-      timeout: 10s
-      period: 10s
-      keep_null: false
-      data_stream:
-        dataset: packet.flow
-        type: logs
-    - type: icmp
-      data_stream:
-        dataset: packet.icmp
-        type: logs
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-
-agent.monitoring:
-  use_output: monitoring
-
-agent:
-  reload: 123
diff --git a/internal/pkg/agent/program/testdata/single_config-endpoint-security.yml b/internal/pkg/agent/program/testdata/single_config-endpoint-security.yml
deleted file mode 100644
index f7bcdb284c4..00000000000
--- a/internal/pkg/agent/program/testdata/single_config-endpoint-security.yml
+++ /dev/null
@@ -1,115 +0,0 @@
-fleet:
-  enabled: true
-  access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw
-  protocol: https
-  hosts: [ localhost:5601 ]
-  timeout: 30s
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  elasticsearch:
-    hosts:
-      - "127.0.0.1:9200"
-      - "127.0.0.1:9300"
-    bulk_max_size: 23
-    worker: 10
-    username: elastic
-    password: changeme
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
-
-inputs:
-- id: endpoint-id
-  type: endpoint
-  name: endpoint-1
-  enabled: true
-  package:
-    name: endpoint
-    version: 0.3.0
-  data_stream:
-    namespace: default
-  artifact_manifest:
-    schema_version: v22
-    manifest_version: v21
-    artifacts:
-      - endpoint-allowlist-windows:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-windows
-      - endpoint-allowlist-macos:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-macos
-      - endpoint-allowlist-linux:
-          sha256: 1234
-          size: 2
-          url: /relative/path/to/endpoint-allowlist-linux
-  policy:
-    linux:
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
-    windows:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        registry: logs-endpoint.events.registry-default
-        process: logs-endpoint.events.process-default
-        driver: logs-endpoint.events.driver-default
-        library: logs-endpoint.events.library-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        registry: true
-        process: true
-        security: true
-        file: true
-        dns: false
-        dll_and_driver_load: false
-        network: true
-    mac:
-      malware:
-        mode: prevent
-      advanced:
-        free-form: free-form-value
-      indices:
-        network: logs-endpoint.events.network-default
-        file: logs-endpoint.events.file-default
-        process: logs-endpoint.events.process-default
-        alerts: logs-endpoint.alerts-default
-        metadata: metrics-endpoint.metadata-default
-        policy: metrics-endpoint.policy-default
-        telemetry: metrics-endpoint.telemetry-default
-      logging:
-        file: info
-        stdout: debug
-      events:
-        process: true
-        file: true
-        network: true
diff --git a/internal/pkg/agent/program/testdata/single_config-filebeat.yml b/internal/pkg/agent/program/testdata/single_config-filebeat.yml
deleted file mode 100644
index e628cd2c098..00000000000
--- a/internal/pkg/agent/program/testdata/single_config-filebeat.yml
+++ /dev/null
@@ -1,71 +0,0 @@
-filebeat:
-  inputs:
-    - type: log
-      paths:
-        - /var/log/hello1.log
-        - /var/log/hello2.log
-      index: logs-generic-default
-      vars:
-        var: value
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: logs
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-    - type: log
-      paths:
-        - /var/log/hello3.log
-        - /var/log/hello4.log
-      index: testtype-generic-default
-      vars:
-        var: value
-      processors:
-        - add_fields:
-            target: "data_stream"
-            fields:
-              type: testtype
-              dataset: generic
-              namespace: default
-        - add_fields:
-            target: "event"
-            fields:
-              dataset: generic
-        - add_fields:
-            target: "elastic_agent"
-            fields:
-              id: agent-id
-              version: 8.0.0
-              snapshot: false
-        - add_fields:
-            target: "agent"
-            fields:
-              id: agent-id
-output:
-  elasticsearch:
-    hosts:
-      - 127.0.0.1:9200
-      - 127.0.0.1:9300
-    headers:
-      h1: test-header
-    username: elastic
-    password: changeme
-    bulk_max_size: 23
-    worker: 10
-    api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA
-    ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y=
diff --git a/internal/pkg/agent/program/testdata/single_config-fleet-server.yml b/internal/pkg/agent/program/testdata/single_config-fleet-server.yml
deleted file mode 100644
index b306cf40277..00000000000
--- a/internal/pkg/agent/program/testdata/single_config-fleet-server.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-fleet:
-  agent:
-    id: fleet-agent-id
-    logging.level: error
-  host:
-    id: host-agent-id
-
-output:
-  elasticsearch:
-    hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ]
-    headers:
-      h1: test-header
-    username: fleet
-    password: fleetpassword
-
-inputs:
-  - id: fleet-server-id
-    type: fleet-server
diff --git a/internal/pkg/agent/program/testdata/single_config-heartbeat.yml b/internal/pkg/agent/program/testdata/single_config-heartbeat.yml
deleted file mode 100644
index 800f4100382..00000000000
--- a/internal/pkg/agent/program/testdata/single_config-heartbeat.yml
+++ /dev/null
@@ -1,31 +0,0 @@
-inputs:
-- type: synthetics/http
-  id: unique-http-id
-  name: my-http
-  schedule: '*/5 * * * * * *'
-  host: "http://localhost:80/service/status"
-  timeout: 16s
-  wait: 1s
-  data_stream.namespace: default
-  processors:
-    - add_fields:
-        target: 'elastic_agent'
-        fields:
-          id: agent-id
-          version: 8.0.0
-          snapshot: false
-    - add_fields:
-        target: 'agent'
-        fields:
-          id: agent-id
-output:
-  elasticsearch:
-    hosts:
-      - 127.0.0.1:9200
-      - 127.0.0.1:9300
-    bulk_max_size: 23
-    worker: 10
-    username: elastic
-    password: changeme
-    api_key: 
TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/single_config-metricbeat.yml b/internal/pkg/agent/program/testdata/single_config-metricbeat.yml deleted file mode 100644 index a2c36b151f0..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-metricbeat.yml +++ /dev/null @@ -1,91 +0,0 @@ -metricbeat: - modules: - - module: docker - metricsets: [status] - index: metrics-docker.status-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: docker.status - namespace: default - - add_fields: - target: "event" - fields: - dataset: docker.status - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: docker - metricsets: [info] - index: metrics-generic-default - hosts: ["http://127.0.0.1:8080"] - processors: - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: default - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id - - module: apache - metricsets: [info] - index: metrics-generic-testing - hosts: ["http://apache.remote"] - processors: - - add_fields: - fields: - should_be: first - - add_fields: - target: "data_stream" - fields: - type: metrics - dataset: generic - namespace: testing - - add_fields: - target: "event" - fields: - dataset: generic - - add_fields: - target: "elastic_agent" - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: "agent" - fields: - id: agent-id -output: - elasticsearch: - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - headers: - h1: test-header - username: elastic - password: changeme - bulk_max_size: 23 - worker: 10 - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/single_config-packetbeat.yml b/internal/pkg/agent/program/testdata/single_config-packetbeat.yml deleted file mode 100644 index f800d0bd2a0..00000000000 --- a/internal/pkg/agent/program/testdata/single_config-packetbeat.yml +++ /dev/null @@ -1,36 +0,0 @@ -inputs: -- type: packet - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - username: elastic - password: changeme - bulk_max_size: 23 - worker: 10 - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= diff --git a/internal/pkg/agent/program/testdata/single_config.yml b/internal/pkg/agent/program/testdata/single_config.yml deleted file mode 100644 index 16a03f9a77d..00000000000 --- a/internal/pkg/agent/program/testdata/single_config.yml +++ /dev/null @@ -1,202 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - 
id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - bulk_max_size: 23 - worker: 10 - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - data_stream: - type: testtype - use_output: default - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - 
security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml b/internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml deleted file mode 100644 index 284d391f78b..00000000000 --- a/internal/pkg/agent/program/testdata/synthetics_config-heartbeat.yml +++ /dev/null @@ -1,66 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -- type: synthetics/tcp - id: unique-tcp-id - name: my-tcp - schedule: '*/5 * * * * * *' - host: "localhost:777" - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -- type: synthetics/icmp - id: unique-icmp-id - name: my-icmp - schedule: '@every 5s' - host: "localhost" - ipv4: true - ipv6: true - mode: any - timeout: 16s - wait: 1s - data_stream.namespace: default - processors: - - add_fields: - target: 'elastic_agent' - fields: - id: agent-id - version: 8.0.0 - snapshot: false - - add_fields: - target: 'agent' - fields: - id: agent-id -output: - elasticsearch: - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/synthetics_config.yml b/internal/pkg/agent/program/testdata/synthetics_config.yml deleted file mode 100644 index 74aa9916a65..00000000000 --- a/internal/pkg/agent/program/testdata/synthetics_config.yml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: synthetics/tcp - id: unique-tcp-id - name: my-tcp - schedule: '*/5 * * * * * *' - host: "localhost:777" - timeout: 16s - wait: 1s -- type: synthetics/icmp - id: unique-icmp-id - name: my-icmp - schedule: '@every 5s' - host: "localhost" - ipv4: true - ipv6: true - mode: any - timeout: 16s - wait: 1s -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml b/internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml deleted file mode 100644 index 9601388c536..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/enabled_output_true.yml +++ /dev/null @@ -1,17 +0,0 @@ -inputs: - - type: event/file - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - 
default: - type: elasticsearch - enabled: true - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/usecases/enabled_true.yml b/internal/pkg/agent/program/testdata/usecases/enabled_true.yml deleted file mode 100644 index 6afc7f37ab1..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/enabled_true.yml +++ /dev/null @@ -1,22 +0,0 @@ -name: Production Website DB Servers -fleet: - kibana_url: https://kibana.mydomain.com:5601 - ca_hash: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - checkin_interval: 5m -inputs: - - type: event/file - streams: - - enabled: true - paths: - - /var/log/hello1.log - - /var/log/hello2.log -management: - host: "localhost" -config: - reload: 123 -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml b/internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml deleted file mode 100644 index 9f438cd46fd..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/endpoint_basic.yml +++ /dev/null @@ -1,115 +0,0 @@ -revision: 5 -name: Endpoint Host -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: 
logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true -runtime: - arch: x86_64 diff --git a/internal/pkg/agent/program/testdata/usecases/fleet_server.yml b/internal/pkg/agent/program/testdata/usecases/fleet_server.yml deleted file mode 100644 index a816197917e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/fleet_server.yml +++ /dev/null @@ -1,51 +0,0 @@ -name: Fleet Server Only -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - host: 127.0.0.1 - port: 8822 - ssl: - verification_mode: none - policy: - id: copy-policy-id - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - -inputs: - - id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - namespace: default - name: fleet_server-1 - revision: 6 - server: - host: 0.0.0.0 - port: 8220 - limits: - max_connections: 40 - runtime: - gc_percent: 50 - timeouts: - read: 5m diff --git a/internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml deleted file mode 100644 index 6b898a6128a..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/enabled_output_true.filebeat.golden.yml +++ /dev/null @@ -1,38 +0,0 @@ -filebeat: - inputs: - - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log -output: - elasticsearch: - enabled: true - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml deleted file mode 100644 index 197bf2f6232..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/enabled_true.filebeat.golden.yml +++ /dev/null @@ -1,38 +0,0 @@ -filebeat: - inputs: - - enabled: true - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log -output: - elasticsearch: - headers: - h1: 
test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml deleted file mode 100644 index 6359f9185b8..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/endpoint_basic.endpoint-security.golden.yml +++ /dev/null @@ -1,112 +0,0 @@ -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - hosts: - - localhost:5601 - protocol: https - timeout: 30s -inputs: -- artifact_manifest: - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - manifest_version: v21 - schema_version: v22 - data_stream: - namespace: default - enabled: true - id: endpoint-id - name: endpoint-1 - package: - name: endpoint - version: 0.3.0 - policy: - linux: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - mac: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - alerts: logs-endpoint.alerts-default - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - windows: - advanced: - free-form: free-form-value - events: - dll_and_driver_load: false - dns: false - file: true - network: true - process: true - registry: true - security: true - indices: - alerts: logs-endpoint.alerts-default - driver: logs-endpoint.events.driver-default - file: logs-endpoint.events.file-default - library: logs-endpoint.events.library-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - registry: logs-endpoint.events.registry-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - type: endpoint -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic -revision: 5 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml deleted file mode 100644 index 01dc3bd3c89..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/fleet_server.fleet-server.golden.yml +++ /dev/null @@ -1,33 +0,0 @@ 
-fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id -inputs: -- id: fleet-server-id - name: fleet_server-1 - policy: - id: copy-policy-id - revision: 6 - server: - host: 0.0.0.0 - limits: - max_connections: 40 - port: 8220 - runtime: - gc_percent: 50 - ssl: - verification_mode: none - timeouts: - read: 5m - type: fleet-server -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: fleetpassword - username: fleet diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml deleted file mode 100644 index 7c8b033c4e6..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.endpoint-security.golden.yml +++ /dev/null @@ -1,113 +0,0 @@ -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - agent: - id: fleet-agent-id - logging.level: error - enabled: true - host: - id: host-agent-id - hosts: - - localhost:5601 - protocol: https - timeout: 30s -inputs: -- artifact_manifest: - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - manifest_version: v21 - schema_version: v22 - data_stream: - namespace: default - enabled: true - id: endpoint-id - name: endpoint-1 - package: - name: endpoint - version: 0.3.0 - policy: - linux: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - mac: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - alerts: logs-endpoint.alerts-default - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - windows: - advanced: - free-form: free-form-value - events: - dll_and_driver_load: false - dns: false - file: true - network: true - process: true - registry: true - security: true - indices: - alerts: logs-endpoint.alerts-default - driver: logs-endpoint.events.driver-default - file: logs-endpoint.events.file-default - library: logs-endpoint.events.library-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - registry: logs-endpoint.events.registry-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - type: endpoint -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: 
test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml deleted file mode 100644 index 2def5f274de..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.filebeat.golden.yml +++ /dev/null @@ -1,70 +0,0 @@ -filebeat: - inputs: - - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value - - index: testtype-generic-default - paths: - - /var/log/hello3.log - - /var/log/hello4.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: testtype - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml deleted file mode 100644 index ab7499a4f11..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.fleet-server.golden.yml +++ /dev/null @@ -1,18 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id -inputs: -- id: fleet-server-id - type: fleet-server -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: fleetpassword - username: fleet diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml deleted file mode 100644 index c18573ee780..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.heartbeat.golden.yml +++ /dev/null @@ -1,30 +0,0 @@ -inputs: -- data_stream.namespace: default - host: http://localhost:80/service/status - id: unique-http-id - name: my-http - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/http - wait: 1s -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml deleted file mode 100644 index 3232297227a..00000000000 --- 
a/internal/pkg/agent/program/testdata/usecases/generated/namespace.metricbeat.golden.yml +++ /dev/null @@ -1,98 +0,0 @@ -metricbeat: - modules: - - hosts: - - http://127.0.0.1:8080 - index: metrics-docker.status-default - metricsets: - - status - module: docker - processors: - - add_fields: - fields: - dataset: docker.status - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: docker.status - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://127.0.0.1:8080 - index: metrics-generic-default - metricsets: - - info - module: docker - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://apache.remote - index: metrics-generic-testing - metricsets: - - info - module: apache - processors: - - add_fields: - fields: - should_be: first - - add_fields: - fields: - dataset: generic - namespace: testing - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml deleted file mode 100644 index cc38887ff8e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/namespace.packetbeat.golden.yml +++ /dev/null @@ -1,35 +0,0 @@ -inputs: -- processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - streams: - - data_stream: - dataset: packet.flow - type: logs - keep_null: false - period: 10s - timeout: 10s - type: flow - - data_stream: - dataset: packet.icmp - type: logs - type: icmp - type: packet -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - namespace: test_namespace - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml deleted file mode 100644 index 552e169bbac..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.endpoint-security.golden.yml +++ /dev/null @@ -1,114 +0,0 @@ -fleet: - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - agent: - id: fleet-agent-id - logging.level: error - enabled: true - host: - id: host-agent-id - hosts: - - localhost:5601 - protocol: https - timeout: 30s -inputs: -- artifact_manifest: - artifacts: - - endpoint-allowlist-windows: - 
sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - manifest_version: v21 - schema_version: v22 - data_stream: - namespace: default - enabled: true - id: endpoint-id - name: endpoint-1 - package: - name: endpoint - version: 0.3.0 - policy: - linux: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - mac: - advanced: - free-form: free-form-value - events: - file: true - network: true - process: true - indices: - alerts: logs-endpoint.alerts-default - file: logs-endpoint.events.file-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - windows: - advanced: - free-form: free-form-value - events: - dll_and_driver_load: false - dns: false - file: true - network: true - process: true - registry: true - security: true - indices: - alerts: logs-endpoint.alerts-default - driver: logs-endpoint.events.driver-default - file: logs-endpoint.events.file-default - library: logs-endpoint.events.library-default - metadata: metrics-endpoint.metadata-default - network: logs-endpoint.events.network-default - policy: metrics-endpoint.policy-default - process: logs-endpoint.events.process-default - registry: logs-endpoint.events.registry-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - malware: - mode: prevent - type: endpoint -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml deleted file mode 100644 index 22055f9e09e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.filebeat.golden.yml +++ /dev/null @@ -1,73 +0,0 @@ -filebeat: - inputs: - - id: logfile-1 - index: logs-generic-default - paths: - - /var/log/hello1.log - - /var/log/hello2.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: logs - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value - - id: logfile-2 - index: testtype-generic-default - paths: - - /var/log/hello3.log - - /var/log/hello4.log - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: testtype - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - 
add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - type: log - vars: - var: value -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml deleted file mode 100644 index ab7499a4f11..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.fleet-server.golden.yml +++ /dev/null @@ -1,18 +0,0 @@ -fleet: - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id -inputs: -- id: fleet-server-id - type: fleet-server -output: - elasticsearch: - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: fleetpassword - username: fleet diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml deleted file mode 100644 index f4c5827603a..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.heartbeat.golden.yml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: -- data_stream.namespace: default - host: http://localhost:80/service/status - id: unique-http-id - name: my-http - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/http - wait: 1s -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml deleted file mode 100644 index aca14055635..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.metricbeat.golden.yml +++ /dev/null @@ -1,99 +0,0 @@ -metricbeat: - modules: - - hosts: - - http://127.0.0.1:8080 - index: metrics-docker.status-default - metricsets: - - status - module: docker - processors: - - add_fields: - fields: - dataset: docker.status - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: docker.status - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://127.0.0.1:8080 - index: metrics-generic-default - metricsets: - - info - module: docker - processors: - - add_fields: - fields: - dataset: generic - namespace: default - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - - hosts: - - http://apache.remote - index: metrics-generic-testing - 
metricsets: - - info - module: apache - processors: - - add_fields: - fields: - should_be: first - - add_fields: - fields: - dataset: generic - namespace: testing - type: metrics - target: data_stream - - add_fields: - fields: - dataset: generic - target: event - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - headers: - h1: test-header - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml deleted file mode 100644 index e7f13deb0a2..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/single_config.packetbeat.golden.yml +++ /dev/null @@ -1,36 +0,0 @@ -inputs: -- processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - streams: - - data_stream: - dataset: packet.flow - type: logs - keep_null: false - period: 10s - timeout: 10s - type: flow - - data_stream: - dataset: packet.icmp - type: logs - type: icmp - type: packet -output: - elasticsearch: - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - bulk_max_size: 23 - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic - worker: 10 diff --git a/internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml b/internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml deleted file mode 100644 index 870a0070f4e..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/generated/synthetics_config.heartbeat.golden.yml +++ /dev/null @@ -1,68 +0,0 @@ -inputs: -- data_stream.namespace: default - host: http://localhost:80/service/status - id: unique-http-id - name: my-http - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/http - wait: 1s -- data_stream.namespace: default - host: localhost:777 - id: unique-tcp-id - name: my-tcp - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '*/5 * * * * * *' - timeout: 16s - type: synthetics/tcp - wait: 1s -- data_stream.namespace: default - host: localhost - id: unique-icmp-id - ipv4: true - ipv6: true - mode: any - name: my-icmp - processors: - - add_fields: - fields: - id: agent-id - snapshot: false - version: 8.0.0 - target: elastic_agent - - add_fields: - fields: - id: agent-id - target: agent - schedule: '@every 5s' - timeout: 16s - type: synthetics/icmp - wait: 1s -output: - elasticsearch: - hosts: - - 127.0.0.1:9200 - - 127.0.0.1:9300 - password: changeme - username: elastic diff --git a/internal/pkg/agent/program/testdata/usecases/namespace.yml b/internal/pkg/agent/program/testdata/usecases/namespace.yml deleted file mode 100644 index c2f83a9abf0..00000000000 --- 
a/internal/pkg/agent/program/testdata/usecases/namespace.yml +++ /dev/null @@ -1,201 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - namespace: test_namespace - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - data_stream: - type: testtype - use_output: default - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: 
logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/usecases/single_config.yml b/internal/pkg/agent/program/testdata/usecases/single_config.yml deleted file mode 100644 index 654453af2c6..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/single_config.yml +++ /dev/null @@ -1,204 +0,0 @@ -name: Production Website DB Servers -fleet: - enabled: true - access_api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - protocol: https - hosts: [ localhost:5601 ] - timeout: 30s - agent: - id: fleet-agent-id - logging.level: error - host: - id: host-agent-id - server: - output: - elasticsearch: - hosts: [ 127.0.0.1:9200, 127.0.0.1:9300 ] - username: fleet - password: fleetpassword - -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - bulk_max_size: 23 - worker: 10 - username: elastic - password: changeme - api_key: TiNAGG4BaaMdaH1tRfuU:KnR6yE41RrSowb0kQ0HWoA - ca_sha256: 7HIpactkIAq2Y49orFOOQKurWxmmSFZhBCoQYcRhJ3Y= - - monitoring: - type: elasticsearch - api_key: VuaCfGcBCdbkQm-e5aOx:ui2lp2axTNmsyakw9tvNnw - hosts: ["monitoring:9200"] - ca_sha256: "7lHLiyp4J8m9kw38SJ7SURJP4bXRZv/BNxyyXkCcE/M=" - -inputs: -- id: fleet-server-id - type: fleet-server - use_output: default - data_stream: - type: default -- type: docker/metrics - use_output: default - streams: - - metricset: status - processors: - - null - data_stream: - dataset: docker.status - - metricset: info - data_stream: - dataset: "" - hosts: ["http://127.0.0.1:8080"] -- type: logfile - id: logfile-1 - use_output: default - streams: - - paths: - - /var/log/hello1.log - - /var/log/hello2.log - vars: - var: value -- type: logfile - id: logfile-2 - data_stream: - type: testtype - use_output: default - streams: - - paths: - - /var/log/hello3.log - - /var/log/hello4.log - vars: - var: value -- id: apache-metrics-id - type: apache/metrics - data_stream: - namespace: testing - use_output: default - processors: - - add_fields: - fields: - should_be: first - streams: - - enabled: true - metricset: info - hosts: ["http://apache.remote"] - hosts: ["http://apache.local"] -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: packet - streams: - - type: flow - timeout: 10s - period: 10s - keep_null: false - data_stream: - dataset: packet.flow - type: logs - - type: icmp - data_stream: - dataset: packet.icmp - type: logs -- id: endpoint-id - type: endpoint - name: endpoint-1 - enabled: true - package: - name: endpoint - version: 0.3.0 - data_stream: - namespace: default - 
artifact_manifest: - schema_version: v22 - manifest_version: v21 - artifacts: - - endpoint-allowlist-windows: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-windows - - endpoint-allowlist-macos: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-macos - - endpoint-allowlist-linux: - sha256: 1234 - size: 2 - url: /relative/path/to/endpoint-allowlist-linux - policy: - linux: - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - windows: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - registry: logs-endpoint.events.registry-default - process: logs-endpoint.events.process-default - driver: logs-endpoint.events.driver-default - library: logs-endpoint.events.library-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - registry: true - process: true - security: true - file: true - dns: false - dll_and_driver_load: false - network: true - mac: - malware: - mode: prevent - advanced: - free-form: free-form-value - indices: - network: logs-endpoint.events.network-default - file: logs-endpoint.events.file-default - process: logs-endpoint.events.process-default - alerts: logs-endpoint.alerts-default - metadata: metrics-endpoint.metadata-default - policy: metrics-endpoint.policy-default - telemetry: metrics-endpoint.telemetry-default - logging: - file: info - stdout: debug - events: - process: true - file: true - network: true - -agent.monitoring: - use_output: monitoring - -agent: - reload: 123 diff --git a/internal/pkg/agent/program/testdata/usecases/synthetics_config.yml b/internal/pkg/agent/program/testdata/usecases/synthetics_config.yml deleted file mode 100644 index 74aa9916a65..00000000000 --- a/internal/pkg/agent/program/testdata/usecases/synthetics_config.yml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: -- type: synthetics/http - id: unique-http-id - name: my-http - schedule: '*/5 * * * * * *' - host: "http://localhost:80/service/status" - timeout: 16s - wait: 1s -- type: synthetics/tcp - id: unique-tcp-id - name: my-tcp - schedule: '*/5 * * * * * *' - host: "localhost:777" - timeout: 16s - wait: 1s -- type: synthetics/icmp - id: unique-icmp-id - name: my-icmp - schedule: '@every 5s' - host: "localhost" - ipv4: true - ipv6: true - mode: any - timeout: 16s - wait: 1s -outputs: - default: - type: elasticsearch - hosts: [127.0.0.1:9200, 127.0.0.1:9300] - username: elastic - password: changeme diff --git a/internal/pkg/agent/stateresolver/resolve.go b/internal/pkg/agent/stateresolver/resolve.go deleted file mode 100644 index 5afe2256cb6..00000000000 --- a/internal/pkg/agent/stateresolver/resolve.go +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package stateresolver - -import ( - "sort" - "strings" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/release" -) - -//go:generate stringer -type=stateChange -linecomment=true - -const shortID = 8 - -// stateChange represent a how a process is modified between configuration change. -type stateChange uint8 - -const ( - startState stateChange = iota + 1 // START - updateState // UPDATE - unchangedState // UNCHANGED -) - -// state represent the SHOULD state of the system, contains a reference to the actual bundle of -// configuration received by the upstream call and keep track of the last change executed on a program. -// -// The list of change are the following: -// start: first time to see that configuration and decide to start a new process. -// update: need to update the process switch a new configuration. -// unchanged: keep running the process with the actual configuration. -type state struct { - ID string - LastModified time.Time - Active map[string]active -} - -func (s *state) ShortID() string { - if len(s.ID) <= shortID { - return s.ID - } - return s.ID[0:shortID] -} - -func (s *state) String() string { - var str strings.Builder - str.WriteString("ID:" + s.ID + ", LastModified: " + s.LastModified.String()) - str.WriteString("Active Process [\n") - for _, a := range s.Active { - str.WriteString(a.String()) - } - str.WriteString("]") - - return str.String() -} - -type active struct { - LastChange stateChange - LastModified time.Time - Identifier string - Program program.Program -} - -func (s *active) String() string { - return "Identifier: " + s.Identifier + - ", LastChange: " + s.LastChange.String() + - ", LastModified: " + s.LastModified.String() + - ", Checksum: " + s.Program.Checksum() -} - -type cfgReq interface { - ID() string - CreatedAt() time.Time - Programs() []program.Program -} - -// Converge converges the system, take the current sate and create a new should state and all the steps -// required to go from current state to the new state. -func converge(s state, cfg cfgReq) (state, []configrequest.Step) { - newState := state{ - ID: cfg.ID(), - LastModified: cfg.CreatedAt(), - Active: make(map[string]active, len(cfg.Programs())), - } - - steps := make([]configrequest.Step, 0) - - // Find process that must be stopped. - activeKeys := getActiveKeys(s.Active) - for _, id := range activeKeys { - active := s.Active[id] - - var found bool - for _, p := range cfg.Programs() { - // Still need to run the process. - if id == p.Identifier() { - found = true - break - } - } - - if !found { - steps = append(steps, configrequest.Step{ - ID: configrequest.StepRemove, - ProgramSpec: active.Program.Spec, - Version: release.Version(), - }) - } - } - - // What need to be started or updated. - for _, p := range cfg.Programs() { - a, found := s.Active[p.Identifier()] - if !found { - newState.Active[p.Identifier()] = active{ - LastChange: startState, - LastModified: cfg.CreatedAt(), - Identifier: p.Identifier(), - Program: p, - } - - steps = append(steps, configrequest.Step{ - ID: configrequest.StepRun, - ProgramSpec: p.Spec, - Version: release.Version(), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: p.Configuration(), - }, - }) - - // Complete new process, skip to the next process. - continue - } - - // Checksum doesn't match and we force an update of the process. 
- if a.Program.Checksum() != p.Checksum() { - newState.Active[p.Identifier()] = active{ - LastChange: updateState, - LastModified: cfg.CreatedAt(), - Identifier: p.Identifier(), - Program: p, - } - steps = append(steps, configrequest.Step{ - ID: configrequest.StepRun, - ProgramSpec: p.Spec, - Version: release.Version(), - Meta: map[string]interface{}{ - configrequest.MetaConfigKey: p.Configuration(), - }, - }) - } else { - // Configuration did not change in this loop so we keep - // the last configuration as is. - a.LastChange = unchangedState - newState.Active[p.Identifier()] = a - } - } - - // What need to be updated. - return newState, steps -} - -func getActiveKeys(aa map[string]active) []string { - keys := make([]string, 0, len(aa)) - for k := range aa { - keys = append(keys, k) - } - - sort.Strings(keys) - - return keys -} diff --git a/internal/pkg/agent/stateresolver/resolve_test.go b/internal/pkg/agent/stateresolver/resolve_test.go deleted file mode 100644 index 4276ca39639..00000000000 --- a/internal/pkg/agent/stateresolver/resolve_test.go +++ /dev/null @@ -1,396 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package stateresolver - -import ( - "regexp" - "testing" - "time" - - "github.com/google/go-cmp/cmp/cmpopts" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configrequest" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" - "github.com/elastic/elastic-agent/internal/pkg/release" -) - -func TestResolver(t *testing.T) { - fb1 := fb("1") - fb2 := fb("2") - mb1 := mb("2") - tn := time.Now() - tn2 := time.Now().Add(time.Minute * 5) - - testcases := map[string]struct { - submit cfgReq - cur state - should state - steps []configrequest.Step - }{ - "from no programs to running program": { - submit: &cfg{ - id: "config-1", - createdAt: tn, - programs: []program.Program{ - fb1, mb1, - }, - }, - cur: state{}, // empty state - should: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRun, - ProgramSpec: fb1.Spec, - Version: release.Version(), - Meta: withMeta(fb1), - }, - { - ID: configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "adding a program to an already running system": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{ - fb1, mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{ - "filebeat": { - LastChange: unchangedState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn2, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: 
configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "updating an already running program": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{ - fb2, mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{ - "filebeat": { - LastChange: updateState, - LastModified: tn2, - Identifier: "filebeat", - Program: fb2, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn2, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRun, - ProgramSpec: fb2.Spec, - Version: release.Version(), - Meta: withMeta(fb2), - }, - { - ID: configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "remove a running program and start a new one": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{ - mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{ - "metricbeat": { - LastChange: startState, - LastModified: tn2, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRemove, - ProgramSpec: fb1.Spec, - Version: release.Version(), - }, - { - ID: configrequest.StepRun, - ProgramSpec: mb1.Spec, - Version: release.Version(), - Meta: withMeta(mb1), - }, - }, - }, - "stops all runnings programs": { - submit: &cfg{ - id: "config-2", - createdAt: tn2, - programs: []program.Program{}, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - should: state{ - ID: "config-2", - LastModified: tn2, - Active: map[string]active{}, - }, - steps: []configrequest.Step{ - { - ID: configrequest.StepRemove, - ProgramSpec: fb1.Spec, - Version: release.Version(), - }, - { - ID: configrequest.StepRemove, - ProgramSpec: mb1.Spec, - Version: release.Version(), - }, - }, - }, - "no changes detected": { - submit: &cfg{ - id: "config-1", - createdAt: tn, - programs: []program.Program{ - fb1, mb1, - }, - }, - cur: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: startState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: startState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - should: state{ - ID: "config-1", - LastModified: tn, - Active: map[string]active{ - "filebeat": { - LastChange: unchangedState, - LastModified: tn, - Identifier: "filebeat", - Program: fb1, - }, - "metricbeat": { - LastChange: unchangedState, - LastModified: tn, - Identifier: "metricbeat", - Program: mb1, - }, - }, - }, - steps: []configrequest.Step{}, - }, - } - - for name, test := range testcases { - t.Run(name, func(t *testing.T) { - should, steps := 
converge(test.cur, test.submit) - - require.Equal(t, test.should.ID, should.ID) - require.Equal(t, test.should.LastModified, should.LastModified) - - require.Equal(t, len(test.steps), len(steps), "steps count don't match") - require.Equal(t, len(test.should.Active), len(should.Active), "active count don't match") - - for id, a := range test.should.Active { - compare := should.Active[id] - require.Equal(t, a.LastModified, compare.LastModified) - require.Equal(t, a.Identifier, compare.Identifier) - require.Equal(t, a.LastChange, compare.LastChange) - require.Equal(t, a.Program.Checksum(), compare.Program.Checksum()) - } - - if diff := cmp.Diff(test.steps, steps, cmpopts.IgnoreUnexported(regexp.Regexp{})); diff != "" { - t.Errorf("converge() mismatch (-want +got):\n%s", diff) - } - }) - } -} - -type cfg struct { - id string - createdAt time.Time - programs []program.Program -} - -func (c *cfg) ID() string { - return c.id -} - -func (c *cfg) ShortID() string { - return c.id -} - -func (c *cfg) Programs() []program.Program { - return c.programs -} - -func (c *cfg) CreatedAt() time.Time { - return c.createdAt -} - -func (c *cfg) ProgramNames() []string { - names := make([]string, 0, len(c.programs)) - for _, name := range c.programs { - names = append(names, name.Spec.Name) - } - return names -} - -func p(identifier, checksum string) program.Program { - s, ok := program.FindSpecByName(identifier) - if !ok { - panic("can't find spec with identifier " + identifier) - } - return program.Program{ - Spec: s, - Config: transpiler.MustNewAST(map[string]interface{}{ - s.Name: map[string]interface{}{ - "checksum": checksum, // make sure checksum is different between configuration change. - }, - }), - } -} - -func fb(checksum string) program.Program { - return p("Filebeat", checksum) -} - -func mb(checksum string) program.Program { - return p("Metricbeat", checksum) -} - -func withMeta(prog program.Program) map[string]interface{} { - return map[string]interface{}{ - configrequest.MetaConfigKey: prog.Configuration(), - } -} diff --git a/internal/pkg/agent/stateresolver/statechange_string.go b/internal/pkg/agent/stateresolver/statechange_string.go deleted file mode 100644 index 53175471789..00000000000 --- a/internal/pkg/agent/stateresolver/statechange_string.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// Code generated by "stringer -type=stateChange -linecomment=true"; DO NOT EDIT. - -package stateresolver - -import "strconv" - -func _() { - // An "invalid array index" compiler error signifies that the constant values have changed. - // Re-run the stringer command to generate them again. 
-	var x [1]struct{}
-	_ = x[startState-1]
-	_ = x[updateState-2]
-	_ = x[unchangedState-3]
-}
-
-const _stateChange_name = "STARTUPDATEUNCHANGED"
-
-var _stateChange_index = [...]uint8{0, 5, 11, 20}
-
-func (i stateChange) String() string {
-	i -= 1
-	if i >= stateChange(len(_stateChange_index)-1) {
-		return "stateChange(" + strconv.FormatInt(int64(i+1), 10) + ")"
-	}
-	return _stateChange_name[_stateChange_index[i]:_stateChange_index[i+1]]
-}
diff --git a/internal/pkg/agent/stateresolver/stateresolver.go b/internal/pkg/agent/stateresolver/stateresolver.go
deleted file mode 100644
index d63cc482cde..00000000000
--- a/internal/pkg/agent/stateresolver/stateresolver.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package stateresolver
-
-import (
-	"sync"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/configrequest"
-	uid "github.com/elastic/elastic-agent/internal/pkg/id"
-	"github.com/elastic/elastic-agent/pkg/core/logger"
-)
-
-// Acker allows acking the should state from a converge operation.
-type Acker func()
-
-// StateResolver is a resolver of config state changes: it subscribes to Config events and
-// publishes StateChange events based on them.
-// Based on a StateChange event, the operator knows what to do.
-type StateResolver struct {
-	l        *logger.Logger
-	curState state
-	mu       sync.Mutex
-}
-
-// NewStateResolver creates a new StateResolver.
-func NewStateResolver(log *logger.Logger) (*StateResolver, error) {
-	return &StateResolver{
-		l: log,
-	}, nil
-}
-
-// Resolve resolves the passed config into one or multiple steps.
-func (s *StateResolver) Resolve(
-	cfg configrequest.Request,
-) (uid.ID, string, []configrequest.Step, Acker, error) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-
-	newState, steps := converge(s.curState, cfg)
-	newStateID := newState.ShortID()
-	id, err := uid.Generate()
-	if err != nil {
-		return id, newStateID, nil, nil, err
-	}
-
-	s.l.Infof("New State ID is %s", newStateID)
-	s.l.Infof("Converging state requires execution of %d step(s)", len(steps))
-	for i, step := range steps {
-		// more detailed debug log
-		s.l.Debugf("step %d: %s", i, step.String())
-	}
-
-	// Allow the operator to ack the should state when applying the steps is done correctly.
-	ack := func() {
-		s.ack(newState)
-	}
-
-	return id, newStateID, steps, ack, nil
-}
-
-func (s *StateResolver) ack(newState state) {
-	s.mu.Lock()
-	defer s.mu.Unlock()
-	s.l.Info("Updating internal state")
-	s.curState = newState
-}
diff --git a/internal/pkg/agent/stateresolver/stateresolver_test.go b/internal/pkg/agent/stateresolver/stateresolver_test.go
deleted file mode 100644
index ad67725e6a7..00000000000
--- a/internal/pkg/agent/stateresolver/stateresolver_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package stateresolver
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/assert"
-	"github.com/stretchr/testify/require"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
-	"github.com/elastic/elastic-agent/pkg/core/logger"
-)
-
-func TestStateResolverAcking(t *testing.T) {
-	submit := &cfg{
-		id:        "config-1",
-		createdAt: time.Now(),
-		programs: []program.Program{
-			fb("1"), mb("1"),
-		},
-	}
-
-	t.Run("when we ACK the should state", func(t *testing.T) {
-		log, _ := logger.New("", false)
-		r, err := NewStateResolver(log)
-		require.NoError(t, err)
-
-		// Current state is empty.
-		_, _, steps, ack, err := r.Resolve(submit)
-		require.NoError(t, err)
-		require.Equal(t, 2, len(steps))
-
-		// Ack the should state.
-		ack()
-
-		// Current state is not empty; let's try to resolve the same configuration.
-		_, _, steps, _, err = r.Resolve(submit)
-		require.NoError(t, err)
-		require.Equal(t, 0, len(steps))
-	})
-
-	t.Run("when we don't ACK the should state", func(t *testing.T) {
-		log, _ := logger.New("", false)
-		r, err := NewStateResolver(log)
-		require.NoError(t, err)
-
-		// Current state is empty.
-		_, _, steps1, _, err := r.Resolve(submit)
-		require.NoError(t, err)
-		require.Equal(t, 2, len(steps1))
-
-		// We didn't ACK the should state; verify that resolve produces the same output.
-		_, _, steps2, _, err := r.Resolve(submit)
-		require.NoError(t, err)
-		require.Equal(t, 2, len(steps2))
-
-		assert.Equal(t, steps1, steps2)
-	})
-}
diff --git a/internal/pkg/artifact/install/atomic/atomic_installer.go b/internal/pkg/artifact/install/atomic/atomic_installer.go
deleted file mode 100644
index 10c2652c1c8..00000000000
--- a/internal/pkg/artifact/install/atomic/atomic_installer.go
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package atomic
-
-import (
-	"context"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"runtime"
-
-	"github.com/hashicorp/go-multierror"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
-)
-
-type embeddedInstaller interface {
-	Install(ctx context.Context, spec program.Spec, version, installDir string) error
-}
-
-// Installer installs into a temporary destination and moves it to the correct one after
-// a successful finish.
-type Installer struct {
-	installer embeddedInstaller
-}
-
-// NewInstaller creates a new AtomicInstaller
-func NewInstaller(i embeddedInstaller) (*Installer, error) {
-	return &Installer{
-		installer: i,
-	}, nil
-}
-
-// Install performs installation of program in a specific version.
-func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error {
-	// The tar installer uses Dir of installDir to determine the location of the unpack.
-	//
-	// The installer is run inside a tmp directory created in the parent of installDir; this is so the atomic
-	// rename always occurs on the same mount path that holds the installation directory.
-	tempDir, err := ioutil.TempDir(filepath.Dir(installDir), "tmp")
-	if err != nil {
-		return err
-	}
-
-	// always remove the entire tempDir
-	defer func() {
-		os.RemoveAll(tempDir)
-	}()
-
-	tempInstallDir := filepath.Join(tempDir, filepath.Base(installDir))
-
-	// cleanup install directory before Install
-	if _, err := os.Stat(installDir); err == nil || os.IsExist(err) {
-		os.RemoveAll(installDir)
-	}
-
-	if _, err := os.Stat(tempInstallDir); err == nil || os.IsExist(err) {
-		os.RemoveAll(tempInstallDir)
-	}
-
-	// on windows rename is not atomic, let's force it to flush the cache
-	defer func() {
-		if runtime.GOOS == "windows" {
-			syncDir(installDir)
-			syncDir(tempInstallDir)
-		}
-	}()
-
-	if err := i.installer.Install(ctx, spec, version, tempInstallDir); err != nil {
-		// cleanup unfinished install
-		if rerr := os.RemoveAll(tempInstallDir); rerr != nil {
-			err = multierror.Append(err, rerr)
-		}
-		return err
-	}
-
-	if err := os.Rename(tempInstallDir, installDir); err != nil {
-		if rerr := os.RemoveAll(installDir); rerr != nil {
-			err = multierror.Append(err, rerr)
-		}
-		if rerr := os.RemoveAll(tempInstallDir); rerr != nil {
-			err = multierror.Append(err, rerr)
-		}
-		return err
-	}
-
-	return nil
-}
-
-func syncDir(dir string) {
-	if f, err := os.OpenFile(dir, os.O_RDWR, 0777); err == nil {
-		f.Sync()
-		f.Close()
-	}
-}
diff --git a/internal/pkg/artifact/install/atomic/atomic_installer_test.go b/internal/pkg/artifact/install/atomic/atomic_installer_test.go
deleted file mode 100644
index 08c8b592d6a..00000000000
--- a/internal/pkg/artifact/install/atomic/atomic_installer_test.go
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
- -package atomic - -import ( - "context" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -func TestOKInstall(t *testing.T) { - sig := make(chan int) - ti := &testInstaller{sig} - var wg sync.WaitGroup - i, err := NewInstaller(ti) - s := program.Spec{Name: "a", Cmd: "a"} - - assert.NoError(t, err) - - ctx := context.Background() - installDir := filepath.Join(paths.TempDir(), "install_dir") - - wg.Add(1) - go func() { - err := i.Install(ctx, s, "b", installDir) - assert.NoError(t, err) - wg.Done() - }() - - // signal to process next files - close(sig) - - wg.Wait() - - assert.DirExists(t, installDir) - files := getFiles() - - for name := range files { - path := filepath.Join(installDir, name) - assert.FileExists(t, path) - } - - os.RemoveAll(installDir) -} - -func TestContextCancelledInstall(t *testing.T) { - sig := make(chan int) - ti := &testInstaller{sig} - var wg sync.WaitGroup - i, err := NewInstaller(ti) - s := program.Spec{Name: "a", Cmd: "a"} - - assert.NoError(t, err) - - ctx, cancel := context.WithCancel(context.Background()) - installDir := filepath.Join(paths.TempDir(), "install_dir") - - wg.Add(1) - go func() { - err := i.Install(ctx, s, "b", installDir) - assert.Error(t, err) - wg.Done() - }() - - // cancel before signaling - cancel() - close(sig) - - wg.Wait() - - assert.NoDirExists(t, installDir) -} - -type testInstaller struct { - signal chan int -} - -func (ti *testInstaller) Install(ctx context.Context, _ program.Spec, _, installDir string) error { - files := getFiles() - if err := os.MkdirAll(installDir, 0777); err != nil { - return err - } - - for name, content := range files { - if err := ctx.Err(); err != nil { - return err - } - - filename := filepath.Join(installDir, name) - if err := ioutil.WriteFile(filename, content, 0666); err != nil { - return err - } - - // wait for all but last - <-ti.signal - } - - return nil -} - -func getFiles() map[string][]byte { - files := make(map[string][]byte) - fileCount := 3 - for i := 1; i <= fileCount; i++ { - files[fmt.Sprintf("file_%d", i)] = []byte(fmt.Sprintf("content of file %d", i)) - } - - return files -} diff --git a/internal/pkg/artifact/install/awaitable/awaitable_installer.go b/internal/pkg/artifact/install/awaitable/awaitable_installer.go deleted file mode 100644 index 33e8c6c48d5..00000000000 --- a/internal/pkg/artifact/install/awaitable/awaitable_installer.go +++ /dev/null @@ -1,57 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package awaitable - -import ( - "context" - "sync" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -type embeddedInstaller interface { - Install(ctx context.Context, spec program.Spec, version, installDir string) error -} - -type embeddedChecker interface { - Check(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// Installer installs into temporary destination and moves to correct one after -// successful finish. 
-type Installer struct { - installer embeddedInstaller - checker embeddedChecker - wg sync.WaitGroup -} - -// NewInstaller creates a new AtomicInstaller -func NewInstaller(i embeddedInstaller, ch embeddedChecker) (*Installer, error) { - return &Installer{ - installer: i, - checker: ch, - }, nil -} - -// Wait allows caller to wait for install to be finished -func (i *Installer) Wait() { - i.wg.Wait() -} - -// Install performs installation of program in a specific version. -func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - i.wg.Add(1) - defer i.wg.Done() - - return i.installer.Install(ctx, spec, version, installDir) -} - -// Check performs installation checks -func (i *Installer) Check(ctx context.Context, spec program.Spec, version, installDir string) error { - i.wg.Add(1) - defer i.wg.Done() - - return i.checker.Check(ctx, spec, version, installDir) -} diff --git a/internal/pkg/artifact/install/dir/dir_checker.go b/internal/pkg/artifact/install/dir/dir_checker.go deleted file mode 100644 index 38a93756ff8..00000000000 --- a/internal/pkg/artifact/install/dir/dir_checker.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package dir - -import ( - "context" - "os" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -// Checker performs basic check that the install directory exists. -type Checker struct{} - -// NewChecker returns a new Checker. -func NewChecker() *Checker { - return &Checker{} -} - -// Check checks that the install directory exists. -func (*Checker) Check(_ context.Context, _ program.Spec, _, installDir string) error { - _, err := os.Stat(installDir) - return err -} diff --git a/internal/pkg/artifact/install/hooks/hooks_installer.go b/internal/pkg/artifact/install/hooks/hooks_installer.go deleted file mode 100644 index 73ce7b81c5b..00000000000 --- a/internal/pkg/artifact/install/hooks/hooks_installer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package hooks - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -type embeddedInstaller interface { - Install(ctx context.Context, spec program.Spec, version, installDir string) error -} - -type embeddedChecker interface { - Check(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// InstallerChecker runs the PostInstallSteps after running the embedded installer -// and runs the InstallerCheckSteps after running the embedded installation checker. -type InstallerChecker struct { - installer embeddedInstaller - checker embeddedChecker -} - -// NewInstallerChecker creates a new InstallerChecker -func NewInstallerChecker(i embeddedInstaller, c embeddedChecker) (*InstallerChecker, error) { - return &InstallerChecker{ - installer: i, - checker: c, - }, nil -} - -// Install performs installation of program in a specific version, then runs the -// PostInstallSteps for the program if defined. 
-func (i *InstallerChecker) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - if err := i.installer.Install(ctx, spec, version, installDir); err != nil { - return err - } - if spec.PostInstallSteps != nil { - return spec.PostInstallSteps.Execute(ctx, installDir) - } - return nil -} - -// Check performs installation check of program to ensure that it is already installed, then -// runs the InstallerCheckSteps to ensure that the installation is valid. -func (i *InstallerChecker) Check(ctx context.Context, spec program.Spec, version, installDir string) error { - err := i.checker.Check(ctx, spec, version, installDir) - if err != nil { - return err - } - if spec.CheckInstallSteps != nil { - return spec.CheckInstallSteps.Execute(ctx, installDir) - } - - return nil -} diff --git a/internal/pkg/artifact/install/installer.go b/internal/pkg/artifact/install/installer.go deleted file mode 100644 index 15bc01e6f3a..00000000000 --- a/internal/pkg/artifact/install/installer.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package install - -import ( - "context" - "errors" - "runtime" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/atomic" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/awaitable" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/dir" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/hooks" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/tar" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/zip" -) - -var ( - // ErrConfigNotProvided is returned when provided config is nil - ErrConfigNotProvided = errors.New("config not provided") -) - -// Installer is an interface allowing installation of an artifact -type Installer interface { - // Install installs an artifact and returns - // location of the installed program - // error if something went wrong - Install(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// InstallerChecker is an interface that installs but also checks for valid installation. -type InstallerChecker interface { - Installer - - // Check checks if the installation is good. - Check(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// AwaitableInstallerChecker is an interface that installs, checks but also is awaitable to check when actions are done. -type AwaitableInstallerChecker interface { - InstallerChecker - - // Waits for its work to be done. 
- Wait() -} - -// NewInstaller returns a correct installer associated with a -// package type: -// - rpm -> rpm installer -// - deb -> deb installer -// - binary -> zip installer on windows, tar installer on linux and mac -func NewInstaller(config *artifact.Config) (AwaitableInstallerChecker, error) { - if config == nil { - return nil, ErrConfigNotProvided - } - - var installer Installer - var err error - if runtime.GOOS == "windows" { - installer, err = zip.NewInstaller(config) - } else { - installer, err = tar.NewInstaller(config) - } - - if err != nil { - return nil, err - } - - atomicInstaller, err := atomic.NewInstaller(installer) - if err != nil { - return nil, err - } - - hooksInstaller, err := hooks.NewInstallerChecker(atomicInstaller, dir.NewChecker()) - if err != nil { - return nil, err - } - - return awaitable.NewInstaller(hooksInstaller, hooksInstaller) -} diff --git a/internal/pkg/artifact/install/tar/tar_installer.go b/internal/pkg/artifact/install/tar/tar_installer.go deleted file mode 100644 index de7f02dfe38..00000000000 --- a/internal/pkg/artifact/install/tar/tar_installer.go +++ /dev/null @@ -1,141 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package tar - -import ( - "archive/tar" - "compress/gzip" - "context" - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" -) - -// Installer or tar packages -type Installer struct { - config *artifact.Config -} - -// NewInstaller creates an installer able to install tar packages -func NewInstaller(config *artifact.Config) (*Installer, error) { - return &Installer{ - config: config, - }, nil -} - -// Install performs installation of program in a specific version. -// It expects package to be already downloaded. 
-func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - artifactPath, err := artifact.GetArtifactPath(spec, version, i.config.OS(), i.config.Arch(), i.config.TargetDirectory) - if err != nil { - return err - } - - f, err := os.Open(artifactPath) - if err != nil { - return errors.New(fmt.Sprintf("artifact for '%s' version '%s' could not be found at '%s'", spec.Name, version, artifactPath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, artifactPath)) - } - defer f.Close() - - // cleanup install directory before unpack - _, err = os.Stat(installDir) - if err == nil || os.IsExist(err) { - os.RemoveAll(installDir) - } - - // unpack must occur in directory that holds the installation directory - // or the extraction will be double nested - return unpack(ctx, f, filepath.Dir(installDir)) -} - -func unpack(ctx context.Context, r io.Reader, dir string) error { - zr, err := gzip.NewReader(r) - if err != nil { - return errors.New("requires gzip-compressed body", err, errors.TypeFilesystem) - } - - tr := tar.NewReader(zr) - var rootDir string - - for { - // exit and propagate cancellation err as soon as we know about it - if err := ctx.Err(); err != nil { - return err - } - - f, err := tr.Next() - if errors.Is(err, io.EOF) { - break - } - if err != nil { - return err - } - - if !validFileName(f.Name) { - return errors.New("tar contained invalid filename: %q", f.Name, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name)) - } - rel := filepath.FromSlash(f.Name) - abs := filepath.Join(dir, rel) - - // find the root dir - if currentDir := filepath.Dir(abs); rootDir == "" || len(filepath.Dir(rootDir)) > len(currentDir) { - rootDir = currentDir - } - - fi := f.FileInfo() - mode := fi.Mode() - switch { - case mode.IsRegular(): - // just to be sure, it should already be created by Dir type - if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { - return errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) - } - - wf, err := os.OpenFile(abs, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode.Perm()) - if err != nil { - return errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) - } - - _, err = io.Copy(wf, tr) - - if err == nil { - // sometimes we try executing binary too fast and run into text file busy after unpacking - // syncing prevents this - if syncErr := wf.Sync(); syncErr != nil { - err = syncErr - } - } - - if closeErr := wf.Close(); closeErr != nil && err == nil { - err = closeErr - } - - if err != nil { - return fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) - } - case mode.IsDir(): - if err := os.MkdirAll(abs, 0755); err != nil { - return errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) - } - default: - return errors.New(fmt.Sprintf("tar file entry %s contained unsupported file type %v", f.Name, mode), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, f.Name)) - } - } - - return nil -} - -func validFileName(p string) bool { - if p == "" || strings.Contains(p, `\`) || strings.HasPrefix(p, "/") || strings.Contains(p, "../") { - return false - } - return true -} diff --git a/internal/pkg/artifact/install/zip/zip_installer.go b/internal/pkg/artifact/install/zip/zip_installer.go deleted file mode 100644 index e5ce9f7a41e..00000000000 --- a/internal/pkg/artifact/install/zip/zip_installer.go +++ /dev/null @@ -1,163 +0,0 @@ -// 
Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package zip - -import ( - "archive/zip" - "context" - "io" - "os" - "path/filepath" - - "github.com/hashicorp/go-multierror" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" -) - -// Installer or zip packages -type Installer struct { - config *artifact.Config -} - -// NewInstaller creates an installer able to install zip packages -func NewInstaller(config *artifact.Config) (*Installer, error) { - return &Installer{ - config: config, - }, nil -} - -// Install performs installation of program in a specific version. -// It expects package to be already downloaded. -func (i *Installer) Install(ctx context.Context, spec program.Spec, version, installDir string) error { - artifactPath, err := artifact.GetArtifactPath(spec, version, i.config.OS(), i.config.Arch(), i.config.TargetDirectory) - if err != nil { - return err - } - - // cleanup install directory before unzip - _, err = os.Stat(installDir) - if err == nil || os.IsExist(err) { - os.RemoveAll(installDir) - } - - if err := i.unzip(ctx, artifactPath); err != nil { - return err - } - - rootDir, err := i.getRootDir(artifactPath) - if err != nil { - return err - } - - // if root directory is not the same as desired directory rename - // e.g contains `-windows-` or `-SNAPSHOT-` - if rootDir != installDir { - defer syncDir(rootDir) - defer syncDir(installDir) - - if err := os.Rename(rootDir, installDir); err != nil { - return errors.New(err, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, installDir)) - } - - } - - return nil -} - -func (i *Installer) unzip(_ context.Context, artifactPath string) error { - r, err := zip.OpenReader(artifactPath) - if err != nil { - return err - } - defer r.Close() - - if err := os.MkdirAll(i.config.InstallPath, 0755); err != nil && !os.IsExist(err) { - // failed to create install dir - return err - } - - unpackFile := func(f *zip.File) (err error) { - rc, err := f.Open() - if err != nil { - return err - } - defer func() { - if cerr := rc.Close(); cerr != nil { - err = multierror.Append(err, cerr) - } - }() - - path := filepath.Join(i.config.InstallPath, f.Name) - - if f.FileInfo().IsDir() { - os.MkdirAll(path, f.Mode()) - } else { - os.MkdirAll(filepath.Dir(path), f.Mode()) - f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) - if err != nil { - return err - } - defer func() { - if closeErr := f.Close(); closeErr != nil { - err = multierror.Append(err, closeErr) - } - }() - - if _, err = io.Copy(f, rc); err != nil { - return err - } - - // sometimes we try executing binary too fast and run into text file busy after unpacking - // syncing prevents this - f.Sync() - } - return nil - } - - for _, f := range r.File { - if err := unpackFile(f); err != nil { - return err - } - } - - return nil -} - -// retrieves root directory from zip archive -func (i *Installer) getRootDir(zipPath string) (dir string, err error) { - defer func() { - if dir != "" { - dir = filepath.Join(i.config.InstallPath, dir) - } - }() - - zipReader, err := zip.OpenReader(zipPath) - if err != nil { - return "", err - } - defer zipReader.Close() - - var rootDir string - for _, f := range zipReader.File { - if filepath.Base(f.Name) == 
filepath.Dir(f.Name) { - return f.Name, nil - } - - if currentDir := filepath.Dir(f.Name); rootDir == "" || len(currentDir) < len(rootDir) { - rootDir = currentDir - } - } - - return rootDir, nil -} - -func syncDir(dir string) { - if f, err := os.OpenFile(dir, os.O_RDWR, 0777); err == nil { - f.Sync() - f.Close() - } -} diff --git a/internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go b/internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go deleted file mode 100644 index 461d64b4476..00000000000 --- a/internal/pkg/artifact/uninstall/hooks/hooks_uninstaller.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package hooks - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -type embeddedUninstaller interface { - Uninstall(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// Uninstaller that executes PreUninstallSteps -type Uninstaller struct { - uninstaller embeddedUninstaller -} - -// NewUninstaller creates an uninstaller that executes PreUninstallSteps -func NewUninstaller(i embeddedUninstaller) (*Uninstaller, error) { - return &Uninstaller{ - uninstaller: i, - }, nil -} - -// Uninstall performs the execution of the PreUninstallSteps -func (i *Uninstaller) Uninstall(ctx context.Context, spec program.Spec, version, installDir string) error { - if spec.PreUninstallSteps != nil { - return spec.PreUninstallSteps.Execute(ctx, installDir) - } - return i.uninstaller.Uninstall(ctx, spec, version, installDir) -} diff --git a/internal/pkg/artifact/uninstall/uninstaller.go b/internal/pkg/artifact/uninstall/uninstaller.go deleted file mode 100644 index a5eb73f669f..00000000000 --- a/internal/pkg/artifact/uninstall/uninstaller.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package uninstall - -import ( - "context" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact/uninstall/hooks" -) - -// Uninstaller is an interface allowing un-installation of an artifact -type Uninstaller interface { - // Uninstall uninstalls an artifact. - Uninstall(ctx context.Context, spec program.Spec, version, installDir string) error -} - -// NewUninstaller returns a correct uninstaller. -func NewUninstaller() (Uninstaller, error) { - return hooks.NewUninstaller(&nilUninstaller{}) -} - -type nilUninstaller struct{} - -func (*nilUninstaller) Uninstall(_ context.Context, _ program.Spec, _, _ string) error { - return nil -} diff --git a/internal/pkg/core/app/descriptor.go b/internal/pkg/core/app/descriptor.go deleted file mode 100644 index 84c7c1019ae..00000000000 --- a/internal/pkg/core/app/descriptor.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
-
-package app
-
-import (
-	"path/filepath"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
-	"github.com/elastic/elastic-agent/internal/pkg/artifact"
-)
-
-// Descriptor defines a program which needs to be run.
-// It is passed around operator operations.
-type Descriptor struct {
-	spec         program.Spec
-	executionCtx ExecutionContext
-	directory    string
-	process      ProcessSpec
-}
-
-// NewDescriptor creates a program which satisfies the Program interface and can be used with Operator.
-func NewDescriptor(spec program.Spec, version string, config *artifact.Config, tags map[Tag]string) *Descriptor {
-	dir := paths.Components()
-	return NewDescriptorWithPath(dir, spec, version, config, tags)
-}
-
-// NewDescriptorWithPath creates a program which satisfies the Program interface and can be used with Operator.
-func NewDescriptorWithPath(path string, spec program.Spec, version string, config *artifact.Config, tags map[Tag]string) *Descriptor {
-	servicePort := 0
-	if spec.ServicePort > 0 {
-		servicePort = spec.ServicePort
-	}
-
-	return &Descriptor{
-		spec:         spec,
-		directory:    path,
-		executionCtx: NewExecutionContext(servicePort, spec.CommandName(), version, tags),
-		process:      specification(path, spec),
-	}
-}
-
-// ServicePort is the port the service will connect to gather GRPC information. When this is not
-// 0 then the application is run using the `service` application type, versus a `process` application.
-func (p *Descriptor) ServicePort() int {
-	return p.executionCtx.ServicePort
-}
-
-// BinaryName is the name of the binary, e.g. filebeat.
-func (p *Descriptor) BinaryName() string {
-	return p.executionCtx.BinaryName
-}
-
-// Version specifies a version of the application, e.g. '7.2.0'.
-func (p *Descriptor) Version() string { return p.executionCtx.Version }
-
-// Tags is a collection of tags used to specify the application more precisely.
-// Two descriptors with the same binary name and version but with different tags will
-// result in two different instances of the application.
-func (p *Descriptor) Tags() map[Tag]string { return p.executionCtx.Tags }
-
-// ID is a unique representation of the application.
-func (p *Descriptor) ID() string { return p.executionCtx.ID }
-
-// ExecutionContext returns the execution context of the application.
-func (p *Descriptor) ExecutionContext() ExecutionContext { return p.executionCtx }
-
-// Spec returns a program specification with a resolved binary path.
-func (p *Descriptor) Spec() program.Spec {
-	return p.spec
-}
-
-// ProcessSpec returns a process specification with a resolved binary path.
-func (p *Descriptor) ProcessSpec() ProcessSpec {
-	return p.process
-}
-
-// Directory specifies the root directory of the application within an install path.
-func (p *Descriptor) Directory() string {
-	return p.directory
-}
-
-func specification(dir string, spec program.Spec) ProcessSpec {
-	return ProcessSpec{
-		BinaryPath:    filepath.Join(dir, spec.Command()),
-		Args:          spec.Args,
-		Configuration: nil,
-	}
-}
diff --git a/internal/pkg/core/app/execution_context.go b/internal/pkg/core/app/execution_context.go
deleted file mode 100644
index 48479403aa8..00000000000
--- a/internal/pkg/core/app/execution_context.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package app
-
-import (
-	"crypto/sha256"
-	"fmt"
-)
-
-const (
-	hashLen = 16
-)
-
-// ExecutionContext describes a runnable binary.
-type ExecutionContext struct {
-	ServicePort int
-	BinaryName  string
-	Version     string
-	Tags        map[Tag]string
-	ID          string
-}
-
-// NewExecutionContext creates an execution context and generates an ID for this context
-func NewExecutionContext(servicePort int, binaryName, version string, tags map[Tag]string) ExecutionContext {
-	id := fmt.Sprintf("%s--%s", binaryName, version)
-	if len(tags) > 0 {
-		hash := fmt.Sprintf("%x", sha256.New().Sum([]byte(fmt.Sprint(tags))))
-		if len(hash) > hashLen {
-			hash = hash[:hashLen]
-		}
-		id += fmt.Sprintf("--%x", hash)
-	}
-
-	return ExecutionContext{
-		ServicePort: servicePort,
-		BinaryName:  binaryName,
-		Version:     version,
-		Tags:        tags,
-		ID:          id,
-	}
-}
diff --git a/internal/pkg/core/app/process_cred.go b/internal/pkg/core/app/process_cred.go
deleted file mode 100644
index ee6f4f0e2a4..00000000000
--- a/internal/pkg/core/app/process_cred.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-//go:build linux || darwin
-// +build linux darwin
-
-package app
-
-import (
-	"os"
-	"os/user"
-	"strconv"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-)
-
-// UserGroup returns the uid and gid for the process specification.
-func (spec ProcessSpec) UserGroup() (int, int, error) {
-	if spec.User.Uid == "" && spec.Group.Gid == "" {
-		// neither is specified; use the agent's own effective user/group
-		return os.Geteuid(), os.Getegid(), nil
-	}
-
-	// check if user/group exists
-	usedUID := spec.User.Uid
-	userGID := ""
-	if u, err := user.LookupId(spec.User.Uid); err != nil {
-		u, err := user.Lookup(spec.User.Name)
-		if err != nil {
-			return 0, 0, err
-		}
-		usedUID = u.Uid
-		userGID = u.Gid
-	} else {
-		userGID = u.Gid
-	}
-
-	usedGID := spec.Group.Gid
-	if spec.Group.Gid != "" || spec.Group.Name != "" {
-		if _, err := user.LookupGroupId(spec.Group.Gid); err != nil {
-			g, err := user.LookupGroup(spec.Group.Name)
-			if err != nil {
-				return 0, 0, err
-			}
-
-			usedGID = g.Gid
-		}
-	} else {
-		// if the group is not specified and the user is found, use the user's group
-		usedGID = userGID
-	}
-
-	uid, err := strconv.Atoi(usedUID)
-	if err != nil {
-		return 0, 0, errors.New(err, "invalid user")
-	}
-
-	gid, err := strconv.Atoi(usedGID)
-	if err != nil {
-		return 0, 0, errors.New(err, "invalid group")
-	}
-
-	return uid, gid, nil
-}
diff --git a/internal/pkg/core/app/process_cred_other.go b/internal/pkg/core/app/process_cred_other.go
deleted file mode 100644
index 49aa0ccd613..00000000000
--- a/internal/pkg/core/app/process_cred_other.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-//go:build !linux && !darwin
-// +build !linux,!darwin
-
-package app
-
-// UserGroup returns the uid and gid for the process specification.
-func (spec ProcessSpec) UserGroup() (int, int, error) {
-	return 0, 0, nil
-}
diff --git a/internal/pkg/core/app/spec.go b/internal/pkg/core/app/spec.go
deleted file mode 100644
index 6f09c52e34b..00000000000
--- a/internal/pkg/core/app/spec.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright Elasticsearch B.V.
and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package app - -import ( - "os/user" -) - -// ProcessSpec specifies a way of running a process -type ProcessSpec struct { - // Binary path. - BinaryPath string - - // Set of arguments. - Args []string - Configuration map[string]interface{} - - // Under what user we can run the program. (example: apm-server is not running as root, isolation and cgroup) - User user.User - Group user.Group - - // TODO: mapping transformation rules for configuration between elastic-agent.yml and to the beats. -} diff --git a/internal/pkg/core/app/tag.go b/internal/pkg/core/app/tag.go deleted file mode 100644 index e289190ad81..00000000000 --- a/internal/pkg/core/app/tag.go +++ /dev/null @@ -1,24 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package app - -// Tag is a tag for specifying metadata related -// to a process. -type Tag string - -// TagSidecar tags a sidecar process -const TagSidecar = "sidecar" - -// Taggable is an object containing tags. -type Taggable interface { - Tags() map[Tag]string -} - -// IsSidecar returns true if tags contains sidecar flag. -func IsSidecar(descriptor Taggable) bool { - tags := descriptor.Tags() - _, isSidecar := tags[TagSidecar] - return isSidecar -} diff --git a/internal/pkg/core/monitoring/beats/beats_monitor.go b/internal/pkg/core/monitoring/beats/beats_monitor.go index d70878eb8a0..b795c5ecb58 100644 --- a/internal/pkg/core/monitoring/beats/beats_monitor.go +++ b/internal/pkg/core/monitoring/beats/beats_monitor.go @@ -13,9 +13,10 @@ import ( "strings" "unicode" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/config" monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" ) diff --git a/internal/pkg/core/monitoring/server/process.go b/internal/pkg/core/monitoring/server/process.go index cbd4ddcf3df..1d1d9c80806 100644 --- a/internal/pkg/core/monitoring/server/process.go +++ b/internal/pkg/core/monitoring/server/process.go @@ -15,11 +15,12 @@ import ( "syscall" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" + "github.com/gorilla/mux" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact" "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" ) diff --git a/internal/pkg/core/plugin/common.go b/internal/pkg/core/plugin/common.go deleted file mode 100644 index 145ff574b75..00000000000 --- a/internal/pkg/core/plugin/common.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package plugin - -import ( - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type configFetcher interface { - Config() string -} - -// IsRestartNeeded returns true if -// - spec is configured to support restart on change -// - output changes in between configs -func IsRestartNeeded(log *logger.Logger, spec program.Spec, cfgFetch configFetcher, newCfg map[string]interface{}) bool { - if !spec.RestartOnOutputChange { - // early exit if restart is not needed anyway - return false - } - - // compare outputs - curCfgStr := cfgFetch.Config() - if curCfgStr == "" { - // no config currently applied - return false - } - - currentOutput, err := getOutputConfigFromString(curCfgStr) - if err != nil { - log.Errorf("failed to retrieve output config from current state: %v", err) - return false - } - - newOutput, err := getOutputConfigFromMap(newCfg) - if err != nil { - log.Errorf("failed to retrieve output config from new state: %v", err) - return false - } - - // restart needed only if output changed - return currentOutput != newOutput -} - -func getOutputConfigFromString(cfgString string) (string, error) { - cfg, err := config.NewConfigFrom(cfgString) - if err != nil { - return "", err - } - - cfgMap, err := cfg.ToMapStr() - if err != nil { - return "", err - } - - return getOutputConfigFromMap(cfgMap) -} - -func getOutputConfigFromMap(cfgMap map[string]interface{}) (string, error) { - outputCfgIface, found := cfgMap["output"] - if !found { - // output not found not an error - return "", nil - } - - outputCfg, ok := outputCfgIface.(map[string]interface{}) - if !ok { - return "", errors.New("not a map") - } - - cfgStr, err := yaml.Marshal(outputCfg) - if err != nil { - return "", errors.New(err, errors.TypeApplication) - } - - return string(cfgStr), nil -} diff --git a/internal/pkg/core/plugin/common_test.go b/internal/pkg/core/plugin/common_test.go deleted file mode 100644 index 03f0306f145..00000000000 --- a/internal/pkg/core/plugin/common_test.go +++ /dev/null @@ -1,95 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package plugin - -import ( - "testing" - - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestRestartNeeded(t *testing.T) { - tt := []struct { - Name string - OldOutput map[string]interface{} - NewOutput map[string]interface{} - ShouldRestart bool - - ExpectedRestart bool - }{ - { - "same empty output", - map[string]interface{}{}, - map[string]interface{}{}, - true, - false, - }, - { - "same not empty output", - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - true, - false, - }, - { - "different empty output", - map[string]interface{}{}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - true, - false, - }, - { - "different not empty output", - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "s3cur3_Pa55;"}}, - true, - true, - }, - { - "different not empty output no restart required", - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "123456"}}, - map[string]interface{}{"output": map[string]interface{}{"username": "user", "password": "s3cur3_Pa55;"}}, - false, - false, - }, - } - - for _, tc := range tt { - t.Run(tc.Name, func(t *testing.T) { - cf, err := newTestConfigFetcher(tc.OldOutput) - require.NoError(t, err) - s := testProgramSpec(tc.ShouldRestart) - l, _ := logger.New("tst", false) - - IsRestartNeeded(l, s, cf, tc.NewOutput) - }) - } -} - -func newTestConfigFetcher(cfg map[string]interface{}) (*testConfigFetcher, error) { - cfgStr, err := yaml.Marshal(cfg) - if err != nil { - return nil, errors.New(err, errors.TypeApplication) - } - - return &testConfigFetcher{cfg: string(cfgStr)}, nil -} - -type testConfigFetcher struct { - cfg string -} - -func (f testConfigFetcher) Config() string { return f.cfg } - -func testProgramSpec(restartOnOutput bool) program.Spec { - return program.Spec{ - RestartOnOutputChange: restartOnOutput, - } -} diff --git a/internal/pkg/core/plugin/process/app.go b/internal/pkg/core/plugin/process/app.go deleted file mode 100644 index c184cefe397..00000000000 --- a/internal/pkg/core/plugin/process/app.go +++ /dev/null @@ -1,319 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package process - -import ( - "context" - "fmt" - "os" - "reflect" - "sync" - "time" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/app" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/core/status" - "github.com/elastic/elastic-agent/internal/pkg/tokenbucket" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -var ( - // ErrAppNotRunning is returned when configuration is performed on not running application. - ErrAppNotRunning = errors.New("application is not running", errors.TypeApplication) - procExitTimeout = 10 * time.Second -) - -// Application encapsulates a concrete application ran by elastic-agent e.g Beat. -type Application struct { - bgContext context.Context - id string - name string - pipelineID string - logLevel string - desc *app.Descriptor - srv *server.Server - srvState *server.ApplicationState - limiter *tokenbucket.Bucket - startContext context.Context - tag app.Taggable - state state.State - reporter state.Reporter - watchClosers map[int]context.CancelFunc - - uid int - gid int - - monitor monitoring.Monitor - statusReporter status.Reporter - - processConfig *process.Config - - logger *logger.Logger - - appLock sync.Mutex - restartCanceller context.CancelFunc - restartConfig map[string]interface{} -} - -// ArgsDecorator decorates arguments before calling an application -type ArgsDecorator func([]string) []string - -// NewApplication creates a new instance of an applications. It will not automatically start -// the application. -func NewApplication( - ctx context.Context, - id, appName, pipelineID, logLevel string, - desc *app.Descriptor, - srv *server.Server, - cfg *configuration.SettingsConfig, - logger *logger.Logger, - reporter state.Reporter, - monitor monitoring.Monitor, - statusController status.Controller) (*Application, error) { - - s := desc.ProcessSpec() - uid, gid, err := s.UserGroup() - if err != nil { - return nil, err - } - - b, _ := tokenbucket.NewTokenBucket(ctx, 3, 3, 1*time.Second) - return &Application{ - bgContext: ctx, - id: id, - name: appName, - pipelineID: pipelineID, - logLevel: logLevel, - desc: desc, - srv: srv, - processConfig: cfg.ProcessConfig, - logger: logger, - limiter: b, - state: state.State{ - Status: state.Stopped, - }, - reporter: reporter, - monitor: monitor, - uid: uid, - gid: gid, - statusReporter: statusController.RegisterApp(id, appName), - watchClosers: make(map[int]context.CancelFunc), - }, nil -} - -// Monitor returns monitoring handler of this app. -func (a *Application) Monitor() monitoring.Monitor { - return a.monitor -} - -// Spec returns the program spec of this app. -func (a *Application) Spec() program.Spec { - return a.desc.Spec() -} - -// State returns the application state. -func (a *Application) State() state.State { - a.appLock.Lock() - defer a.appLock.Unlock() - return a.state -} - -// Name returns application name -func (a *Application) Name() string { - return a.name -} - -// Started returns true if the application is started. 
-func (a *Application) Started() bool { - return a.state.Status != state.Stopped && a.state.Status != state.Crashed && a.state.Status != state.Failed -} - -// Stop stops the current application. -func (a *Application) Stop() { - a.appLock.Lock() - status := a.state.Status - srvState := a.srvState - a.appLock.Unlock() - - if status == state.Stopped { - return - } - - if srvState != nil { - // signal stop through GRPC, wait and kill is performed later in gracefulKill - if err := srvState.Stop(a.processConfig.StopTimeout); err != nil { - err := fmt.Errorf("failed to stop after %s: %w", a.processConfig.StopTimeout, err) - a.setState(state.Failed, err.Error(), nil) - - a.logger.Error(err) - } - - } - - a.appLock.Lock() - defer a.appLock.Unlock() - - a.srvState = nil - if a.state.ProcessInfo != nil { - // stop and clean watcher - a.stopWatcher(a.state.ProcessInfo) - a.gracefulKill(a.state.ProcessInfo) - - a.state.ProcessInfo = nil - - // cleanup drops - a.cleanUp() - } - a.setState(state.Stopped, "Stopped", nil) -} - -// Shutdown stops the application (aka. subprocess). -func (a *Application) Shutdown() { - a.logger.Infof("Signaling application to stop because of shutdown: %s", a.id) - a.Stop() -} - -// SetState sets the status of the application. -func (a *Application) SetState(s state.Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - a.setState(s, msg, payload) -} - -func (a *Application) watch(ctx context.Context, p app.Taggable, proc *process.Info, cfg map[string]interface{}) { - go func() { - var procState *os.ProcessState - - select { - case ps := <-a.waitProc(proc.Process): - procState = ps - case <-a.bgContext.Done(): - return - case <-ctx.Done(): - // closer called - return - } - - a.appLock.Lock() - defer a.appLock.Unlock() - if a.state.ProcessInfo != proc { - // already another process started, another watcher is watching instead - a.gracefulKill(proc) - return - } - - // stop the watcher - a.stopWatcher(a.state.ProcessInfo) - - // was already stopped by Stop, do not restart - if a.state.Status == state.Stopped { - return - } - - a.state.ProcessInfo = nil - srvState := a.srvState - - if srvState == nil || srvState.Expected() == proto.StateExpected_STOPPING { - return - } - - msg := fmt.Sprintf("exited with code: %d", procState.ExitCode()) - a.setState(state.Restarting, msg, nil) - - // it was a crash - a.start(ctx, p, cfg, true) - }() -} - -func (a *Application) stopWatcher(procInfo *process.Info) { - if procInfo != nil { - if closer, ok := a.watchClosers[procInfo.PID]; ok { - closer() - delete(a.watchClosers, procInfo.PID) - } - } -} - -func (a *Application) waitProc(proc *os.Process) <-chan *os.ProcessState { - resChan := make(chan *os.ProcessState) - - go func() { - procState, err := proc.Wait() - if err != nil { - // process is not a child - some OSs requires process to be child - a.externalProcess(proc) - } - - resChan <- procState - }() - - return resChan -} - -func (a *Application) setState(s state.Status, msg string, payload map[string]interface{}) { - if a.state.Status != s || a.state.Message != msg || !reflect.DeepEqual(a.state.Payload, payload) { - if state.IsStateFiltered(msg, payload) { - return - } - - a.state.Status = s - a.state.Message = msg - a.state.Payload = payload - if a.reporter != nil { - go a.reporter.OnStateChange(a.id, a.name, a.state) - } - a.statusReporter.Update(s, msg, payload) - } -} - -func (a *Application) cleanUp() { - a.monitor.Cleanup(a.desc.Spec(), a.pipelineID) -} - -func (a *Application) 
gracefulKill(proc *process.Info) {
-	if proc == nil || proc.Process == nil {
-		return
-	}
-
-	// send stop signal to request stop
-	if err := proc.Stop(); err != nil {
-		a.logger.Errorf("failed to stop %s: %v", a.Name(), err)
-	}
-
-	var wg sync.WaitGroup
-	doneChan := make(chan struct{})
-	wg.Add(1)
-	go func() {
-		wg.Done()
-
-		if _, err := proc.Process.Wait(); err != nil {
-			// process is not a child - some OSs require the process to be a child
-			a.externalProcess(proc.Process)
-		}
-		close(doneChan)
-	}()
-
-	// wait for the awaiter goroutine to begin
-	wg.Wait()
-
-	// kill in case it's still running after timeout
-	t := time.NewTimer(procExitTimeout)
-	defer t.Stop()
-	select {
-	case <-doneChan:
-	case <-t.C:
-		a.logger.Infof("gracefulKill timed out after %s, killing %s",
-			procExitTimeout, a.Name())
-		_ = proc.Process.Kill()
-	}
-}
diff --git a/internal/pkg/core/plugin/process/configure.go b/internal/pkg/core/plugin/process/configure.go
deleted file mode 100644
index 57f12e191de..00000000000
--- a/internal/pkg/core/plugin/process/configure.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package process
-
-import (
-	"context"
-
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/core/plugin"
-	"github.com/elastic/elastic-agent/internal/pkg/core/state"
-)
-
-// Configure configures the application with the passed configuration.
-func (a *Application) Configure(ctx context.Context, config map[string]interface{}) (err error) {
-	defer func() {
-		if err != nil {
-			// inject App metadata
-			err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id))
-			a.statusReporter.Update(state.Degraded, err.Error(), nil)
-		}
-	}()
-
-	a.appLock.Lock()
-	defer a.appLock.Unlock()
-
-	if a.state.Status == state.Stopped {
-		return errors.New(ErrAppNotRunning)
-	}
-	if a.srvState == nil {
-		return errors.New(ErrAppNotRunning)
-	}
-
-	cfgStr, err := yaml.Marshal(config)
-	if err != nil {
-		return errors.New(err, errors.TypeApplication)
-	}
-
-	isRestartNeeded := plugin.IsRestartNeeded(a.logger, a.Spec(), a.srvState, config)
-
-	err = a.srvState.UpdateConfig(string(cfgStr))
-	if err != nil {
-		return errors.New(err, errors.TypeApplication)
-	}
-
-	if isRestartNeeded {
-		a.logger.Infof("initiating restart of '%s' due to config change", a.Name())
-		a.appLock.Unlock()
-		a.Stop()
-		err = a.Start(ctx, a.desc, config)
-		// lock back so it won't panic on deferred unlock
-		a.appLock.Lock()
-	}
-
-	return err
-}
diff --git a/internal/pkg/core/plugin/process/start.go b/internal/pkg/core/plugin/process/start.go
deleted file mode 100644
index 55081d9977d..00000000000
--- a/internal/pkg/core/plugin/process/start.go
+++ /dev/null
@@ -1,190 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
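The gracefulKill logic deleted above follows a common stop-wait-kill shape: request a stop, reap the child in a goroutine, and hard-kill if it has not exited within procExitTimeout. A self-contained approximation using only the standard library; stopTimeout and the exec.Cmd plumbing are illustrative assumptions, not the agent's process wrappers:

package main

import (
	"fmt"
	"os/exec"
	"syscall"
	"time"
)

const stopTimeout = 10 * time.Second // illustrative; the agent uses procExitTimeout

func gracefulStop(cmd *exec.Cmd) {
	// ask politely first (SIGTERM on POSIX systems)
	_ = cmd.Process.Signal(syscall.SIGTERM)

	done := make(chan struct{})
	go func() {
		_, _ = cmd.Process.Wait() // reap the child so it does not linger as a zombie
		close(done)
	}()

	select {
	case <-done:
		// exited on its own within the deadline
	case <-time.After(stopTimeout):
		// still running after the deadline: hard kill
		_ = cmd.Process.Kill()
	}
}

func main() {
	cmd := exec.Command("sleep", "60")
	if err := cmd.Start(); err != nil {
		fmt.Println("start failed:", err)
		return
	}
	gracefulStop(cmd)
}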
-
-package process
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"os/exec"
-	"path/filepath"
-
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/core/app"
-	"github.com/elastic/elastic-agent/internal/pkg/core/state"
-	"github.com/elastic/elastic-agent/pkg/core/process"
-	"github.com/elastic/elastic-agent/pkg/core/server"
-)
-
-// Start starts the application with a specified config.
-func (a *Application) Start(ctx context.Context, t app.Taggable, cfg map[string]interface{}) error {
-	a.appLock.Lock()
-	defer a.appLock.Unlock()
-
-	return a.start(ctx, t, cfg, false)
-}
-
-// start starts the application without grabbing the lock.
-func (a *Application) start(ctx context.Context, t app.Taggable, cfg map[string]interface{}, isRestart bool) (err error) {
-	defer func() {
-		if err != nil {
-			// inject App metadata
-			err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id))
-		}
-	}()
-
-	// start only if it's not already running,
-	// or, if it is, only when this is a restart initiated by the restart call
-	if a.Started() && a.state.Status != state.Restarting {
-		if a.state.ProcessInfo == nil {
-			// already started if not stopped or crashed
-			return nil
-		}
-
-		// in case the app reported a status it might still be running with the failure timer
-		// in progress. Stop the timer and stop the failing process
-		a.stopFailedTimer()
-		a.stopWatcher(a.state.ProcessInfo)
-
-		// kill the process
-		_ = a.state.ProcessInfo.Process.Kill()
-		a.state.ProcessInfo = nil
-	}
-
-	if a.state.Status == state.Restarting && !isRestart {
-		return nil
-	}
-
-	cfgStr, err := yaml.Marshal(cfg)
-	if err != nil {
-		return fmt.Errorf("%q could not marshal config to yaml: %w", a.Name(), err)
-	}
-
-	a.startContext = ctx
-	a.tag = t
-	srvState := a.srvState
-
-	// Failed applications can be started again.
- if srvState != nil { - a.setState(state.Starting, "Starting", nil) - srvState.SetStatus(proto.StateObserved_STARTING, a.state.Message, a.state.Payload) - srvState.UpdateConfig(srvState.Config()) - } else { - a.srvState, err = a.srv.Register(a, string(cfgStr)) - if err != nil { - return err - } - // Set input types from the spec - a.srvState.SetInputTypes(a.desc.Spec().ActionInputTypes) - } - - if a.state.Status != state.Stopped { - // restarting as it was previously in a different state - a.setState(state.Restarting, "Restarting", nil) - } else if a.state.Status != state.Restarting { - // keep restarting state otherwise it's starting - a.setState(state.Starting, "Starting", nil) - } - - defer func() { - if err != nil { - if a.srvState != nil { - a.srvState.Destroy() - a.srvState = nil - } - if a.state.ProcessInfo != nil { - _ = a.state.ProcessInfo.Process.Kill() - a.state.ProcessInfo = nil - } - } - }() - - if err := a.monitor.Prepare(a.desc.Spec(), a.pipelineID, a.uid, a.gid); err != nil { - return fmt.Errorf("%q failed to prepare monitor for %q: %w", - a.Name(), a.desc.Spec().Name, err) - } - - if a.limiter != nil { - a.limiter.Add() - } - - spec := a.desc.ProcessSpec() - spec.Args = injectLogLevel(a.logLevel, spec.Args) - - // use separate file - isSidecar := app.IsSidecar(t) - spec.Args = a.monitor.EnrichArgs(a.desc.Spec(), a.pipelineID, spec.Args, isSidecar) - - // specify beat name to avoid data lock conflicts - // as for https://github.com/elastic/beats/v7/pull/14030 more than one instance - // of the beat with same data path fails to start - spec.Args = injectDataPath(spec.Args, a.pipelineID, a.id) - - a.state.ProcessInfo, err = process.Start( - spec.BinaryPath, - a.uid, - a.gid, - spec.Args, nil, func(c *exec.Cmd) error { - c.Stdout = newLoggerWriter(a.Name(), logStdOut, a.logger) - c.Stderr = newLoggerWriter(a.Name(), logStdErr, a.logger) - return nil - }) - if err != nil { - return fmt.Errorf("%q failed to start %q: %w", - a.Name(), spec.BinaryPath, err) - } - - // write connect info to stdin - go a.writeToStdin(a.srvState, a.state.ProcessInfo.Stdin) - - // create closer for watcher, used to terminate watcher without - // side effect of restarting process during shutdown - cancelCtx, cancel := context.WithCancel(ctx) - a.watchClosers[a.state.ProcessInfo.PID] = cancel - // setup watcher - a.watch(cancelCtx, t, a.state.ProcessInfo, cfg) - - return nil -} - -func (a *Application) writeToStdin(as *server.ApplicationState, wc io.WriteCloser) { - err := as.WriteConnInfo(wc) - if err != nil { - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - a.logger.Errorf("%q failed writing connection info to spawned application: %v", a.Name(), err) - } - _ = wc.Close() -} - -func injectLogLevel(logLevel string, args []string) []string { - var level string - // Translate to level beat understands - switch logLevel { - case "info": - level = "info" - case "debug": - level = "debug" - case "warning": - level = "warning" - case "error": - level = "error" - } - - if args == nil || level == "" { - return args - } - - return append(args, "-E", "logging.level="+level) -} - -func injectDataPath(args []string, pipelineID, id string) []string { - dataPath := filepath.Join(paths.Home(), "run", pipelineID, id) - return append(args, "-E", "path.data="+dataPath) -} diff --git a/internal/pkg/core/plugin/process/status.go b/internal/pkg/core/plugin/process/status.go deleted file mode 100644 index 50488dfd77b..00000000000 --- 
a/internal/pkg/core/plugin/process/status.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package process - -import ( - "context" - "fmt" - "time" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/process" - "github.com/elastic/elastic-agent/pkg/core/server" -) - -// OnStatusChange is the handler called by the GRPC server code. -// -// It updates the status of the application and handles restarting the application if needed. -func (a *Application) OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - - // If the application is stopped, do not update the state. Stopped is a final state - // and should not be overridden. - if a.state.Status == state.Stopped { - return - } - - a.setState(state.FromProto(status), msg, payload) - if status == proto.StateObserved_FAILED { - // ignore when expected state is stopping - if s.Expected() == proto.StateExpected_STOPPING { - return - } - - // it was marshalled to pass into the state, so unmarshall will always succeed - var cfg map[string]interface{} - _ = yaml.Unmarshal([]byte(s.Config()), &cfg) - - // start the failed timer - // pass process info to avoid killing new process spun up in a meantime - a.startFailedTimer(cfg, a.state.ProcessInfo) - } else { - a.stopFailedTimer() - } -} - -// startFailedTimer starts a timer that will restart the application if it doesn't exit failed after a period of time. -// -// This does not grab the appLock, that must be managed by the caller. -func (a *Application) startFailedTimer(cfg map[string]interface{}, proc *process.Info) { - if a.restartCanceller != nil { - // already have running failed timer; just update config - a.restartConfig = cfg - return - } - - ctx, cancel := context.WithCancel(a.startContext) - a.restartCanceller = cancel - a.restartConfig = cfg - t := time.NewTimer(a.processConfig.FailureTimeout) - go func() { - defer func() { - a.appLock.Lock() - a.restartCanceller = nil - a.restartConfig = nil - a.appLock.Unlock() - }() - - select { - case <-ctx.Done(): - return - case <-t.C: - a.restart(proc) - } - }() -} - -// stopFailedTimer stops the timer that would restart the application from reporting failure. -// -// This does not grab the appLock, that must be managed by the caller. 
-func (a *Application) stopFailedTimer() { - if a.restartCanceller == nil { - return - } - a.restartCanceller() - a.restartCanceller = nil -} - -// restart restarts the application -func (a *Application) restart(proc *process.Info) { - a.appLock.Lock() - defer a.appLock.Unlock() - - // stop the watcher - a.stopWatcher(proc) - - // kill the process - if proc != nil && proc.Process != nil { - _ = proc.Process.Kill() - } - - if proc != a.state.ProcessInfo { - // we're restarting different process than actually running - // no need to start another one - return - } - - a.state.ProcessInfo = nil - - ctx := a.startContext - tag := a.tag - - a.setState(state.Restarting, "", nil) - err := a.start(ctx, tag, a.restartConfig, true) - if err != nil { - a.setState(state.Crashed, fmt.Sprintf("failed to restart: %s", err), nil) - } -} diff --git a/internal/pkg/core/plugin/process/stdlogger.go b/internal/pkg/core/plugin/process/stdlogger.go deleted file mode 100644 index 4c7d8625216..00000000000 --- a/internal/pkg/core/plugin/process/stdlogger.go +++ /dev/null @@ -1,56 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package process - -import ( - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -type logStd int - -const ( - agentConsoleName = "agent.console.name" - agentConsoleType = "agent.console.type" - - logStdOut logStd = iota - logStdErr -) - -func (l logStd) String() string { - switch l { - case logStdOut: - return "stdout" - case logStdErr: - return "stderr" - } - - return "unknown" -} - -type loggerWriter struct { - format string - logf func(format string, args ...interface{}) -} - -func newLoggerWriter(appName string, std logStd, log *logger.Logger) loggerWriter { - log = log.With( - agentConsoleName, appName, - agentConsoleType, std.String()) - - logf := log.Infof - if std == logStdErr { - logf = log.Errorf - } - - return loggerWriter{ - format: appName + " " + std.String() + ": %q", - logf: logf, - } -} - -func (l loggerWriter) Write(p []byte) (n int, err error) { - l.logf(l.format, string(p)) - return len(p), nil -} diff --git a/internal/pkg/core/plugin/process/stdlogger_test.go b/internal/pkg/core/plugin/process/stdlogger_test.go deleted file mode 100644 index 959f387c32a..00000000000 --- a/internal/pkg/core/plugin/process/stdlogger_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
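The loggerWriter removed above adapts a logger to io.Writer so a child's stdout and stderr can be re-emitted as structured log records tagged with the application name and the stream. A dependency-free sketch of the same idea, with log.Printf standing in for the agent's zap-based logger:

package main

import (
	"log"
	"os/exec"
)

// streamWriter re-emits everything a child process writes through the
// parent's logger, tagged with the application name and the stream.
type streamWriter struct {
	app    string
	stream string // "stdout" or "stderr"
}

func (w streamWriter) Write(p []byte) (int, error) {
	// one record per Write call; pipes usually deliver whole chunks, so
	// several lines may arrive together
	log.Printf("%s %s: %q", w.app, w.stream, string(p))
	return len(p), nil
}

func main() {
	cmd := exec.Command("echo", "hello from the child")
	cmd.Stdout = streamWriter{app: "somebeats", stream: "stdout"}
	cmd.Stderr = streamWriter{app: "somebeats", stream: "stderr"}
	_ = cmd.Run()
}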
- -package process - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/zap/zapcore" - - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func Test_loggerWriter(t *testing.T) { - tc := []struct { - name string - args struct { - appName string - logTo logStd - } - logMsg string - logLevel zapcore.Level - }{ - { - name: "capture stdout", - args: struct { - appName string - logTo logStd - }{ - appName: "somebeats", - logTo: logStdOut, - }, - logMsg: "stdout log", - logLevel: zapcore.InfoLevel, - }, - { - name: "capture stderr", - args: struct { - appName string - logTo logStd - }{ - appName: "somebeats", - logTo: logStdErr, - }, - logMsg: "stderr log", - logLevel: zapcore.ErrorLevel, - }, - } - - for _, tt := range tc { - logg, obs := logger.NewTesting("test-loggerWriter") - logg = logg.With("previous-field", "previous-value") - - l := newLoggerWriter(tt.args.appName, tt.args.logTo, logg) - _, _ = l.Write([]byte(tt.logMsg)) - - logs := obs.All() - require.Equal(t, 1, len(logs)) - - log := logs[0] - assert.Equal(t, log.Level, tt.logLevel) - assert.Contains(t, log.Message, tt.logMsg) - assert.Equal(t, log.ContextMap()[agentConsoleName], tt.args.appName) - assert.Equal(t, log.ContextMap()[agentConsoleType], tt.args.logTo.String()) - assert.Equal(t, log.ContextMap()["previous-field"], "previous-value") - } -} diff --git a/internal/pkg/core/plugin/process/watch_posix.go b/internal/pkg/core/plugin/process/watch_posix.go deleted file mode 100644 index 7e3e809e7bc..00000000000 --- a/internal/pkg/core/plugin/process/watch_posix.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build !windows -// +build !windows - -package process - -import ( - "os" - "syscall" - "time" -) - -// externalProcess is a watch mechanism used in cases where OS requires -// a process to be a child for waiting for process. We need to be able -// await any process. -// This operation is long running. -func (a *Application) externalProcess(proc *os.Process) { - if proc == nil { - return - } - - for { - <-time.After(1 * time.Second) - if proc.Signal(syscall.Signal(0)) != nil { - // failed to contact process, return - return - } - } -} diff --git a/internal/pkg/core/plugin/process/watch_windows.go b/internal/pkg/core/plugin/process/watch_windows.go deleted file mode 100644 index d5baeb1e895..00000000000 --- a/internal/pkg/core/plugin/process/watch_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build windows -// +build windows - -package process - -import ( - "os" - "syscall" - "time" -) - -const ( - // exitCodeStillActive according to docs.microsoft.com/en-us/windows/desktop/api/processthreadsapi/nf-processthreadsapi-getexitcodeprocess - exitCodeStillActive = 259 -) - -// externalProcess is a watch mechanism used in cases where OS requires -// a process to be a child for waiting for process. 
We need to be able
-// to await any process
-func (a *Application) externalProcess(proc *os.Process) {
-	if proc == nil {
-		return
-	}
-
-	for {
-		<-time.After(1 * time.Second)
-		if isWindowsProcessExited(proc.Pid) {
-			return
-		}
-	}
-}
-
-func isWindowsProcessExited(pid int) bool {
-	const desiredAccess = syscall.STANDARD_RIGHTS_READ | syscall.PROCESS_QUERY_INFORMATION | syscall.SYNCHRONIZE
-	h, err := syscall.OpenProcess(desiredAccess, false, uint32(pid))
-	if err != nil {
-		// failed to open handle, report exited
-		return true
-	}
-
-	// get the exit code; this returns immediately, and in case the process is
-	// still running it returns exitCodeStillActive
-	var ec uint32
-	if err := syscall.GetExitCodeProcess(h, &ec); err != nil {
-		// failed to contact, report exited
-		return true
-	}
-
-	return ec != exitCodeStillActive
-}
diff --git a/internal/pkg/core/plugin/service/app.go b/internal/pkg/core/plugin/service/app.go
deleted file mode 100644
index ab3631f12c0..00000000000
--- a/internal/pkg/core/plugin/service/app.go
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package service
-
-import (
-	"context"
-	"fmt"
-	"io"
-	"net"
-	"reflect"
-	"sync"
-	"time"
-
-	"gopkg.in/yaml.v2"
-
-	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
-
-	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
-	"github.com/elastic/elastic-agent/internal/pkg/core/app"
-	"github.com/elastic/elastic-agent/internal/pkg/core/monitoring"
-	"github.com/elastic/elastic-agent/internal/pkg/core/plugin"
-	"github.com/elastic/elastic-agent/internal/pkg/core/state"
-	"github.com/elastic/elastic-agent/internal/pkg/core/status"
-	"github.com/elastic/elastic-agent/internal/pkg/tokenbucket"
-	"github.com/elastic/elastic-agent/pkg/core/logger"
-	"github.com/elastic/elastic-agent/pkg/core/process"
-	"github.com/elastic/elastic-agent/pkg/core/server"
-)
-
-var (
-	// ErrAppNotInstalled is returned when configuration is performed on a not-installed application.
-	ErrAppNotInstalled = errors.New("application is not installed", errors.TypeApplication)
-)
-
-// Application encapsulates an application that is run as a service by the system service manager.
-type Application struct {
-	bgContext  context.Context
-	id         string
-	name       string
-	pipelineID string
-	logLevel   string
-	desc       *app.Descriptor
-	srv        *server.Server
-	srvState   *server.ApplicationState
-	limiter    *tokenbucket.Bucket
-	state      state.State
-	reporter   state.Reporter
-
-	uid int
-	gid int
-
-	monitor        monitoring.Monitor
-	statusReporter status.Reporter
-
-	processConfig *process.Config
-
-	logger *logger.Logger
-
-	credsPort     int
-	credsWG       sync.WaitGroup
-	credsListener net.Listener
-
-	appLock sync.Mutex
-}
-
-// NewApplication creates a new instance of an application.
-func NewApplication( - ctx context.Context, - id, appName, pipelineID, logLevel string, - credsPort int, - desc *app.Descriptor, - srv *server.Server, - cfg *configuration.SettingsConfig, - logger *logger.Logger, - reporter state.Reporter, - monitor monitoring.Monitor, - statusController status.Controller) (*Application, error) { - - s := desc.ProcessSpec() - uid, gid, err := s.UserGroup() - if err != nil { - return nil, err - } - - b, _ := tokenbucket.NewTokenBucket(ctx, 3, 3, 1*time.Second) - return &Application{ - bgContext: ctx, - id: id, - name: appName, - pipelineID: pipelineID, - logLevel: logLevel, - desc: desc, - srv: srv, - processConfig: cfg.ProcessConfig, - logger: logger, - limiter: b, - state: state.State{ - Status: state.Stopped, - }, - reporter: reporter, - monitor: monitor, - uid: uid, - gid: gid, - credsPort: credsPort, - statusReporter: statusController.RegisterApp(id, appName), - }, nil -} - -// Monitor returns monitoring handler of this app. -func (a *Application) Monitor() monitoring.Monitor { - return a.monitor -} - -// Spec returns the program spec of this app. -func (a *Application) Spec() program.Spec { - return a.desc.Spec() -} - -// State returns the application state. -func (a *Application) State() state.State { - a.appLock.Lock() - defer a.appLock.Unlock() - return a.state -} - -// Name returns application name -func (a *Application) Name() string { - return a.name -} - -// Started returns true if the application is started. -func (a *Application) Started() bool { - return a.srvState != nil -} - -// SetState sets the status of the application. -func (a *Application) SetState(s state.Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - a.setState(s, msg, payload) -} - -// Start starts the application with a specified config. -func (a *Application) Start(ctx context.Context, _ app.Taggable, cfg map[string]interface{}) (err error) { - defer func() { - if err != nil { - // inject App metadata - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - } - }() - - a.appLock.Lock() - defer a.appLock.Unlock() - - cfgStr, err := yaml.Marshal(cfg) - if err != nil { - return err - } - - // already started - if a.srvState != nil { - a.setState(state.Starting, "Starting", nil) - a.srvState.SetStatus(proto.StateObserved_STARTING, a.state.Message, a.state.Payload) - a.srvState.UpdateConfig(a.srvState.Config()) - } else { - a.setState(state.Starting, "Starting", nil) - - a.srvState, err = a.srv.Register(a, string(cfgStr)) - if err != nil { - return err - } - - // Set input types from the spec - a.srvState.SetInputTypes(a.desc.Spec().ActionInputTypes) - } - - defer func() { - if err != nil { - if a.srvState != nil { - a.srvState.Destroy() - a.srvState = nil - } - } - }() - - if err := a.monitor.Prepare(a.desc.Spec(), a.pipelineID, a.uid, a.gid); err != nil { - return err - } - - if a.limiter != nil { - a.limiter.Add() - } - - // start the credentials listener for the service - if err := a.startCredsListener(); err != nil { - return err - } - - // allow the service manager to ensure that the application is started, currently this does not start/stop - // the actual service in the system service manager - - return nil -} - -// Configure configures the application with the passed configuration. 
-func (a *Application) Configure(ctx context.Context, config map[string]interface{}) (err error) { - defer func() { - if err != nil { - // inject App metadata - err = errors.New(err, errors.M(errors.MetaKeyAppName, a.name), errors.M(errors.MetaKeyAppName, a.id)) - a.statusReporter.Update(state.Degraded, err.Error(), nil) - } - }() - - a.appLock.Lock() - defer a.appLock.Unlock() - - if a.srvState == nil { - return errors.New(ErrAppNotInstalled) - } - - cfgStr, err := yaml.Marshal(config) - if err != nil { - return errors.New(err, errors.TypeApplication) - } - - isRestartNeeded := plugin.IsRestartNeeded(a.logger, a.Spec(), a.srvState, config) - - err = a.srvState.UpdateConfig(string(cfgStr)) - if err != nil { - return errors.New(err, errors.TypeApplication) - } - - if isRestartNeeded { - a.logger.Infof("initiating restart of '%s' due to config change", a.Name()) - a.appLock.Unlock() - a.Stop() - err = a.Start(ctx, a.desc, config) - // lock back so it wont panic on deferred unlock - a.appLock.Lock() - } - - return err -} - -// Stop stops the current application. -func (a *Application) Stop() { - a.appLock.Lock() - srvState := a.srvState - a.appLock.Unlock() - - if srvState == nil { - return - } - - if err := srvState.Stop(a.processConfig.StopTimeout); err != nil { - a.appLock.Lock() - a.setState( - state.Failed, - fmt.Errorf("failed to stop after %s: %w", a.processConfig.StopTimeout, err).Error(), - nil) - } else { - a.appLock.Lock() - a.setState(state.Stopped, "Stopped", nil) - } - a.srvState = nil - - a.cleanUp() - a.stopCredsListener() - a.appLock.Unlock() -} - -// Shutdown disconnects the service, but doesn't signal it to stop. -func (a *Application) Shutdown() { - a.appLock.Lock() - defer a.appLock.Unlock() - a.logger.Infof("signaling service to stop because of shutdown: %s", a.id) - - if a.srvState == nil { - return - } - - // destroy the application in the server, this skips sending - // the expected stopping state to the service - a.setState(state.Stopped, "Stopped", nil) - a.srvState.Destroy() - a.srvState = nil - - a.cleanUp() - a.stopCredsListener() -} - -// OnStatusChange is the handler called by the GRPC server code. -// -// It updates the status of the application and handles restarting the application when needed. -func (a *Application) OnStatusChange(s *server.ApplicationState, status proto.StateObserved_Status, msg string, payload map[string]interface{}) { - a.appLock.Lock() - defer a.appLock.Unlock() - - // If the application is stopped, do not update the state. Stopped is a final state - // and should not be overridden. 
- if a.state.Status == state.Stopped { - return - } - - a.setState(state.FromProto(status), msg, payload) -} - -func (a *Application) setState(s state.Status, msg string, payload map[string]interface{}) { - if a.state.Status != s || a.state.Message != msg || !reflect.DeepEqual(a.state.Payload, payload) { - if state.IsStateFiltered(msg, payload) { - return - } - - a.state.Status = s - a.state.Message = msg - a.state.Payload = payload - if a.reporter != nil { - go a.reporter.OnStateChange(a.id, a.name, a.state) - } - a.statusReporter.Update(s, msg, payload) - } -} - -func (a *Application) cleanUp() { - a.monitor.Cleanup(a.desc.Spec(), a.pipelineID) -} - -func (a *Application) startCredsListener() error { - lis, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", a.credsPort)) - if err != nil { - return errors.New(err, "failed to start connection credentials listener") - } - a.credsListener = lis - a.credsWG.Add(1) - go func() { - for { - conn, err := lis.Accept() - if err != nil { - break - } - a.appLock.Lock() - srvState := a.srvState - a.appLock.Unlock() - if srvState == nil { - // application stopped - _ = conn.Close() - continue - } - if err := srvState.WriteConnInfo(conn); err != nil { - _ = conn.Close() - if !errors.Is(err, io.EOF) { - a.logger.Errorf("failed to write connection credentials: %s", err) - } - continue - } - _ = conn.Close() - } - a.credsWG.Done() - }() - - return nil -} - -func (a *Application) stopCredsListener() { - a.credsListener.Close() - a.credsWG.Wait() - a.credsListener = nil -} diff --git a/internal/pkg/core/retry/config.go b/internal/pkg/core/retry/config.go deleted file mode 100644 index 11cd1e7b418..00000000000 --- a/internal/pkg/core/retry/config.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package retry - -import "time" - -const ( - defaultRetriesCount = 3 - defaultDelay = 30 * time.Second - defaultMaxDelay = 5 * time.Minute -) - -// Config is a configuration of a strategy -type Config struct { - // Enabled determines whether retry is possible. Default is false. - Enabled bool `yaml:"enabled" config:"enabled"` - // RetriesCount specifies number of retries. Default is 3. - // Retry count of 1 means it will be retried one time after one failure. - RetriesCount int `yaml:"retriesCount" config:"retriesCount"` - // Delay specifies delay in ms between retries. Default is 30s - Delay time.Duration `yaml:"delay" config:"delay"` - // MaxDelay specifies maximum delay in ms between retries. Default is 300s (5min) - MaxDelay time.Duration `yaml:"maxDelay" config:"maxDelay"` - // Exponential determines whether delay is treated as exponential. - // With 30s delay and 3 retries: 30, 60, 120s - // Default is false - Exponential bool `yaml:"exponential" config:"exponential"` -} - -// DefaultConfig creates a config with pre-set default values. -func DefaultConfig() *Config { - return &Config{ - Enabled: false, - RetriesCount: 3, - Delay: 30 * time.Second, - MaxDelay: 5 * time.Minute, - Exponential: false, - } -} diff --git a/internal/pkg/core/retry/error.go b/internal/pkg/core/retry/error.go deleted file mode 100644 index b5ef3bda746..00000000000 --- a/internal/pkg/core/retry/error.go +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package retry - -// Fatal in retry package is an interface each error needs to implement -// in order to say whether or not it is fatal. -type Fatal interface { - Fatal() bool -} - -// FatalError wraps an error and is always fatal -type FatalError struct { - error -} - -// Fatal determines whether or not error is fatal -func (*FatalError) Fatal() bool { - return true -} - -// ErrorMakeFatal is a shorthand for making an error fatal -func ErrorMakeFatal(err error) error { - if err == nil { - return err - } - - return FatalError{err} -} diff --git a/internal/pkg/core/retry/retrystrategy.go b/internal/pkg/core/retry/retrystrategy.go deleted file mode 100644 index 4dd19c7c5d8..00000000000 --- a/internal/pkg/core/retry/retrystrategy.go +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package retry - -import ( - "context" - "errors" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/core/backoff" -) - -// DoWithBackoff ignores retry config of delays and lets backoff decide how much time it needs. -func DoWithBackoff(config *Config, b backoff.Backoff, fn func() error, fatalErrors ...error) error { - retryCount := getRetryCount(config) - var err error - - for retryNo := 0; retryNo <= retryCount; retryNo++ { - err = fn() - if err == nil || isFatal(err, fatalErrors...) { - b.Reset() - return err - } - - if retryNo < retryCount { - b.Wait() - } - } - - return err -} - -// Do runs provided function in a manner specified in retry configuration -func Do(ctx context.Context, config *Config, fn func(ctx context.Context) error, fatalErrors ...error) error { - retryCount := getRetryCount(config) - var err error - -RETRY_LOOP: - for retryNo := 0; retryNo <= retryCount; retryNo++ { - if ctx.Err() != nil { - break - } - - err = fn(ctx) - if err == nil { - return nil - } - - if isFatal(err, fatalErrors...) { - return err - } - - if retryNo < retryCount { - t := time.NewTimer(getDelayDuration(config, retryNo)) - select { - case <-t.C: - case <-ctx.Done(): - t.Stop() - break RETRY_LOOP - } - } - } - - return err -} - -func getRetryCount(config *Config) int { - if config == nil { - return defaultRetriesCount - } - - if !config.Enabled { - return 0 - } - - if config.RetriesCount > 0 { - return config.RetriesCount - } - - return defaultRetriesCount -} - -func getDelayDuration(config *Config, retryNo int) time.Duration { - delay := defaultDelay - - if config != nil { - if config.Delay > 0 { - delay = config.Delay - } - - if config.Exponential { - delay = time.Duration(delay.Nanoseconds() * int64(retryNo+1)) - } - } - - maxDelay := config.MaxDelay - if maxDelay == 0 { - maxDelay = defaultMaxDelay - } - if delay > maxDelay { - delay = maxDelay - } - return time.Duration(delay) -} - -// Error is fatal either if it implements Error interface and says so -// or if it is equal to one of the fatal values provided -func isFatal(err error, fatalErrors ...error) bool { - if fatalerr, ok := err.(Fatal); ok { // nolint:errorlint // Non obvious handling, we will refactor that module for v2. 
- return fatalerr.Fatal() - } - - for _, e := range fatalErrors { - if errors.Is(e, err) { - return true - } - } - - // What does not match criteria is considered transient - return false -} diff --git a/internal/pkg/core/retry/retrystrategy_test.go b/internal/pkg/core/retry/retrystrategy_test.go deleted file mode 100644 index f6df5258ad2..00000000000 --- a/internal/pkg/core/retry/retrystrategy_test.go +++ /dev/null @@ -1,183 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package retry - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/core/backoff" -) - -func TestRetry(t *testing.T) { - type testCase struct { - Fn func(context.Context) error - ExpectedExecutions int64 - IsErrExpected bool - Enabled bool - Exponential bool - } - - errFatal := errors.New("fatal") - var executions int64 - - testCases := map[string]testCase{ - "not-failing": testCase{Fn: func(_ context.Context) error { executions++; return nil }, ExpectedExecutions: 1, Enabled: true}, - "failing": testCase{Fn: func(_ context.Context) error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - "fatal-by-enum": testCase{Fn: func(_ context.Context) error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "not-fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - - "dis-not-failing": testCase{Fn: func(_ context.Context) error { executions++; return nil }, ExpectedExecutions: 1, Enabled: false}, - "dis-failing": testCase{Fn: func(_ context.Context) error { executions++; return errors.New("fail") }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - "dis-fatal-by-enum": testCase{Fn: func(_ context.Context) error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - "dis-fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - "dis-not-fatal-by-iface": testCase{Fn: func(_ context.Context) error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: false}, - - "failing-exp": testCase{Fn: func(_ context.Context) error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true, Exponential: true}, - } - - config := &Config{ - RetriesCount: 3, - Delay: 500 * time.Millisecond, - } - - for n, tc := range testCases { - testFn := tc.Fn - executions = 0 - config.Enabled = tc.Enabled - config.Exponential = tc.Exponential - - startTime := time.Now() - err := Do(context.Background(), config, testFn, errFatal) - - executionTime := time.Since(startTime) - minExecutionTime := getMinExecutionTime(config.Delay, tc.ExpectedExecutions, tc.Exponential) - maxExecutionTime := getMaxExecutionTime(config.Delay, tc.ExpectedExecutions, tc.Exponential) - if tc.ExpectedExecutions > 1 && (executionTime < minExecutionTime || executionTime > maxExecutionTime) { - t.Fatalf("[%s]: expecting execution time between 
%d-%d ns, got: %v", n, minExecutionTime, maxExecutionTime, executionTime) - } - - if (err == nil) == tc.IsErrExpected { - t.Fatalf("[%s]: expecting error, got: %v", n, err) - } - - if executions != tc.ExpectedExecutions { - t.Fatalf("[%s]: expecting %d executions, got: %d", n, tc.ExpectedExecutions, executions) - } - } -} - -func TestRetryWithBackoff(t *testing.T) { - type testCase struct { - Fn func() error - ExpectedExecutions int - IsErrExpected bool - Enabled bool - } - - errFatal := errors.New("fatal") - executions := 0 - - testCases := map[string]testCase{ - "not-failing": testCase{Fn: func() error { executions++; return nil }, ExpectedExecutions: 1, Enabled: true}, - "failing": testCase{Fn: func() error { executions++; return errors.New("fail") }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - "fatal-by-enum": testCase{Fn: func() error { executions++; return errFatal }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "fatal-by-iface": testCase{Fn: func() error { executions++; return ErrFatal{} }, ExpectedExecutions: 1, IsErrExpected: true, Enabled: true}, - "not-fatal-by-iface": testCase{Fn: func() error { executions++; return ErrNotFatal{} }, ExpectedExecutions: 4, IsErrExpected: true, Enabled: true}, - } - - config := &Config{ - RetriesCount: 3, - Delay: 5000, - } - maxDelay := time.Duration(config.Delay) * time.Millisecond - - done := make(chan struct{}) - maxWaitTime := 200 * time.Millisecond - minWaitTime := 50 * time.Millisecond - backoff := backoff.NewEqualJitterBackoff(done, minWaitTime, maxWaitTime) - - for n, tc := range testCases { - testFn := tc.Fn - executions = 0 - config.Enabled = tc.Enabled - - startTime := time.Now() - err := DoWithBackoff(config, backoff, testFn, errFatal) - - executionTime := time.Since(startTime) - minExecTime := getBackoffMinTime(minWaitTime, maxWaitTime, tc.ExpectedExecutions) - if tc.ExpectedExecutions > 1 && (executionTime < minExecTime || executionTime > maxDelay) { - t.Fatalf("[%s]: expecting execution time between %d-%d ns, got: %v", n, minExecTime, maxDelay, executionTime) - } - - if (err == nil) == tc.IsErrExpected { - t.Fatalf("[%s]: expecting error, got: %v", n, err) - } - - if executions != tc.ExpectedExecutions { - t.Fatalf("[%s]: expecting %d executions, got: %d", n, tc.ExpectedExecutions, executions) - } - } -} - -type ErrFatal struct{ error } - -func (ErrFatal) Fatal() bool { - return true -} - -type ErrNotFatal struct{ error } - -func (ErrNotFatal) Fatal() bool { - return false -} - -func getMaxExecutionTime(delayDuration time.Duration, executions int64, exponential bool) time.Duration { - delay := delayDuration.Nanoseconds() - execTime := (executions)*delay + (delay / 2) - if exponential { - execTime = 0 - for i := int64(0); i < executions; i++ { - execTime += i * delay - } - execTime += (delay / 2) * executions - } - - return time.Duration(execTime) -} - -func getMinExecutionTime(delayDuration time.Duration, executions int64, exponential bool) time.Duration { - delay := delayDuration.Nanoseconds() - execTime := (executions-1)*delay - (delay / 2) - if exponential { - execTime = 0 - for i := int64(0); i < executions; i++ { - execTime += i * delay - } - execTime -= (delay / 2) - } - - if execTime < 0 { - execTime = 0 - } - return time.Duration(execTime) -} - -func getBackoffMinTime(delay time.Duration, maxWaitTime time.Duration, executions int) time.Duration { - var duration time.Duration - for i := 1; i < executions; i++ { - duration += delay - delay *= 2 - if delay > maxWaitTime { - delay = 
maxWaitTime - } - } - - return duration -} diff --git a/internal/pkg/core/state/state.go b/internal/pkg/core/state/state.go deleted file mode 100644 index 57dfb639b72..00000000000 --- a/internal/pkg/core/state/state.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package state - -import ( - "context" - "strings" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/pkg/core/process" -) - -// Status describes the current status of the application process. -type Status int - -const ( - // Stopped is status describing not running application. - Stopped Status = -4 - // Crashed is status describing application is crashed. - Crashed Status = -3 - // Restarting is status describing application is restarting. - Restarting Status = -2 - // Updating is status describing application is updating. - Updating Status = -1 - - // Starting is status describing application is starting. - Starting = Status(proto.StateObserved_STARTING) - // Configuring is status describing application is configuring. - Configuring = Status(proto.StateObserved_CONFIGURING) - // Healthy is status describing application is running. - Healthy = Status(proto.StateObserved_HEALTHY) - // Degraded is status describing application is degraded. - Degraded = Status(proto.StateObserved_DEGRADED) - // Failed is status describing application is failed. - Failed = Status(proto.StateObserved_FAILED) - // Stopping is status describing application is stopping. - Stopping = Status(proto.StateObserved_STOPPING) -) - -var filteredErrors = []string{ - context.Canceled.Error(), -} - -// IsInternal returns true if the status is an internal status and not something that should be reported -// over the protocol as an actual status. -func (s Status) IsInternal() bool { - return s < Starting -} - -// ToProto converts the status to status that is compatible with the protocol. -func (s Status) ToProto() proto.StateObserved_Status { - if !s.IsInternal() { - return proto.StateObserved_Status(s) - } - if s == Updating || s == Restarting { - return proto.StateObserved_STARTING - } - if s == Crashed { - return proto.StateObserved_FAILED - } - if s == Stopped { - return proto.StateObserved_STOPPING - } - // fallback to degraded - return proto.StateObserved_DEGRADED -} - -// FromProto converts the status from protocol to status Agent representation. -func FromProto(s proto.StateObserved_Status) Status { - return Status(s) -} - -// State wraps the process state and application status. -type State struct { - ProcessInfo *process.Info - Status Status - Message string - Payload map[string]interface{} -} - -// Reporter is interface that is called when a state is changed. -type Reporter interface { - // OnStateChange is called when state changes. - OnStateChange(id string, name string, state State) -} - -// IsStateFiltered returns true if state message contains error out of predefined -// collection of ignored errors. 
-func IsStateFiltered(msg string, payload map[string]interface{}) bool { - for _, e := range filteredErrors { - if strings.Contains(msg, e) { - return true - } - } - return false -} diff --git a/internal/pkg/core/status/reporter.go b/internal/pkg/core/status/reporter.go deleted file mode 100644 index 04c8251fa92..00000000000 --- a/internal/pkg/core/status/reporter.go +++ /dev/null @@ -1,302 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package status - -import ( - "reflect" - "sync" - - "github.com/google/uuid" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// AgentStatusCode is the status code for the Elastic Agent overall. -type AgentStatusCode int - -const ( - // Healthy status means everything is fine. - Healthy AgentStatusCode = iota - // Degraded status means something minor is preventing agent to work properly. - Degraded - // Failed status means agent is unable to work properly. - Failed -) - -// String returns the string value for the agent code. -func (s AgentStatusCode) String() string { - return []string{"online", "degraded", "error"}[s] -} - -// AgentApplicationStatus returns the status of specific application. -type AgentApplicationStatus struct { - ID string - Name string - Status state.Status - Message string - Payload map[string]interface{} -} - -// AgentStatus returns the overall status of the Elastic Agent. -type AgentStatus struct { - Status AgentStatusCode - Message string - Applications []AgentApplicationStatus -} - -// Controller takes track of component statuses. -type Controller interface { - RegisterComponent(string) Reporter - RegisterComponentWithPersistance(string, bool) Reporter - RegisterApp(id string, name string) Reporter - Status() AgentStatus - StatusCode() AgentStatusCode - StatusString() string - UpdateStateID(string) -} - -type controller struct { - mx sync.Mutex - status AgentStatusCode - reporters map[string]*reporter - appReporters map[string]*reporter - log *logger.Logger - stateID string -} - -// NewController creates a new reporter. -func NewController(log *logger.Logger) Controller { - return &controller{ - status: Healthy, - reporters: make(map[string]*reporter), - appReporters: make(map[string]*reporter), - log: log, - } -} - -// UpdateStateID cleans health when new configuration is received. -// To prevent reporting failures from previous configuration. -func (r *controller) UpdateStateID(stateID string) { - if stateID == r.stateID { - return - } - - r.mx.Lock() - - r.stateID = stateID - // cleanup status for component reporters - // the status of app reports remain the same - for _, rep := range r.reporters { - if !rep.isRegistered { - continue - } - - rep.mx.Lock() - if !rep.isPersistent { - rep.status = state.Configuring - rep.message = "" - } - rep.mx.Unlock() - } - r.mx.Unlock() - - r.updateStatus() -} - -// Register registers new component for status updates. -func (r *controller) RegisterComponent(componentIdentifier string) Reporter { - return r.RegisterComponentWithPersistance(componentIdentifier, false) -} - -// Register registers new component for status updates. 
-func (r *controller) RegisterComponentWithPersistance(componentIdentifier string, persistent bool) Reporter { - id := componentIdentifier + "-" + uuid.New().String()[:8] - rep := &reporter{ - name: componentIdentifier, - isRegistered: true, - unregisterFunc: func() { - r.mx.Lock() - delete(r.reporters, id) - r.mx.Unlock() - }, - notifyChangeFunc: r.updateStatus, - isPersistent: persistent, - } - - r.mx.Lock() - r.reporters[id] = rep - r.mx.Unlock() - - return rep -} - -// RegisterApp registers new component for status updates. -func (r *controller) RegisterApp(componentIdentifier string, name string) Reporter { - id := componentIdentifier + "-" + uuid.New().String()[:8] - rep := &reporter{ - name: name, - status: state.Stopped, - isRegistered: true, - unregisterFunc: func() { - r.mx.Lock() - delete(r.appReporters, id) - r.mx.Unlock() - }, - notifyChangeFunc: r.updateStatus, - } - - r.mx.Lock() - r.appReporters[id] = rep - r.mx.Unlock() - - return rep -} - -// Status retrieves current agent status. -func (r *controller) Status() AgentStatus { - r.mx.Lock() - defer r.mx.Unlock() - apps := make([]AgentApplicationStatus, 0, len(r.appReporters)) - for key, rep := range r.appReporters { - rep.mx.Lock() - apps = append(apps, AgentApplicationStatus{ - ID: key, - Name: rep.name, - Status: rep.status, - Message: rep.message, - Payload: rep.payload, - }) - rep.mx.Unlock() - } - return AgentStatus{ - Status: r.status, - Message: "", - Applications: apps, - } -} - -// StatusCode retrieves current agent status code. -func (r *controller) StatusCode() AgentStatusCode { - r.mx.Lock() - defer r.mx.Unlock() - return r.status -} - -func (r *controller) updateStatus() { - status := Healthy - - r.mx.Lock() - for id, rep := range r.reporters { - s := statusToAgentStatus(rep.status) - if s > status { - status = s - } - - r.log.Debugf("'%s' has status '%s'", id, s) - if status == Failed { - break - } - } - if status != Failed { - for id, rep := range r.appReporters { - s := statusToAgentStatus(rep.status) - if s > status { - status = s - } - - r.log.Debugf("'%s' has status '%s'", id, s) - if status == Failed { - break - } - } - } - - if r.status != status { - r.logStatus(status) - r.status = status - } - - r.mx.Unlock() - -} - -func (r *controller) logStatus(status AgentStatusCode) { - logFn := r.log.Infof - if status == Degraded { - logFn = r.log.Warnf - } else if status == Failed { - logFn = r.log.Errorf - } - - logFn("Elastic Agent status changed to: '%s'", status) -} - -// StatusString retrieves human readable string of current agent status. -func (r *controller) StatusString() string { - return r.StatusCode().String() -} - -// Reporter reports status of component -type Reporter interface { - Update(state.Status, string, map[string]interface{}) - Unregister() -} - -type reporter struct { - name string - mx sync.Mutex - isPersistent bool - isRegistered bool - status state.Status - message string - payload map[string]interface{} - unregisterFunc func() - notifyChangeFunc func() -} - -// Update updates the status of a component. -func (r *reporter) Update(s state.Status, message string, payload map[string]interface{}) { - r.mx.Lock() - defer r.mx.Unlock() - - if !r.isRegistered { - return - } - if state.IsStateFiltered(message, payload) { - return - } - - if r.status != s || r.message != message || !reflect.DeepEqual(r.payload, payload) { - r.status = s - r.message = message - r.payload = payload - r.notifyChangeFunc() - } -} - -// Unregister unregisters status from reporter. 
Reporter will no longer be taken into consideration -// for overall status computation. -func (r *reporter) Unregister() { - r.mx.Lock() - defer r.mx.Unlock() - - r.isRegistered = false - r.unregisterFunc() - r.notifyChangeFunc() -} - -func statusToAgentStatus(status state.Status) AgentStatusCode { - s := status.ToProto() - if s == proto.StateObserved_DEGRADED { - return Degraded - } - if s == proto.StateObserved_FAILED { - return Failed - } - return Healthy -} diff --git a/internal/pkg/core/status/reporter_test.go b/internal/pkg/core/status/reporter_test.go deleted file mode 100644 index 0d44e402798..00000000000 --- a/internal/pkg/core/status/reporter_test.go +++ /dev/null @@ -1,101 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package status - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestReporter(t *testing.T) { - l, _ := logger.New("", false) - t.Run("healthy by default", func(t *testing.T) { - r := NewController(l) - assert.Equal(t, Healthy, r.StatusCode()) - assert.Equal(t, "online", r.StatusString()) - }) - - t.Run("healthy when all healthy", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - a1 := r.RegisterApp("app-1", "app") - a2 := r.RegisterApp("app-2", "app") - a3 := r.RegisterApp("other-1", "other") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Healthy, "", nil) - r3.Update(state.Healthy, "", nil) - a1.Update(state.Healthy, "", nil) - a2.Update(state.Healthy, "", nil) - a3.Update(state.Healthy, "", nil) - - assert.Equal(t, Healthy, r.StatusCode()) - assert.Equal(t, "online", r.StatusString()) - }) - - t.Run("degraded when one degraded", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Degraded, "degraded", nil) - r3.Update(state.Healthy, "", nil) - - assert.Equal(t, Degraded, r.StatusCode()) - assert.Equal(t, "degraded", r.StatusString()) - }) - - t.Run("failed when one failed", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Failed, "failed", nil) - r3.Update(state.Healthy, "", nil) - - assert.Equal(t, Failed, r.StatusCode()) - assert.Equal(t, "error", r.StatusString()) - }) - - t.Run("failed when one failed and one degraded", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Failed, "failed", nil) - r3.Update(state.Degraded, "degraded", nil) - - assert.Equal(t, Failed, r.StatusCode()) - assert.Equal(t, "error", r.StatusString()) - }) - - t.Run("degraded when degraded and healthy, failed unregistered", func(t *testing.T) { - r := NewController(l) - r1 := r.RegisterComponent("r1") - r2 := r.RegisterComponent("r2") - r3 := r.RegisterComponent("r3") - - r1.Update(state.Healthy, "", nil) - r2.Update(state.Failed, "failed", nil) - r3.Update(state.Degraded, "degraded", nil) - - 
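-        // Unregistering the failed component removes it from the overall status computation: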
r2.Unregister()
-
-        assert.Equal(t, Degraded, r.StatusCode())
-        assert.Equal(t, "degraded", r.StatusString())
-    })
-}
diff --git a/internal/pkg/reporter/backend.go b/internal/pkg/reporter/backend.go
deleted file mode 100644
index 39ee2bcda5b..00000000000
--- a/internal/pkg/reporter/backend.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package reporter
-
-import "context"
-
-// Backend defines an actual implementation of reporting.
-type Backend interface {
-    Report(context.Context, Event) error
-    Close() error
-}
diff --git a/internal/pkg/reporter/event.go b/internal/pkg/reporter/event.go
deleted file mode 100644
index dff0c1a89a2..00000000000
--- a/internal/pkg/reporter/event.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package reporter
-
-import "time"
-
-// Event is a reported event.
-type Event interface {
-    Type() string
-    SubType() string
-    Time() time.Time
-    Message() string
-    Payload() map[string]interface{}
-}
-
-type event struct {
-    eventype  string
-    subType   string
-    timestamp time.Time
-    message   string
-    payload   map[string]interface{}
-}
-
-func (e event) Type() string                    { return e.eventype }
-func (e event) SubType() string                 { return e.subType }
-func (e event) Time() time.Time                 { return e.timestamp }
-func (e event) Message() string                 { return e.message }
-func (e event) Payload() map[string]interface{} { return e.payload }
diff --git a/internal/pkg/reporter/fleet/config/config.go b/internal/pkg/reporter/fleet/config/config.go
deleted file mode 100644
index 1e42b956ee8..00000000000
--- a/internal/pkg/reporter/fleet/config/config.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package config
-
-// Config is the configuration for the fleet reporter.
-type Config struct {
-    Threshold               int `yaml:"threshold" config:"threshold" validate:"min=1"`
-    ReportingCheckFrequency int `yaml:"check_frequency_sec" config:"check_frequency_sec" validate:"min=1"`
-}
-
-// DefaultConfig initializes Config with default values.
-func DefaultConfig() *Config {
-    return &Config{
-        Threshold:               10000,
-        ReportingCheckFrequency: 30,
-    }
-}
diff --git a/internal/pkg/reporter/fleet/reporter.go b/internal/pkg/reporter/fleet/reporter.go
deleted file mode 100644
index d334a9b45cd..00000000000
--- a/internal/pkg/reporter/fleet/reporter.go
+++ /dev/null
@@ -1,175 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
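-//
-// Illustrative usage of the fleet reporter below (editor's sketch, not part of
-// the original file; info, log, ctx and ev are placeholders):
-//
-//    rep, _ := NewReporter(info, log, config.DefaultConfig())
-//    _ = rep.Report(ctx, ev)     // enqueue an event
-//    events, ack := rep.Events() // snapshot the queue
-//    // ... deliver events to fleet ...
-//    ack()                       // clear the delivered events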
-
-package fleet
-
-import (
-    "context"
-    "sync"
-    "time"
-
-    "github.com/elastic/elastic-agent/internal/pkg/fleetapi"
-    "github.com/elastic/elastic-agent/internal/pkg/reporter"
-    "github.com/elastic/elastic-agent/internal/pkg/reporter/fleet/config"
-    "github.com/elastic/elastic-agent/pkg/core/logger"
-)
-
-type event struct {
-    AgentID   string                 `json:"agent_id"`
-    EventType string                 `json:"type"`
-    Ts        fleetapi.Time          `json:"timestamp"`
-    SubType   string                 `json:"subtype"`
-    Msg       string                 `json:"message"`
-    Payload   map[string]interface{} `json:"payload,omitempty"`
-}
-
-func (e *event) Type() string {
-    return e.EventType
-}
-
-func (e *event) Timestamp() time.Time {
-    return time.Time(e.Ts)
-}
-
-func (e *event) Message() string {
-    return e.Msg
-}
-
-// Reporter is a fleet reporter; it queues events until they are collected and acknowledged.
-type Reporter struct {
-    info      agentInfo
-    logger    *logger.Logger
-    queue     []fleetapi.SerializableEvent
-    qlock     sync.Mutex
-    threshold int
-    lastAck   time.Time
-}
-
-type agentInfo interface {
-    AgentID() string
-}
-
-// NewReporter creates a new fleet reporter.
-func NewReporter(agentInfo agentInfo, l *logger.Logger, c *config.Config) (*Reporter, error) {
-    r := &Reporter{
-        info:      agentInfo,
-        queue:     make([]fleetapi.SerializableEvent, 0),
-        logger:    l,
-        threshold: c.Threshold,
-    }
-
-    return r, nil
-}
-
-// Report enqueues an event into the reporter queue.
-func (r *Reporter) Report(ctx context.Context, e reporter.Event) error {
-    r.qlock.Lock()
-    defer r.qlock.Unlock()
-
-    r.queue = append(r.queue, &event{
-        AgentID:   r.info.AgentID(),
-        EventType: e.Type(),
-        Ts:        fleetapi.Time(e.Time()),
-        SubType:   e.SubType(),
-        Msg:       e.Message(),
-        Payload:   e.Payload(),
-    })
-
-    if r.threshold > 0 && len(r.queue) > r.threshold {
-        // drop a low-importance event if needed
-        r.dropEvent()
-    }
-
-    return nil
-}
-
-// Events returns a list of events from the queue and an ack function
-// which clears those events once the caller is done with processing.
-func (r *Reporter) Events() ([]fleetapi.SerializableEvent, func()) {
-    r.qlock.Lock()
-    defer r.qlock.Unlock()
-
-    cp := r.queueCopy()
-
-    ackFn := func() {
-        // as time is monotonic and this runs on a single machine, this should be OK.
-        r.clear(cp, time.Now())
-    }
-
-    return cp, ackFn
-}
-
-func (r *Reporter) clear(items []fleetapi.SerializableEvent, ackTime time.Time) {
-    r.qlock.Lock()
-    defer r.qlock.Unlock()
-
-    if ackTime.Sub(r.lastAck) <= 0 ||
-        len(r.queue) == 0 ||
-        items == nil ||
-        len(items) == 0 {
-        return
-    }
-
-    var dropIdx int
-    r.lastAck = ackTime
-    itemsLen := len(items)
-
-OUTER:
-    for idx := itemsLen - 1; idx >= 0; idx-- {
-        for i, v := range r.queue {
-            if v == items[idx] {
-                dropIdx = i
-                break OUTER
-            }
-        }
-    }
-
-    r.queue = r.queue[dropIdx+1:]
-}
-
-// Close stops the reporter; the fleet reporter runs no background jobs, so this is a no-op.
-func (r *Reporter) Close() error {
-    return nil
-}
-
-func (r *Reporter) queueCopy() []fleetapi.SerializableEvent {
-    size := len(r.queue)
-    batch := make([]fleetapi.SerializableEvent, size)
-
-    copy(batch, r.queue)
-    return batch
-}
-
-func (r *Reporter) dropEvent() {
-    if dropped := r.tryDropInfo(); !dropped {
-        r.dropFirst()
-    }
-}
-
-// tryDropInfo returns true if a non-error event was found and dropped.
-func (r *Reporter) tryDropInfo() bool {
-    for i, e := range r.queue {
-        if e.Type() != reporter.EventTypeError {
-            r.queue = append(r.queue[:i], r.queue[i+1:]...)
- r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, e) - return true - } - } - - return false -} - -func (r *Reporter) dropFirst() { - if len(r.queue) == 0 { - return - } - - first := r.queue[0] - r.logger.Infof("fleet reporter dropped event because threshold[%d] was reached: %v", r.threshold, first) - r.queue = r.queue[1:] -} - -// Check it is reporter.Backend. -var _ reporter.Backend = &Reporter{} diff --git a/internal/pkg/reporter/fleet/reporter_test.go b/internal/pkg/reporter/fleet/reporter_test.go deleted file mode 100644 index c5160168a98..00000000000 --- a/internal/pkg/reporter/fleet/reporter_test.go +++ /dev/null @@ -1,241 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleet - -import ( - "context" - "testing" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/reporter" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -func TestEventsHaveAgentID(t *testing.T) { - // setup client - threshold := 10 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - for _, e := range reportedEvents { - re, ok := e.(*event) - - if !ok { - t.Fatal("reported event is not an event") - } - - if re.AgentID != "agentID" { - t.Fatalf("reported event id incorrect, expected: 'agentID', got: '%v'", re.AgentID) - } - } - -} - -func TestReporting(t *testing.T) { - // setup client - threshold := 10 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, ack := r.Events() - if reportedCount := len(reportedEvents); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // reset reported events - ack() - - // report events > threshold - secondBatchSize := threshold + 1 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check events are dropped - reportedEvents, _ = r.Events() - if reportedCount := len(reportedEvents); reportedCount != threshold { - t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) - } -} - -func TestInfoDrop(t *testing.T) { - // setup client - threshold := 2 - r := newTestReporter(2*time.Second, threshold) - - // report 1 info and 1 error - ee := []reporter.Event{testStateEvent{}, testErrorEvent{}, testErrorEvent{}} - - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != 2 { - t.Fatalf("expected %v events got %v", 2, reportedCount) - } - - // check both are errors - if reportedEvents[0].Type() != reportedEvents[1].Type() || reportedEvents[0].Type() != reporter.EventTypeError { - t.Fatalf("expected ERROR events got [1]: '%v', [2]: '%v'", 
reportedEvents[0].Type(), reportedEvents[1].Type()) - } -} - -func TestOutOfOrderAck(t *testing.T) { - // setup client - threshold := 100 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents1, ack1 := r.Events() - if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // report events > threshold - secondBatchSize := threshold + 1 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check all events are returned - reportedEvents2, ack2 := r.Events() - if reportedCount := len(reportedEvents2); reportedCount == firstBatchSize+secondBatchSize { - t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) - } - - // ack second batch - ack2() - - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != 0 { - t.Fatalf("expected all events are removed after second batch ack, got %v events", reportedCount) - } - - defer func() { - r := recover() - if r != nil { - t.Fatalf("expected ack is ignored but it paniced: %v", r) - } - }() - - ack1() - reportedEvents, _ = r.Events() - if reportedCount := len(reportedEvents); reportedCount != 0 { - t.Fatalf("expected all events are still removed after first batch ack, got %v events", reportedCount) - } -} - -func TestAfterDrop(t *testing.T) { - // setup client - threshold := 7 - r := newTestReporter(1*time.Second, threshold) - - // report events - firstBatchSize := 5 - ee := getEvents(firstBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check after delay for output - reportedEvents1, ack1 := r.Events() - if reportedCount := len(reportedEvents1); reportedCount != firstBatchSize { - t.Fatalf("expected %v events got %v", firstBatchSize, reportedCount) - } - - // report events > threshold - secondBatchSize := 5 - ee = getEvents(secondBatchSize) - for _, e := range ee { - r.Report(context.Background(), e) - } - - // check all events are returned - reportedEvents2, _ := r.Events() - if reportedCount := len(reportedEvents2); reportedCount != threshold { - t.Fatalf("expected %v events got %v", secondBatchSize, reportedCount) - } - - // remove first batch from queue - ack1() - - reportedEvents, _ := r.Events() - if reportedCount := len(reportedEvents); reportedCount != secondBatchSize { - t.Fatalf("expected all events from first batch are removed, got %v events", reportedCount) - } - -} - -func getEvents(count int) []reporter.Event { - ee := make([]reporter.Event, 0, count) - for i := 0; i < count; i++ { - ee = append(ee, testStateEvent{}) - } - - return ee -} - -func newTestReporter(frequency time.Duration, threshold int) *Reporter { - log, _ := logger.New("", false) - r := &Reporter{ - info: &testInfo{}, - queue: make([]fleetapi.SerializableEvent, 0), - logger: log, - threshold: threshold, - } - - return r -} - -type testInfo struct{} - -func (*testInfo) AgentID() string { return "agentID" } - -type testStateEvent struct{} - -func (testStateEvent) Type() string { return reporter.EventTypeState } -func (testStateEvent) SubType() string { return reporter.EventSubTypeInProgress } -func (testStateEvent) Time() time.Time { return time.Unix(0, 1) } -func (testStateEvent) Message() string { return "hello" } -func (testStateEvent) Payload() 
map[string]interface{} { return map[string]interface{}{"key": 1} }
-
-type testErrorEvent struct{}
-
-func (testErrorEvent) Type() string                    { return reporter.EventTypeError }
-func (testErrorEvent) SubType() string                 { return "PATH" }
-func (testErrorEvent) Time() time.Time                 { return time.Unix(0, 1) }
-func (testErrorEvent) Message() string                 { return "hello" }
-func (testErrorEvent) Payload() map[string]interface{} { return map[string]interface{}{"key": 1} }
diff --git a/internal/pkg/reporter/log/format.go b/internal/pkg/reporter/log/format.go
deleted file mode 100644
index 3cac93aa0cf..00000000000
--- a/internal/pkg/reporter/log/format.go
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package log
-
-import (
-    "fmt"
-    "time"
-)
-
-// Format is the format used for logging [DefaultFormat, JSONFormat].
-type Format bool
-
-const (
-    // DefaultFormat is a log format, resulting in: "2006-01-02T15:04:05: type: 'STATE': event type: 'STARTING' message: Application 'filebeat' is starting."
-    DefaultFormat Format = true
-    // JSONFormat is a log format, resulting in: {"timestamp": "2006-01-02T15:04:05", "type": "STATE", "event": {"type": "STARTING", "message": "Application 'filebeat' is starting."}
-    JSONFormat Format = false
-)
-
-const (
-    // e.g. "2006-01-02T15:04:05 - message: Application 'filebeat' is starting. - type: 'STATE' - event type: 'STARTING'"
-    defaultLogFormat = "%s - message: %s - type: '%s' - sub_type: '%s'"
-    timeFormat       = time.RFC3339
-)
-
-var formatMap = map[string]Format{
-    "default": DefaultFormat,
-    "json":    JSONFormat,
-}
-
-var reverseMap = map[bool]string{
-    true:  "default",
-    false: "json",
-}
-
-// Unpack enables using string values in the config.
-func (m *Format) Unpack(v string) error {
-    mgt, ok := formatMap[v]
-    if !ok {
-        return fmt.Errorf(
-            "unknown format, received '%s' and valid values are default or json",
-            v,
-        )
-    }
-    *m = mgt
-    return nil
-}
-
-// MarshalYAML marshals into a string.
-func (m Format) MarshalYAML() (interface{}, error) {
-    s, ok := reverseMap[bool(m)]
-    if !ok {
-        return nil, fmt.Errorf("cannot marshal value of %+v", m)
-    }
-
-    return s, nil
-}
diff --git a/internal/pkg/reporter/log/reporter.go b/internal/pkg/reporter/log/reporter.go
deleted file mode 100644
index 394544a75f1..00000000000
--- a/internal/pkg/reporter/log/reporter.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package log
-
-import (
-    "context"
-    "fmt"
-
-    "github.com/elastic/elastic-agent/internal/pkg/reporter"
-)
-
-type logger interface {
-    Error(...interface{})
-    Info(...interface{})
-}
-
-// Reporter is a reporter that formats events and writes them to the configured logger.
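-//
-// Illustrative usage (editor's sketch, not part of the original file; l is any
-// logger satisfying the interface above and ev is a reporter.Event):
-//
-//    rep := NewReporter(l)
-//    _ = rep.Report(context.Background(), ev) // errors go to l.Error, the rest to l.Info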
-type Reporter struct {
-    logger     logger
-    formatFunc func(record reporter.Event) string
-}
-
-// NewReporter creates a new log reporter.
-func NewReporter(l logger) *Reporter {
-    return &Reporter{
-        logger:     l,
-        formatFunc: defaultFormatFunc,
-    }
-}
-
-// Report writes the event to the logger: error events at the error level,
-// everything else at the info level.
-func (r *Reporter) Report(ctx context.Context, record reporter.Event) error {
-    if record.Type() == reporter.EventTypeError {
-        r.logger.Error(r.formatFunc(record))
-        return nil
-    }
-
-    r.logger.Info(r.formatFunc(record))
-    return nil
-}
-
-// Close stops the reporter; the log reporter runs no background jobs, so this is a no-op.
-func (r *Reporter) Close() error { return nil }
-
-func defaultFormatFunc(e reporter.Event) string {
-    return fmt.Sprintf(defaultLogFormat,
-        e.Time().Format(timeFormat),
-        e.Message(),
-        e.Type(),
-        e.SubType(),
-    )
-}
-
-// Check it is reporter.Backend.
-var _ reporter.Backend = &Reporter{}
diff --git a/internal/pkg/reporter/log/reporter_test.go b/internal/pkg/reporter/log/reporter_test.go
deleted file mode 100644
index 5453c11c674..00000000000
--- a/internal/pkg/reporter/log/reporter_test.go
+++ /dev/null
@@ -1,98 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package log
-
-import (
-    "context"
-    "fmt"
-    "testing"
-    "time"
-
-    "github.com/elastic/elastic-agent/internal/pkg/reporter"
-)
-
-type testCase struct {
-    event         reporter.Event
-    expectedInfo  string
-    expectedError string
-}
-
-func TestReport(t *testing.T) {
-    infoEvent := generateEvent(reporter.EventTypeState, reporter.EventSubTypeStarting)
-    errorEvent := generateEvent(reporter.EventTypeError, reporter.EventSubTypeConfig)
-
-    testCases := []testCase{
-        {infoEvent, DefaultString(infoEvent), ""},
-        {errorEvent, "", DefaultString(errorEvent)},
-    }
-
-    for _, tc := range testCases {
-        log := newTestLogger()
-        rep := NewReporter(log)
-
-        rep.Report(context.Background(), tc.event)
-
-        if got := log.info(); tc.expectedInfo != got {
-            t.Errorf("[%s(%v)] expected info '%s' got '%s'", tc.event.Type(), tc.event.SubType(), tc.expectedInfo, got)
-        }
-
-        if got := log.error(); tc.expectedError != got {
-            t.Errorf("[%s(%v)] expected error '%s' got '%s'", tc.event.Type(), tc.event.SubType(), tc.expectedError, got)
-        }
-    }
-}
-
-type testLogger struct {
-    errorLog string
-    infoLog  string
-}
-
-func newTestLogger() *testLogger {
-    t := &testLogger{}
-    return t
-}
-
-func (t *testLogger) Error(args ...interface{}) {
-    t.errorLog = fmt.Sprint(args...)
-}
-
-func (t *testLogger) Info(args ...interface{}) {
-    t.infoLog = fmt.Sprint(args...)
-}
-
-func (t *testLogger) error() string {
-    return t.errorLog
-}
-
-func (t *testLogger) info() string {
-    return t.infoLog
-}
-
-func generateEvent(eventype, subType string) testEvent {
-    return testEvent{
-        eventtype: eventype,
-        subType:   subType,
-        timestamp: time.Unix(0, 1),
-        message:   "message",
-    }
-}
-
-type testEvent struct {
-    eventtype string
-    subType   string
-    timestamp time.Time
-    message   string
-}
-
-func (t testEvent) Type() string                  { return t.eventtype }
-func (t testEvent) SubType() string               { return t.subType }
-func (t testEvent) Time() time.Time               { return t.timestamp }
-func (t testEvent) Message() string               { return t.message }
-func (testEvent) Payload() map[string]interface{} { return map[string]interface{}{} }
-
-func DefaultString(event testEvent) string {
-    timestamp := event.timestamp.Format(timeFormat)
-    return fmt.Sprintf("%s - message: message - type: '%s' - sub_type: '%s'", timestamp, event.Type(), event.SubType())
-}
diff --git a/internal/pkg/reporter/noop/reporter.go b/internal/pkg/reporter/noop/reporter.go
deleted file mode 100644
index 5effde4a396..00000000000
--- a/internal/pkg/reporter/noop/reporter.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package noop
-
-import (
-    "context"
-
-    "github.com/elastic/elastic-agent/internal/pkg/reporter"
-)
-
-// Reporter is a reporter without any effects, serves just as a showcase for further implementations.
-type Reporter struct{}
-
-// NewReporter creates a new noop reporter.
-func NewReporter() *Reporter {
-    return &Reporter{}
-}
-
-// Report in the noop reporter does nothing.
-func (*Reporter) Report(_ context.Context, _ reporter.Event) error { return nil }
-
-// Close stops all the background jobs the reporter is running.
-func (*Reporter) Close() error { return nil }
-
-// Check it is reporter.Backend.
-var _ reporter.Backend = &Reporter{}
diff --git a/internal/pkg/reporter/reporter.go b/internal/pkg/reporter/reporter.go
deleted file mode 100644
index 8c2a6c12ccb..00000000000
--- a/internal/pkg/reporter/reporter.go
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package reporter
-
-import (
-    "context"
-    "fmt"
-    "time"
-
-    "github.com/hashicorp/go-multierror"
-
-    "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-    "github.com/elastic/elastic-agent/internal/pkg/core/state"
-    "github.com/elastic/elastic-agent/pkg/core/logger"
-)
-
-const (
-    // EventTypeState is a record type describing an application state change.
-    EventTypeState = "STATE"
-    // EventTypeError is a record type describing an application error.
-    EventTypeError = "ERROR"
-    // EventTypeActionResult is a record type describing an application's result of an action.
-    EventTypeActionResult = "ACTION_RESULT"
-
-    // EventSubTypeStopped is an event type indicating the application is stopped.
-    EventSubTypeStopped = "STOPPED"
-    // EventSubTypeStarting is an event type indicating the application is starting.
-    EventSubTypeStarting = "STARTING"
-    // EventSubTypeInProgress is an event type indicating the application is in progress.
- EventSubTypeInProgress = "IN_PROGRESS" - // EventSubTypeConfig is an event indicating application config related event. - EventSubTypeConfig = "CONFIG" - // EventSubTypeRunning is an event indicating application running related event. - EventSubTypeRunning = "RUNNING" - // EventSubTypeFailed is an event type indicating application is failed. - EventSubTypeFailed = "FAILED" - // EventSubTypeStopping is an event type indicating application is stopping. - EventSubTypeStopping = "STOPPING" - // EventSubTypeUpdating is an event type indicating update process in progress. - EventSubTypeUpdating = "UPDATING" -) - -type agentInfo interface { - AgentID() string -} - -// Reporter uses multiple backends which needs to be non-blocking -// to report various events. -type Reporter struct { - ctx context.Context - info agentInfo - backends []Backend - - l *logger.Logger -} - -// NewReporter creates a new reporter with provided set of Backends. -func NewReporter(ctx context.Context, logger *logger.Logger, info agentInfo, backends ...Backend) *Reporter { - return &Reporter{ - ctx: ctx, - info: info, - backends: backends, - l: logger, - } -} - -// Close stops the reporter. For further reporting new reporter needs to be created. -func (r *Reporter) Close() { - for _, c := range r.backends { - c.Close() - } -} - -// OnStateChange called when state of an application changes. -func (r *Reporter) OnStateChange(id string, name string, state state.State) { - rec := generateRecord(r.info.AgentID(), id, name, state) - r.report(r.ctx, rec) -} - -func (r *Reporter) report(ctx context.Context, e event) { - var err error - - for _, b := range r.backends { - if er := b.Report(ctx, e); er != nil { - err = multierror.Append(err, er) - } - } - - if err != nil { - r.l.Error(errors.New(err, "failed reporting event")) - } -} - -func generateRecord(agentID string, id string, name string, s state.State) event { - eventType := EventTypeState - - var subType string - var subTypeText string - switch s.Status { - case state.Stopped: - subType = EventSubTypeStopped - subTypeText = EventSubTypeStopped - case state.Starting: - subType = EventSubTypeStarting - subTypeText = EventSubTypeStarting - case state.Configuring: - subType = EventSubTypeConfig - subTypeText = EventSubTypeConfig - case state.Healthy: - subType = EventSubTypeRunning - subTypeText = EventSubTypeRunning - case state.Degraded: - // Fleet doesn't understand degraded - subType = EventSubTypeRunning - subTypeText = "DEGRADED" - case state.Failed: - eventType = EventTypeError - subType = EventSubTypeFailed - subTypeText = EventSubTypeFailed - case state.Crashed: - eventType = EventTypeError - subType = EventSubTypeFailed - subTypeText = "CRASHED" - case state.Stopping: - subType = EventSubTypeStopping - subTypeText = EventSubTypeStopping - case state.Restarting: - subType = EventSubTypeStarting - subTypeText = "RESTARTING" - case state.Updating: - subType = EventSubTypeUpdating - subTypeText = EventSubTypeUpdating - - } - - err := errors.New( - fmt.Errorf(s.Message), - fmt.Sprintf("Application: %s[%s]: State changed to %s", id, agentID, subTypeText), - errors.TypeApplication, - errors.M(errors.MetaKeyAppID, id), - errors.M(errors.MetaKeyAppName, name)) - var payload map[string]interface{} - if s.Payload != nil { - payload = map[string]interface{}{ - name: s.Payload, - } - } - return event{ - eventype: eventType, - subType: subType, - timestamp: time.Now(), - message: err.Error(), - payload: payload, - } -} diff --git a/internal/pkg/reporter/reporter_test.go 
b/internal/pkg/reporter/reporter_test.go deleted file mode 100644 index d5f3cf4ef51..00000000000 --- a/internal/pkg/reporter/reporter_test.go +++ /dev/null @@ -1,120 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package reporter - -import ( - "context" - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" -) - -var result Event - -type testReporter struct{} - -func (t *testReporter) Close() error { return nil } -func (t *testReporter) Report(_ context.Context, r Event) error { - result = r - return nil -} - -type info struct{} - -func (*info) AgentID() string { return "id" } - -type testScenario struct { - Status state.Status - StatusMessage string - EventType string - EventSubType string - EventMessage string -} - -func TestTypes(t *testing.T) { - rep := NewReporter(context.Background(), nil, &info{}, &testReporter{}) - scenarios := []testScenario{ - { - Status: state.Stopped, - StatusMessage: "Stopped", - EventType: EventTypeState, - EventSubType: EventSubTypeStopped, - EventMessage: "Application: a-stopped[id]: State changed to STOPPED: Stopped", - }, - { - Status: state.Starting, - StatusMessage: "Starting", - EventType: EventTypeState, - EventSubType: EventSubTypeStarting, - EventMessage: "Application: a-starting[id]: State changed to STARTING: Starting", - }, - { - Status: state.Configuring, - StatusMessage: "Configuring", - EventType: EventTypeState, - EventSubType: EventSubTypeConfig, - EventMessage: "Application: a-configuring[id]: State changed to CONFIG: Configuring", - }, - { - Status: state.Healthy, - StatusMessage: "Healthy", - EventType: EventTypeState, - EventSubType: EventSubTypeRunning, - EventMessage: "Application: a-healthy[id]: State changed to RUNNING: Healthy", - }, - { - Status: state.Degraded, - StatusMessage: "Degraded", - EventType: EventTypeState, - EventSubType: EventSubTypeRunning, - EventMessage: "Application: a-degraded[id]: State changed to DEGRADED: Degraded", - }, - { - Status: state.Failed, - StatusMessage: "Failed", - EventType: EventTypeError, - EventSubType: EventSubTypeFailed, - EventMessage: "Application: a-failed[id]: State changed to FAILED: Failed", - }, - { - Status: state.Crashed, - StatusMessage: "Crashed", - EventType: EventTypeError, - EventSubType: EventSubTypeFailed, - EventMessage: "Application: a-crashed[id]: State changed to CRASHED: Crashed", - }, - { - Status: state.Stopping, - StatusMessage: "Stopping", - EventType: EventTypeState, - EventSubType: EventSubTypeStopping, - EventMessage: "Application: a-stopping[id]: State changed to STOPPING: Stopping", - }, - { - Status: state.Restarting, - StatusMessage: "Restarting", - EventType: EventTypeState, - EventSubType: EventSubTypeStarting, - EventMessage: "Application: a-restarting[id]: State changed to RESTARTING: Restarting", - }, - } - for _, scenario := range scenarios { - t.Run(scenario.StatusMessage, func(t *testing.T) { - appID := fmt.Sprintf("a-%s", strings.ToLower(scenario.StatusMessage)) - appName := fmt.Sprintf("app-%s", strings.ToLower(scenario.StatusMessage)) - rep.OnStateChange(appID, appName, state.State{ - Status: scenario.Status, - Message: scenario.StatusMessage, - }) - assert.Equal(t, scenario.EventType, result.Type()) - assert.Equal(t, scenario.EventSubType, result.SubType()) - 
assert.Equal(t, scenario.EventMessage, result.Message()) - }) - } -} diff --git a/internal/pkg/tokenbucket/token_bucket.go b/internal/pkg/tokenbucket/token_bucket.go deleted file mode 100644 index b530d238e3b..00000000000 --- a/internal/pkg/tokenbucket/token_bucket.go +++ /dev/null @@ -1,85 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package tokenbucket - -import ( - "context" - "fmt" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/scheduler" -) - -// Bucket is a Token Bucket for rate limiting -type Bucket struct { - dropAmount int - rateChan chan struct{} - closeChan chan struct{} - scheduler scheduler.Scheduler -} - -// NewTokenBucket creates a bucket and starts it. -// size: total size of the bucket -// dropAmount: amount which is dropped per every specified interval -// dropRate: specified interval when drop will happen -func NewTokenBucket(ctx context.Context, size, dropAmount int, dropRate time.Duration) (*Bucket, error) { - s := scheduler.NewPeriodic(dropRate) - return newTokenBucketWithScheduler(ctx, size, dropAmount, s) -} - -func newTokenBucketWithScheduler( - ctx context.Context, - size, dropAmount int, - s scheduler.Scheduler, -) (*Bucket, error) { - if dropAmount > size { - return nil, fmt.Errorf( - "TokenBucket: invalid configuration, size '%d' is lower than drop amount '%d'", - size, - dropAmount, - ) - } - - b := &Bucket{ - dropAmount: dropAmount, - rateChan: make(chan struct{}, size), - closeChan: make(chan struct{}), - scheduler: s, - } - go b.run(ctx) - - return b, nil -} - -// Add adds item into a bucket. Add blocks until it is able to add item into a bucket. -func (b *Bucket) Add() { - b.rateChan <- struct{}{} -} - -// Close stops the rate limiting and does not let pass anything anymore. -func (b *Bucket) Close() { - close(b.closeChan) - close(b.rateChan) - b.scheduler.Stop() -} - -// run runs basic loop and consumes configured tokens per every configured period. -func (b *Bucket) run(ctx context.Context) { - for { - select { - case <-b.scheduler.WaitTick(): - for i := 0; i < b.dropAmount; i++ { - select { - case <-b.rateChan: - default: // do not cumulate drops - } - } - case <-b.closeChan: - return - case <-ctx.Done(): - return - } - } -} diff --git a/internal/pkg/tokenbucket/token_bucket_test.go b/internal/pkg/tokenbucket/token_bucket_test.go deleted file mode 100644 index e38a4f3f149..00000000000 --- a/internal/pkg/tokenbucket/token_bucket_test.go +++ /dev/null @@ -1,109 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package tokenbucket - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/scheduler" -) - -func TestTokenBucket(t *testing.T) { - dropAmount := 1 - bucketSize := 3 - - t.Run("when way below the bucket size it should not block", func(t *testing.T) { - stepper := scheduler.NewStepper() - - b, err := newTokenBucketWithScheduler( - context.Background(), - bucketSize, - dropAmount, - stepper, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Below the bucket size and should not block. 
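-        // (Add only blocks once the bucket is full; a single Add on an empty bucket returns immediately.)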
- b.Add() - }) - - t.Run("when below the bucket size it should not block", func(t *testing.T) { - stepper := scheduler.NewStepper() - - b, err := newTokenBucketWithScheduler( - context.Background(), - bucketSize, - dropAmount, - stepper, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Below the bucket size and should not block. - b.Add() - b.Add() - }) - - t.Run("when we hit the bucket size it should block", func(t *testing.T) { - stepper := scheduler.NewStepper() - - b, err := newTokenBucketWithScheduler( - context.Background(), - bucketSize, - dropAmount, - stepper, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Same as the bucket size and should block. - b.Add() - b.Add() - b.Add() - - // Out of bound unblock calls - unblock := func() { - var wg sync.WaitGroup - wg.Add(1) - go func(wg *sync.WaitGroup) { - wg.Done() - - // will unblock the next Add after a second. - <-time.After(1 * time.Second) - stepper.Next() - }(&wg) - wg.Wait() - } - - unblock() - b.Add() // Should block and be unblocked, if not unblock test will timeout. - unblock() - b.Add() // Should block and be unblocked, if not unblock test will timeout. - }) - - t.Run("When we use a timer scheduler we can unblock", func(t *testing.T) { - d := 1 * time.Second - b, err := NewTokenBucket( - context.Background(), - bucketSize, - dropAmount, - d, - ) - - assert.NoError(t, err, "initiating a bucket failed") - - // Same as the bucket size and should block. - b.Add() - b.Add() - b.Add() - b.Add() // Should block and be unblocked, if not unblock test will timeout. - }) -} diff --git a/internal/spec/apm-server.yml b/internal/spec/apm-server.yml deleted file mode 100644 index 0258eb9fb0f..00000000000 --- a/internal/spec/apm-server.yml +++ /dev/null @@ -1,38 +0,0 @@ -name: APM-Server -cmd: apm-server -artifact: apm-server -args: [ - "-E", "management.enabled=true", - "-E", "gc_percent=${APMSERVER_GOGC:100}" -] -exported_metrics: [ - "apm-server", -] -rules: - - copy_to_list: - item: fleet - to: inputs - on_conflict: noop - - map: - path: fleet - rules: - - remove_key: - key: access_api_key - - remove_key: - key: reporting - - remove_key: - key: agent - - fix_stream: {} - - filter_values: - selector: inputs - key: type - values: - - apm - - filter: - selectors: - - inputs - - output - - fleet - - inject_headers: {} -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', - 'kafka', 'logstash') diff --git a/internal/spec/auditbeat.yml b/internal/spec/auditbeat.yml deleted file mode 100644 index be5d6246706..00000000000 --- a/internal/spec/auditbeat.yml +++ /dev/null @@ -1,98 +0,0 @@ -name: Auditbeat -cmd: auditbeat -args: [ - "-c", "auditbeat.elastic-agent.yml", - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", - "-E", "logging.level=debug", - "-E", "gc_percent=${AUDITBEAT_GOGC:100}", - "-E", "auditbeat.config.modules.enabled=false" -] -artifact: beats/auditbeat -restart_on_output_change: true - -rules: -- fix_stream: {} - -# All Auditbeat input types begin with 'audit/'. -- filter_values_with_regexp: - key: type - re: '^audit/.+' - selector: inputs - -# Adds 'index: logs-{data_stream.dataset}-{data_stream.namespace}' to each input. -- inject_index: - type: logs - -# Adds two add_fields processors - one for event.dataset and one for -# data_stream.dataset, data_stream.type, and data_stream.namespace. -- inject_stream_processor: - on_conflict: insert_after - type: logs - -# Convert input[].streams[] into inputs[]. 
-- extract_list_items: - path: inputs - item: streams - to: inputs - -- map: - path: inputs - rules: - # Input types for Auditbeat begin with 'audit/'. Everything after that is - # treated as the module name. - - translate_with_regexp: - path: type - re: '^audit/(.+)' - with: $1 - - rename: - from: type - to: module - # If a dataset is specified convert that into 'datasets: [$item]'. - - make_array: - item: dataset - to: datasets - - remove_key: - key: dataset - - remove_key: - key: enabled - - remove_key: - key: data_stream - - remove_key: - key: condition - # Require all config to come through the Agent (no local files). - - remove_key: - key: audit_rule_files - -- filter_values: - selector: inputs - key: module - values: - - auditd - - file_integrity - - system - -# Adds two add_fields processors - one for agent.id and one for -# elastic_agent.id, elastic_agent.snapshot, elastic_agent.version. -- inject_agent_info: {} - -- copy: - from: inputs - to: auditbeat - -- rename: - from: auditbeat.inputs - to: modules - -- filter: - selectors: - - auditbeat - - output - - keystore - -# Inject headers into the output configuration. -- inject_headers: {} - -when: length(${auditbeat.modules}) > 0 and hasKey(${output}, 'elasticsearch', - 'redis', 'kafka', 'logstash') diff --git a/internal/spec/cloudbeat.yml b/internal/spec/cloudbeat.yml deleted file mode 100644 index 9cfda42344d..00000000000 --- a/internal/spec/cloudbeat.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: Cloudbeat -cmd: cloudbeat -args: [ - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", -] -restart_on_output_change: true -artifact: cloudbeat -action_input_types: - - cloudbeat - -rules: - - fix_stream: {} - - inject_index: - type: logs - - - inject_stream_processor: - on_conflict: insert_after - type: logs - - - filter_values: - selector: inputs - key: type - values: - - cloudbeat - - - inject_agent_info: {} - - - filter: - selectors: - - inputs - - output - -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', 'kafka', 'logstash') diff --git a/internal/spec/endpoint.yml b/internal/spec/endpoint.yml deleted file mode 100644 index 5f452ba1943..00000000000 --- a/internal/spec/endpoint.yml +++ /dev/null @@ -1,67 +0,0 @@ -name: Endpoint Security -cmd: endpoint-security -artifact: endpoint-dev -service: 6788 -action_input_types: -- endpoint -log_paths: - darwin: "/Library/Elastic/Endpoint/state/log/endpoint-*.log" - linux: "/opt/Elastic/Endpoint/state/log/endpoint-*.log" - windows: "C:\\Program Files\\Elastic\\Endpoint\\state\\log\\endpoint-*.log" -check_install: -- exec_file: - path: "endpoint-security" - args: - - "verify" - - "--log" - - "stderr" - timeout: 30 -post_install: -- exec_file: - path: "endpoint-security" - args: - - "install" - - "--log" - - "stderr" - - "--upgrade" - - "--resources" - - "endpoint-security-resources.zip" - timeout: 600 -pre_uninstall: -- exec_file: - path: "endpoint-security" - args: - - "uninstall" - - "--log" - - "stderr" - timeout: 600 -rules: -- fix_stream: {} - -- filter_values: - selector: inputs - key: type - values: - - endpoint - -- filter_values: - selector: inputs - key: enabled - values: - - true - -- map: - path: fleet - rules: - - remove_key: - key: server - -- filter: - selectors: - - fleet - - inputs - - output - - revision - -when: length(${fleet}) > 0 and length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'logstash') -constraints: not (${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and 
${runtime.major} == '7') diff --git a/internal/spec/filebeat.yml b/internal/spec/filebeat.yml deleted file mode 100644 index 10f8ee4493b..00000000000 --- a/internal/spec/filebeat.yml +++ /dev/null @@ -1,115 +0,0 @@ -name: Filebeat -cmd: filebeat -args: [ - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", - "-E", "logging.level=debug", - "-E", "gc_percent=${FILEBEAT_GOGC:100}", - "-E", "filebeat.config.modules.enabled=false" -] -artifact: beats/filebeat -restart_on_output_change: true -rules: -- fix_stream: {} -- inject_index: - type: logs - -- inject_stream_processor: - on_conflict: insert_after - type: logs - -- map: - path: inputs - rules: - - copy_all_to_list: - to: streams - on_conflict: noop - except: ["streams", "enabled", "processors"] - - copy_to_list: - item: processors - to: streams - on_conflict: insert_before - -- rename: - from: inputs - to: inputsstreams - -- extract_list_items: - path: inputsstreams - item: streams - to: inputs - -- map: - path: inputs - rules: - - translate: - path: type - mapper: - logfile: log - event/file: log - event/stdin: stdin - event/tcp: tcp - event/udp: udp - log/docker: docker - log/redis_slowlog: redis - log/syslog: syslog - - remove_key: - key: use_output - - remove_key: - key: data_stream - - remove_key: - key: data_stream.namespace - - remove_key: - key: data_stream.dataset - -- filter_values: - selector: inputs - key: type - values: - - aws-cloudwatch - - aws-s3 - - azure-eventhub - - cloudfoundry - - container - - docker - - gcp-pubsub - - http_endpoint - - httpjson - - journald - - kafka - - log - - mqtt - - netflow - - o365audit - - redis - - stdin - - syslog - - tcp - - udp - - unix - - winlog - - filestream - -- filter_values: - selector: inputs - key: enabled - values: - - true - -- inject_agent_info: {} - -- copy: - from: inputs - to: filebeat - -- filter: - selectors: - - filebeat - - output - - keystore - -- inject_headers: {} - -when: length(${filebeat.inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', - 'kafka', 'logstash') diff --git a/internal/spec/fleet-server.yml b/internal/spec/fleet-server.yml deleted file mode 100644 index ea7af0e3b89..00000000000 --- a/internal/spec/fleet-server.yml +++ /dev/null @@ -1,69 +0,0 @@ -name: Fleet Server -cmd: fleet-server -args: ["--agent-mode"] -artifact: fleet-server -rules: - - fix_stream: {} - - - filter_values: - selector: inputs - key: type - values: - - fleet-server - - - filter_values: - selector: inputs - key: enabled - values: - - true - - - remove_key: - key: output - - - select_into: - selectors: [ fleet.server.output.elasticsearch ] - path: output - - - select_into: - selectors: [ fleet.server.policy.id ] - path: inputs.0.policy - - - insert_defaults: - selectors: - - fleet.server.host - - fleet.server.port - - fleet.server.internal_port - - fleet.server.ssl - path: inputs.0.server - - - map: - path: fleet - rules: - - filter: - selectors: - - agent - - host - - - map: - path: inputs - rules: - - remove_key: - key: use_output - - remove_key: - key: data_stream - - remove_key: - key: data_stream.namespace - - remove_key: - key: data_stream.dataset - - remove_key: - key: streams - - - filter: - selectors: - - fleet - - inputs - - output - - - inject_headers: {} - -when: length(${fleet}) > 0 and length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch') diff --git a/internal/spec/heartbeat.yml b/internal/spec/heartbeat.yml deleted file mode 100644 index ecb373cf791..00000000000 --- a/internal/spec/heartbeat.yml 
+++ /dev/null @@ -1,24 +0,0 @@ -name: Heartbeat -cmd: heartbeat -args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.enabled=true", "-E", "logging.level=debug"] -artifact: beats/heartbeat -restart_on_output_change: true -rules: - - fix_stream: {} - - filter_values_with_regexp: - key: type - re: ^synthetics/.+ - selector: inputs - - filter_values: - selector: inputs - key: enabled - values: - - true - - inject_agent_info: {} - - filter: - selectors: - - inputs - - output - - keystore -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis', - 'kafka', 'logstash') diff --git a/internal/spec/metricbeat.yml b/internal/spec/metricbeat.yml deleted file mode 100644 index 3a6f3a0b8f4..00000000000 --- a/internal/spec/metricbeat.yml +++ /dev/null @@ -1,98 +0,0 @@ -name: Metricbeat -cmd: metricbeat -args: [ - "-E", "setup.ilm.enabled=false", - "-E", "setup.template.enabled=false", - "-E", "management.enabled=true", - "-E", "logging.level=debug", - "-E", "gc_percent=${METRICBEAT_GOGC:100}", - "-E", "metricbeat.config.modules.enabled=false" -] -artifact: beats/metricbeat -restart_on_output_change: true -rules: -- fix_stream: {} -- inject_index: - type: metrics - -- inject_stream_processor: - on_conflict: insert_after - type: metrics - -- rename: - from: inputs - to: inputsstreams - -- map: - path: inputsstreams - rules: - - copy_all_to_list: - to: streams - on_conflict: noop - except: ["streams", "id", "enabled", "processors"] - - copy_to_list: - item: processors - to: streams - on_conflict: insert_before - -- extract_list_items: - path: inputsstreams - item: streams - to: inputs - -- filter_values_with_regexp: - key: type - re: ^.+/metrics$ - selector: inputs - -- filter_values: - selector: inputs - key: enabled - values: - - true - -- map: - path: inputs - rules: - - translate_with_regexp: - path: type - re: ^(?P.+)/metrics$ - with: $type - - rename: - from: type - to: module - - make_array: - item: metricset - to: metricsets - - remove_key: - key: metricset - - remove_key: - key: enabled - - remove_key: - key: data_stream - - remove_key: - key: data_stream.dataset - - remove_key: - key: data_stream.namespace - - remove_key: - key: use_output - -- inject_agent_info: {} - -- copy: - from: inputs - to: metricbeat - -- rename: - from: metricbeat.inputs - to: modules - -- filter: - selectors: - - metricbeat - - output - - keystore -- inject_headers: {} - -when: length(${metricbeat.modules}) > 0 and hasKey(${output}, 'elasticsearch', - 'redis', 'kafka', 'logstash') diff --git a/internal/spec/osquerybeat.yml b/internal/spec/osquerybeat.yml deleted file mode 100644 index 36e60901a34..00000000000 --- a/internal/spec/osquerybeat.yml +++ /dev/null @@ -1,39 +0,0 @@ -name: Osquerybeat -cmd: osquerybeat -args: ["-E", "setup.ilm.enabled=false", "-E", "setup.template.enabled=false", "-E", "management.enabled=true", "-E", "logging.level=debug"] -restart_on_output_change: true -artifact: beats/osquerybeat -action_input_types: -- osquery - -check_install: -- exec_file: - path: "osquerybeat" - args: - - "verify" - timeout: 10 - -rules: -- fix_stream: {} -- inject_index: - type: logs - -- inject_stream_processor: - on_conflict: insert_after - type: logs - -- filter_values: - selector: inputs - key: type - values: - - osquery - -- inject_agent_info: {} - -- filter: - selectors: - - inputs - - output - -when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch') -constraints: ${runtime.arch} != '386' diff --git a/internal/spec/packetbeat.yml 
b/internal/spec/packetbeat.yml
deleted file mode 100644
index 37c2629f130..00000000000
--- a/internal/spec/packetbeat.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: Packetbeat
-cmd: packetbeat
-args: ['-E', 'setup.ilm.enabled=false', '-E', 'setup.template.enabled=false', '-E', 'management.enabled=true', '-E', 'logging.level=debug']
-artifact: beats/packetbeat
-restart_on_output_change: true
-rules:
-  - filter_values:
-      selector: inputs
-      key: type
-      values:
-        - packet
-
-  - inject_agent_info: {}
-
-  - filter:
-      selectors:
-        - inputs
-        - output
-
-when: length(${inputs}) > 0 and hasKey(${output}, 'elasticsearch', 'redis',
-  'kafka', 'logstash')
-
diff --git a/magefile.go b/magefile.go
index 721071b2a0f..9ddfe7bb7fb 100644
--- a/magefile.go
+++ b/magefile.go
@@ -270,24 +270,17 @@ func (Build) Clean() {
 
 // TestBinaries builds the required binaries for the test suite.
 func (Build) TestBinaries() error {
-    p := filepath.Join("internal", "pkg", "agent", "operation", "tests", "scripts")
-    p2 := filepath.Join("internal", "pkg", "agent", "transpiler", "tests")
-    p3 := filepath.Join("pkg", "component")
-    configurableName := "configurable"
-    serviceableName := "serviceable"
+    p := filepath.Join("internal", "pkg", "agent", "transpiler", "tests")
+    p2 := filepath.Join("pkg", "component")
     execName := "exec"
     fakeName := "fake"
     if runtime.GOOS == "windows" {
-        configurableName += ".exe"
-        serviceableName += ".exe"
         execName += ".exe"
         fakeName += ".exe"
     }
     return combineErr(
-        RunGo("build", "-o", filepath.Join(p, configurableName), filepath.Join(p, "configurable-1.0-darwin-x86_64", "main.go")),
-        RunGo("build", "-o", filepath.Join(p, serviceableName), filepath.Join(p, "serviceable-1.0-darwin-x86_64", "main.go")),
-        RunGo("build", "-o", filepath.Join(p2, "exec-1.0-darwin-x86_64", execName), filepath.Join(p2, "exec-1.0-darwin-x86_64", "main.go")),
-        RunGo("build", "-o", filepath.Join(p3, "fake", fakeName), filepath.Join(p3, "fake", "main.go")),
+        RunGo("build", "-o", filepath.Join(p, "exec-1.0-darwin-x86_64", execName), filepath.Join(p, "exec-1.0-darwin-x86_64", "main.go")),
+        RunGo("build", "-o", filepath.Join(p2, "fake", fakeName), filepath.Join(p2, "fake", "main.go")),
     )
 }
 
@@ -487,7 +480,7 @@ func commitID() string {
 
 // Update is an alias for executing control protocol, configs, and specs.
 func Update() {
-    mg.SerialDeps(Config, BuildSpec, BuildPGP, BuildFleetCfg)
+    mg.SerialDeps(Config, BuildPGP, BuildFleetCfg)
 }
 
 // CrossBuild cross-builds the beat for all target platforms.
@@ -514,19 +507,6 @@ func ControlProto() error {
         "control.proto")
 }
 
-// BuildSpec makes sure that all the supported program specs are built into the binary.
-func BuildSpec() error { - // go run dev-tools/cmd/buildspec/buildspec.go --in internal/agent/spec/*.yml --out internal/pkg/agent/program/supported.go - goF := filepath.Join("dev-tools", "cmd", "buildspec", "buildspec.go") - in := filepath.Join("internal", "spec", "*.yml") - out := filepath.Join("internal", "pkg", "agent", "program", "supported.go") - - fmt.Printf(">> Buildspec from %s to %s\n", in, out) - return RunGo("run", goF, "--in", in, "--out", out) - - return nil -} - func BuildPGP() error { // go run elastic-agent/dev-tools/cmd/buildpgp/build_pgp.go --in agent/spec/GPG-KEY-elasticsearch --out elastic-agent/pkg/release/pgp.go goF := filepath.Join("dev-tools", "cmd", "buildpgp", "build_pgp.go") diff --git a/pkg/component/platforms.go b/pkg/component/platforms.go index 98e5bf21cd6..552adde716c 100644 --- a/pkg/component/platforms.go +++ b/pkg/component/platforms.go @@ -6,7 +6,11 @@ package component import ( "fmt" + goruntime "runtime" + "strconv" "strings" + + "github.com/elastic/go-sysinfo" ) const ( @@ -103,3 +107,29 @@ type PlatformDetail struct { Major string Minor string } + +// PlatformModifier can modify the platform details before the runtime specifications are loaded. +type PlatformModifier func(detail PlatformDetail) PlatformDetail + +// LoadPlatformDetail loads the platform details for the current system. +func LoadPlatformDetail(modifiers ...PlatformModifier) (PlatformDetail, error) { + info, err := sysinfo.Host() + if err != nil { + return PlatformDetail{}, err + } + os := info.Info().OS + detail := PlatformDetail{ + Platform: Platform{ + OS: goruntime.GOOS, + Arch: goruntime.GOARCH, + GOOS: goruntime.GOOS, + }, + Family: os.Family, + Major: strconv.Itoa(os.Major), + Minor: strconv.Itoa(os.Minor), + } + for _, modifier := range modifiers { + detail = modifier(detail) + } + return detail, nil +} From ff667df7301d8a3635fc56ae7abd2e1404b4d0f8 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 27 Jul 2022 14:20:51 -0400 Subject: [PATCH 09/49] [v2] Delete more unused code from v2 transition (#790) * Remove more unused code that was including already deleted code. * Fix all unit tests. * Fix lint. * More lint fixes, maybe this time? * More lint.... really? * Update NOTICE.txt. 
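(Editor's aside, not part of either patch: a minimal sketch of how the
LoadPlatformDetail API added above might be used. The modifier and the printed
output are illustrative assumptions, not code from the repository.)

    package main

    import (
        "fmt"
        "strings"

        "github.com/elastic/elastic-agent/pkg/component"
    )

    func main() {
        // A modifier runs after the host details are gathered and may adjust
        // them before the runtime specifications are matched.
        detail, err := component.LoadPlatformDetail(func(d component.PlatformDetail) component.PlatformDetail {
            d.Family = strings.ToLower(d.Family) // hypothetical normalization
            return d
        })
        if err != nil {
            panic(err)
        }
        fmt.Printf("%s/%s (family=%s %s.%s)\n", detail.OS, detail.Arch, detail.Family, detail.Major, detail.Minor)
    }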
--- NOTICE.txt | 1208 +++-------------- go.mod | 7 +- go.sum | 12 - .../gateway/fleet/fleet_gateway_test.go | 17 +- internal/pkg/agent/cmd/container.go | 49 +- internal/pkg/agent/cmd/diagnostics.go | 11 +- internal/pkg/agent/cmd/diagnostics_test.go | 4 +- internal/pkg/agent/cmd/run.go | 3 +- internal/pkg/agent/cmd/status_test.go | 125 -- .../pkg/agent/configrequest/config_request.go | 61 - .../configrequest/config_request_test.go | 27 - internal/pkg/agent/configrequest/request.go | 20 - internal/pkg/agent/configrequest/step.go | 33 - internal/pkg/agent/control/control_test.go | 26 +- internal/pkg/basecmd/version/cmd_test.go | 4 +- .../pkg/capabilities/capabilities_test.go | 28 +- internal/pkg/capabilities/input_test.go | 28 +- internal/pkg/capabilities/output_test.go | 21 +- internal/pkg/capabilities/upgrade.go | 5 - internal/pkg/capabilities/upgrade_test.go | 98 +- .../core/monitoring/beats/beats_monitor.go | 309 ----- .../pkg/core/monitoring/beats/drop_test.go | 44 - .../pkg/core/monitoring/beats/monitoring.go | 82 -- internal/pkg/core/monitoring/monitor.go | 39 - .../pkg/core/monitoring/noop/noop_monitor.go | 71 - .../pkg/core/monitoring/server/handler.go | 47 - .../pkg/core/monitoring/server/process.go | 223 --- .../monitoring/server/process_linux_test.go | 46 - .../core/monitoring/server/process_test.go | 90 -- .../pkg/core/monitoring/server/processes.go | 133 -- .../core/monitoring/server/processes_test.go | 172 --- internal/pkg/core/monitoring/server/server.go | 136 -- internal/pkg/core/monitoring/server/stats.go | 36 - internal/pkg/core/monitoring/server/url.go | 208 --- pkg/component/fake/main.go | 8 +- pkg/component/runtime/manager_test.go | 46 +- 36 files changed, 331 insertions(+), 3146 deletions(-) delete mode 100644 internal/pkg/agent/cmd/status_test.go delete mode 100644 internal/pkg/agent/configrequest/config_request.go delete mode 100644 internal/pkg/agent/configrequest/config_request_test.go delete mode 100644 internal/pkg/agent/configrequest/request.go delete mode 100644 internal/pkg/agent/configrequest/step.go delete mode 100644 internal/pkg/core/monitoring/beats/beats_monitor.go delete mode 100644 internal/pkg/core/monitoring/beats/drop_test.go delete mode 100644 internal/pkg/core/monitoring/beats/monitoring.go delete mode 100644 internal/pkg/core/monitoring/monitor.go delete mode 100644 internal/pkg/core/monitoring/noop/noop_monitor.go delete mode 100644 internal/pkg/core/monitoring/server/handler.go delete mode 100644 internal/pkg/core/monitoring/server/process.go delete mode 100644 internal/pkg/core/monitoring/server/process_linux_test.go delete mode 100644 internal/pkg/core/monitoring/server/process_test.go delete mode 100644 internal/pkg/core/monitoring/server/processes.go delete mode 100644 internal/pkg/core/monitoring/server/processes_test.go delete mode 100644 internal/pkg/core/monitoring/server/server.go delete mode 100644 internal/pkg/core/monitoring/server/stats.go delete mode 100644 internal/pkg/core/monitoring/server/url.go diff --git a/NOTICE.txt b/NOTICE.txt index f7854b64dc5..56f82316620 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1270,217 +1270,6 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-l limitations under the License. 
--------------------------------------------------------------------------------- -Dependency : github.com/elastic/elastic-agent-system-metrics -Version: v0.3.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.3.0/LICENSE.txt: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. 
We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-licenser Version: v0.4.0 @@ -2220,80 +2009,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/google/uuid -Version: v1.3.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.3.0/LICENSE: - -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/gorilla/mux -Version: v1.8.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.8.0/LICENSE: - -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/hashicorp/go-multierror Version: v1.1.1 @@ -3997,265 +3712,54 @@ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/tsg/go-daemon -Version: v0.0.0-20200207173439-e704b93fd89b -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/tsg/go-daemon@v0.0.0-20200207173439-e704b93fd89b/LICENSE: - -Copyright (c) 2013-2014 Alexandre Fiori. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * The names of authors or contributors may NOT be used to endorse or -promote products derived from this software without specific prior -written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : go.elastic.co/apm -Version: v1.15.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm@v1.15.0/LICENSE: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. - END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. +-------------------------------------------------------------------------------- +Dependency : github.com/tsg/go-daemon +Version: v0.0.0-20200207173439-e704b93fd89b +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +Contents of probable licence file $GOMODCACHE/github.com/tsg/go-daemon@v0.0.0-20200207173439-e704b93fd89b/LICENSE: - Copyright 2018 Elasticsearch BV +Copyright (c) 2013-2014 Alexandre Fiori. All rights reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - http://www.apache.org/licenses/LICENSE-2.0 + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * The names of authors or contributors may NOT be used to endorse or +promote products derived from this software without specific prior +written permission. - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -Dependency : go.elastic.co/apm/module/apmgorilla +Dependency : go.elastic.co/apm Version: v1.15.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmgorilla@v1.15.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm@v1.15.0/LICENSE: Apache License Version 2.0, January 2004 @@ -7516,287 +7020,74 @@ Version: v1.1.1 Licence type (autodetected): ISC -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.1/LICENSE: - -ISC License - -Copyright (c) 2012-2016 Dave Collins - -Permission to use, copy, modify, and/or distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/dnephin/pflag -Version: v1.0.7 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/dnephin/pflag@v1.0.7/LICENSE: - -Copyright (c) 2012 Alex Ogier. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/docker/distribution -Version: v2.8.1+incompatible -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/docker/distribution@v2.8.1+incompatible/LICENSE: - -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +Contents of probable licence file $GOMODCACHE/github.com/davecgh/go-spew@v1.1.1/LICENSE: - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. +ISC License - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +Copyright (c) 2012-2016 Dave Collins - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. 
- END OF TERMS AND CONDITIONS +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. - APPENDIX: How to apply the Apache License to your work. - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +-------------------------------------------------------------------------------- +Dependency : github.com/dnephin/pflag +Version: v1.0.7 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- - Copyright {yyyy} {name of copyright owner} +Contents of probable licence file $GOMODCACHE/github.com/dnephin/pflag@v1.0.7/LICENSE: - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Copyright (c) 2012 Alex Ogier. All rights reserved. +Copyright (c) 2012 The Go Authors. All rights reserved. - http://www.apache.org/licenses/LICENSE-2.0 +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -Dependency : github.com/docker/docker -Version: v20.10.12+incompatible +Dependency : github.com/docker/distribution +Version: v2.8.1+incompatible Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/docker/docker@v20.10.12+incompatible/LICENSE: - +Contents of probable licence file $GOMODCACHE/github.com/docker/distribution@v2.8.1+incompatible/LICENSE: - Apache License +Apache License Version 2.0, January 2004 - https://www.apache.org/licenses/ + http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -7971,13 +7262,24 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/docker@v20.10.12 END OF TERMS AND CONDITIONS - Copyright 2013-2018 Docker, Inc. + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - https://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -7986,13 +7288,14 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/docker@v20.10.12 limitations under the License. + -------------------------------------------------------------------------------- -Dependency : github.com/docker/go-connections -Version: v0.4.0 +Dependency : github.com/docker/docker +Version: v20.10.12+incompatible Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/docker/go-connections@v0.4.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/docker/docker@v20.10.12+incompatible/LICENSE: Apache License @@ -8172,7 +7475,7 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-connections@v END OF TERMS AND CONDITIONS - Copyright 2015 Docker, Inc. + Copyright 2013-2018 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -8188,16 +7491,17 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-connections@v -------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-structform -Version: v0.0.9 +Dependency : github.com/docker/go-connections +Version: v0.4.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v0.0.9/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/docker/go-connections@v0.4.0/LICENSE: + Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -8372,24 +7676,13 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2012–2018 Elastic + Copyright 2015 Docker, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, @@ -8610,217 +7903,6 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 limitations under the License. --------------------------------------------------------------------------------- -Dependency : github.com/elastic/gosigar -Version: v0.14.2 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/gosigar@v0.14.2/LICENSE: - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - -2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - -3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - -4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - -5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - -6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - -8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - -Copyright [yyyy] [name of copyright owner] - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. - - -------------------------------------------------------------------------------- Dependency : github.com/elazarl/goproxy Version: v0.0.0-20180725130230-947c36da3153 @@ -10224,6 +9306,43 @@ Contents of probable licence file $GOMODCACHE/github.com/google/shlex@v0.0.0-201 limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/google/uuid +Version: v1.3.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.3.0/LICENSE: + +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/googleapis/gnostic Version: v0.5.5 @@ -10437,6 +9556,43 @@ Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.5 +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/mux +Version: v1.8.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.8.0/LICENSE: + +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/grpc-ecosystem/go-grpc-middleware Version: v1.3.0 diff --git a/go.mod b/go.mod index 2cf5692b25a..7751b77f60c 100644 --- a/go.mod +++ b/go.mod @@ -14,15 +14,12 @@ require ( github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 github.com/elastic/elastic-agent-libs v0.2.3 - github.com/elastic/elastic-agent-system-metrics v0.3.0 github.com/elastic/go-licenser v0.4.0 github.com/elastic/go-sysinfo v1.7.1 github.com/elastic/go-ucfg v0.8.5 github.com/gofrs/flock v0.8.1 github.com/gofrs/uuid v4.2.0+incompatible github.com/google/go-cmp v0.5.6 - github.com/google/uuid v1.3.0 - github.com/gorilla/mux v1.8.0 github.com/hashicorp/go-multierror v1.1.1 github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 @@ -71,9 +68,7 @@ require ( github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/docker v20.10.12+incompatible // indirect github.com/docker/go-connections v0.4.0 // indirect - github.com/elastic/go-structform v0.0.9 // indirect github.com/elastic/go-windows v1.0.1 // indirect - github.com/elastic/gosigar v0.14.2 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect github.com/fatih/color v1.13.0 // indirect github.com/fsnotify/fsnotify v1.5.1 // indirect @@ -86,6 +81,7 @@ require ( github.com/google/gofuzz v1.1.0 // indirect github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect + github.com/google/uuid v1.3.0 // indirect github.com/googleapis/gnostic v0.5.5 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect github.com/imdario/mergo v0.3.12 // indirect @@ -144,7 +140,6 @@ require ( github.com/hashicorp/go-version v1.2.0 // indirect github.com/json-iterator/go v1.1.12 // indirect go.elastic.co/apm v1.15.0 - go.elastic.co/apm/module/apmgorilla v1.15.0 go.elastic.co/apm/module/apmgrpc v1.15.0 k8s.io/klog/v2 v2.30.0 // indirect ) diff --git a/go.sum b/go.sum index d12450c533f..8ccda348f78 100644 --- a/go.sum +++ b/go.sum @@ -385,18 +385,14 @@ github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 h1:8sGoTlgXRCesR1+FjBv8YY5CyVhNSDjXlo4uq5q1RGM= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= -github.com/elastic/elastic-agent-libs v0.2.2/go.mod h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s= github.com/elastic/elastic-agent-libs v0.2.3 h1:GY8M0fxOs/GBY2nIB+JOB91aoD72S87iEcm2qVGFUqI= github.com/elastic/elastic-agent-libs v0.2.3/go.mod 
h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s= -github.com/elastic/elastic-agent-system-metrics v0.3.0 h1:W8L0E8lWJmdguH+oIR7OzuFgopvw8ucZAE9w6iqVlpE= -github.com/elastic/elastic-agent-system-metrics v0.3.0/go.mod h1:RIYhJOS7mUeyIthfOSqmmbEILYSzaDWLi5zQ70bQo+o= github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY= github.com/elastic/go-elasticsearch/v7 v7.16.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-licenser v0.4.0 h1:jLq6A5SilDS/Iz1ABRkO6BHy91B9jBora8FwGRsDqUI= github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= -github.com/elastic/go-structform v0.0.9 h1:HpcS7xljL4kSyUfDJ8cXTJC6rU5ChL1wYb6cx3HLD+o= github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= @@ -407,8 +403,6 @@ github.com/elastic/go-ucfg v0.8.5/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= -github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4= -github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/package-spec v1.3.0/go.mod h1:KzGTSDqCkdhmL1IFpOH2ZQNSSE9JEhNtndxU3ZrQilA= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= @@ -646,10 +640,8 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw= github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= @@ -1199,8 +1191,6 @@ go.elastic.co/apm v1.13.0/go.mod h1:dylGv2HKR0tiCV+wliJz1KHtDyuD8SPe69oV7VyK6WY= go.elastic.co/apm v1.15.0 h1:uPk2g/whK7c7XiZyz/YCUnAUBNPiyNeE3ARX3G6Gx7Q= go.elastic.co/apm v1.15.0/go.mod h1:dylGv2HKR0tiCV+wliJz1KHtDyuD8SPe69oV7VyK6WY= go.elastic.co/apm/module/apmelasticsearch v1.10.0/go.mod h1:lwoaGDfZzfb9e6TXd3h8/KNmLAONOas7o5NLVNmv8Xk= -go.elastic.co/apm/module/apmgorilla v1.15.0 
h1:1yTAksffgaFXYEIwlLRiQnxLfy3p3RtpDw8HDupIJfY= -go.elastic.co/apm/module/apmgorilla v1.15.0/go.mod h1:+23mZudYvZ9VgxCQjseLo9EF5gkKEr0KSQBupw+rzP8= go.elastic.co/apm/module/apmgrpc v1.15.0 h1:Z7h58uuMJUoYXK6INFunlcGEXZQ18QKAhPh6NFYDNHE= go.elastic.co/apm/module/apmgrpc v1.15.0/go.mod h1:IEbTGJzY5Xx737PkHDT3bbzh9syovK+IfAlckJsUgPE= go.elastic.co/apm/module/apmhttp v1.10.0/go.mod h1:Y4timwcJ8sQWbWpcw3Y7Mat1OssNpGhpwyfUnpqIDew= @@ -1441,7 +1431,6 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1564,7 +1553,6 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index deb871192bc..f7ba6ec961d 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -2,7 +2,6 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-//nolint:dupl // duplicate code is in test cases package fleet import ( @@ -322,14 +321,16 @@ func TestFleetGateway(t *testing.T) { errCh := runFleetGateway(ctx, gateway) - var count int - for { - waitFn() - count++ - if count == 4 { - return + func() { + var count int + for { + waitFn() + count++ + if count == 4 { + return + } } - } + }() cancel() err = <-errCh diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index b3a086439e3..f9f89dd25db 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -6,7 +6,6 @@ package cmd import ( "bytes" - "context" "encoding/json" "fmt" "io" @@ -22,8 +21,6 @@ import ( "syscall" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" - "github.com/spf13/cobra" "gopkg.in/yaml.v2" @@ -33,11 +30,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/artifact/install/tar" "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" "github.com/elastic/elastic-agent/version" @@ -268,7 +263,7 @@ func runContainerCmd(streams *cli.IOStreams, cfg setupConfig) error { _, err = os.Stat(paths.AgentConfigFile()) if !os.IsNotExist(err) && !cfg.Fleet.Force { // already enrolled, just run the standard run - return run(logToStderr) + return run(logToStderr, isContainer) } if cfg.Kibana.Fleet.Setup || cfg.FleetServer.Enable { @@ -333,7 +328,7 @@ func runContainerCmd(streams *cli.IOStreams, cfg setupConfig) error { } } - return run(logToStderr) + return run(logToStderr, isContainer) } // TokenResp is used to decode a response for generating a service token @@ -700,23 +695,22 @@ func truncateString(b []byte) string { func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, error) { name := "apm-server" logInfo(streams, "Preparing apm-server for legacy mode.") - cfg := artifact.DefaultConfig() - logInfo(streams, fmt.Sprintf("Extracting apm-server into install directory %s.", path)) - installer, err := tar.NewInstaller(cfg) + platform, err := component.LoadPlatformDetail(isContainer) if err != nil { - return nil, errors.New(err, "creating installer") + return nil, fmt.Errorf("failed to gather system information: %w", err) } - spec := program.Spec{Name: name, Cmd: name, Artifact: name} - version := release.Version() - if release.Snapshot() { - version = fmt.Sprintf("%s-SNAPSHOT", version) + + specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) + if err != nil { + return nil, fmt.Errorf("failed to detect inputs and outputs: %w", err) } - // Extract the bundled apm-server into the APM_SERVER_PATH - if err := installer.Install(context.Background(), spec, version, path); err != nil { - return nil, errors.New(err, - fmt.Sprintf("installing %s (%s) from %s to %s", spec.Name, version, cfg.TargetDirectory, path)) + + spec, err := specs.GetInput(name) + if err != nil { + return nil, fmt.Errorf("failed to detect apm-server input: %w", err) } + // Get the apm-server directory files, err := ioutil.ReadDir(path) if err != 
nil {
@@ -725,9 +719,7 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err
 	if len(files) != 1 || !files[0].IsDir() {
 		return nil, errors.New("expected one directory")
 	}
-	apmDir := filepath.Join(path, files[0].Name())
-	// Start apm-server process respecting path ENVs
-	apmBinary := filepath.Join(apmDir, spec.Cmd)
+
 	// add APM Server-specific configuration
 	var args []string
 	addEnv := func(arg, env string) {
@@ -748,7 +740,7 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err
 	addEnv("--httpprof", "HTTPPROF")
 	addSettingEnv("gc_percent", "APMSERVER_GOGC")
 	logInfo(streams, "Starting legacy apm-server daemon as a subprocess.")
-	return process.Start(apmBinary, os.Geteuid(), os.Getegid(), args, nil)
+	return process.Start(spec.BinaryPath, os.Geteuid(), os.Getegid(), args, nil)
 }
 
 func logToStderr(cfg *configuration.Configuration) {
@@ -959,3 +951,12 @@ func envIntWithDefault(defVal string, keys ...string) (int, error) {
 
 	return strconv.Atoi(valStr)
 }
+
+// isContainer changes the platform details to be a container.
+//
+// Runtime specifications can provide unique configurations when running in a container; this ensures that
+// those configurations are used instead of the standard Linux configurations.
+func isContainer(detail component.PlatformDetail) component.PlatformDetail {
+	detail.OS = component.Container
+	return detail
+}
diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go
index a1412c147e1..811b88465d2 100644
--- a/internal/pkg/agent/cmd/diagnostics.go
+++ b/internal/pkg/agent/cmd/diagnostics.go
@@ -14,7 +14,6 @@ import (
 	"io/fs"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"
 	"text/tabwriter"
 	"time"
@@ -29,7 +28,6 @@ import (
 	"github.com/elastic/elastic-agent/internal/pkg/agent/control/client"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/program"
 	"github.com/elastic/elastic-agent/internal/pkg/cli"
 	"github.com/elastic/elastic-agent/internal/pkg/config/operations"
 )
@@ -562,9 +560,10 @@ func zipLogs(zw *zip.Writer) error {
 		return err
 	}
 
-	if err := collectEndpointSecurityLogs(zw, program.SupportedMap); err != nil {
-		return fmt.Errorf("failed to collect endpoint-security logs: %w", err)
-	}
+	// TODO(blakerouse): Fix diagnostics for v2
+	//if err := collectEndpointSecurityLogs(zw, program.SupportedMap); err != nil {
+	//	return fmt.Errorf("failed to collect endpoint-security logs: %w", err)
+	//}
 
 	// using Data() + "/logs", for some reason default paths/Logs() is the home dir...
logPath := filepath.Join(paths.Home(), "logs") + string(filepath.Separator) @@ -595,6 +594,7 @@ func zipLogs(zw *zip.Writer) error { }) } +/* func collectEndpointSecurityLogs(zw *zip.Writer, specs map[string]program.Spec) error { spec, ok := specs["endpoint-security"] if !ok { @@ -628,6 +628,7 @@ func collectEndpointSecurityLogs(zw *zip.Writer, specs map[string]program.Spec) return saveLogs(name, path, zw) }) } +*/ func saveLogs(name string, logPath string, zw *zip.Writer) error { lf, err := os.Open(logPath) diff --git a/internal/pkg/agent/cmd/diagnostics_test.go b/internal/pkg/agent/cmd/diagnostics_test.go index d55f0a06721..cec6a6f3450 100644 --- a/internal/pkg/agent/cmd/diagnostics_test.go +++ b/internal/pkg/agent/cmd/diagnostics_test.go @@ -4,6 +4,8 @@ package cmd +/* + import ( "archive/zip" "bytes" @@ -19,7 +21,6 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" ) var testDiagnostics = DiagnosticsInfo{ @@ -137,3 +138,4 @@ func Test_collectEndpointSecurityLogs_noEndpointSecurity(t *testing.T) { err := collectEndpointSecurityLogs(zw, specs) assert.NoError(t, err, "collectEndpointSecurityLogs should not return an error") } +*/ diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index e2d3fc0e751..ef04422cff9 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -35,6 +35,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -57,7 +58,7 @@ func newRunCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { } } -func run(override cfgOverrider, modifiers ...application.PlatformModifier) error { +func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { // Windows: Mark service as stopped. // After this is run, the service is considered by the OS to be stopped. // This must be the first deferred cleanup task (last to execute). diff --git a/internal/pkg/agent/cmd/status_test.go b/internal/pkg/agent/cmd/status_test.go deleted file mode 100644 index 9bcd3b097e9..00000000000 --- a/internal/pkg/agent/cmd/status_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package cmd - -import ( - "os" - - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" -) - -var testStatus = &client.AgentStatus{ - Status: client.Healthy, - Message: "", - Applications: []*client.ApplicationStatus{{ - ID: "id_1", - Name: "filebeat", - Status: client.Healthy, - Message: "Running", - Payload: nil, - }, { - ID: "id_2", - Name: "metricbeat", - Status: client.Healthy, - Message: "Running", - Payload: nil, - }, { - ID: "id_3", - Name: "filebeat_monitoring", - Status: client.Healthy, - Message: "Running", - Payload: nil, - }, { - ID: "id_4", - Name: "metricbeat_monitoring", - Status: client.Healthy, - Message: "Running", - Payload: nil, - }, - }, -} - -func ExamplehumanStatusOutput() { - humanStatusOutput(os.Stdout, testStatus) - // Output: - // Status: HEALTHY - // Message: (no message) - // Applications: - // * filebeat (HEALTHY) - // Running - // * metricbeat (HEALTHY) - // Running - // * filebeat_monitoring (HEALTHY) - // Running - // * metricbeat_monitoring (HEALTHY) - // Running -} - -func ExamplejsonOutput() { - jsonOutput(os.Stdout, testStatus) - // Output: - // { - // "Status": 2, - // "Message": "", - // "Applications": [ - // { - // "ID": "id_1", - // "Name": "filebeat", - // "Status": 2, - // "Message": "Running", - // "Payload": null - // }, - // { - // "ID": "id_2", - // "Name": "metricbeat", - // "Status": 2, - // "Message": "Running", - // "Payload": null - // }, - // { - // "ID": "id_3", - // "Name": "filebeat_monitoring", - // "Status": 2, - // "Message": "Running", - // "Payload": null - // }, - // { - // "ID": "id_4", - // "Name": "metricbeat_monitoring", - // "Status": 2, - // "Message": "Running", - // "Payload": null - // } - // ] - // } -} - -func ExampleyamlOutput() { - yamlOutput(os.Stdout, testStatus) - // Output: - // status: 2 - // message: "" - // applications: - // - id: id_1 - // name: filebeat - // status: 2 - // message: Running - // payload: {} - // - id: id_2 - // name: metricbeat - // status: 2 - // message: Running - // payload: {} - // - id: id_3 - // name: filebeat_monitoring - // status: 2 - // message: Running - // payload: {} - // - id: id_4 - // name: metricbeat_monitoring - // status: 2 - // message: Running - // payload: {} -} diff --git a/internal/pkg/agent/configrequest/config_request.go b/internal/pkg/agent/configrequest/config_request.go deleted file mode 100644 index 8d492ab6a64..00000000000 --- a/internal/pkg/agent/configrequest/config_request.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package configrequest - -import ( - "strings" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -const shortID = 8 - -type configRequest struct { - id string - createdAt time.Time - programs []program.Program -} - -// New created a new Request. 
-func New(id string, createdAt time.Time, programs []program.Program) Request { - return &configRequest{ - id: id, - createdAt: createdAt, - programs: programs, - } -} - -func (c *configRequest) String() string { - names := c.ProgramNames() - return "[" + c.ShortID() + "] Config: " + strings.Join(names, ", ") -} - -func (c *configRequest) ID() string { - return c.id -} - -func (c *configRequest) ShortID() string { - if len(c.id) <= shortID { - return c.id - } - return c.id[0:shortID] -} - -func (c *configRequest) CreatedAt() time.Time { - return c.createdAt -} - -func (c *configRequest) Programs() []program.Program { - return c.programs -} - -func (c *configRequest) ProgramNames() []string { - names := make([]string, 0, len(c.programs)) - for _, name := range c.programs { - names = append(names, name.Spec.Name) - } - return names -} diff --git a/internal/pkg/agent/configrequest/config_request_test.go b/internal/pkg/agent/configrequest/config_request_test.go deleted file mode 100644 index eb294d73650..00000000000 --- a/internal/pkg/agent/configrequest/config_request_test.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package configrequest - -import ( - "testing" - - "github.com/stretchr/testify/require" -) - -func TestConfigRequest(t *testing.T) { - t.Run("limit case for ShortID()", func(t *testing.T) { - c := configRequest{id: "bye"} - require.Equal(t, "bye", c.ShortID()) - - // TODO(PH): add validation when we create the config request. - c = configRequest{id: ""} - require.Equal(t, "", c.ShortID()) - }) - - t.Run("ShortID()", func(t *testing.T) { - c := configRequest{id: "HELLOWORLDBYEBYE"} - require.Equal(t, "HELLOWOR", c.ShortID()) - }) -} diff --git a/internal/pkg/agent/configrequest/request.go b/internal/pkg/agent/configrequest/request.go deleted file mode 100644 index 799e78f894f..00000000000 --- a/internal/pkg/agent/configrequest/request.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package configrequest - -import ( - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -// Request is the minimal interface a config request must have. -type Request interface { - ID() string - ShortID() string - CreatedAt() time.Time - Programs() []program.Program - ProgramNames() []string -} diff --git a/internal/pkg/agent/configrequest/step.go b/internal/pkg/agent/configrequest/step.go deleted file mode 100644 index 332c720f9ca..00000000000 --- a/internal/pkg/agent/configrequest/step.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package configrequest - -import "github.com/elastic/elastic-agent/internal/pkg/agent/program" - -const ( - // StepRun is a name of Start program event - StepRun = "sc-run" - // StepRemove is a name of Remove program event causing beat in version to be uninstalled - StepRemove = "sc-remove" - - // MetaConfigKey is key used to store configuration in metadata - MetaConfigKey = "config" -) - -// Step is a step needed to be applied -type Step struct { - // ID identifies kind of operation needed to be executed - ID string - // Version is a version of a program - Version string - // Spec for the program - ProgramSpec program.Spec - // Meta contains additional data such as version, configuration or tags. - Meta map[string]interface{} -} - -func (s *Step) String() string { - return "[ID:" + s.ID + ", PROCESS: " + s.ProgramSpec.Command() + " VERSION:" + s.Version + "]" -} diff --git a/internal/pkg/agent/control/control_test.go b/internal/pkg/agent/control/control_test.go index 09f77960185..c189ab4534d 100644 --- a/internal/pkg/agent/control/control_test.go +++ b/internal/pkg/agent/control/control_test.go @@ -16,13 +16,12 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" "github.com/elastic/elastic-agent/internal/pkg/agent/control/server" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) func TestServerClient_Version(t *testing.T) { - srv := server.New(newErrorLogger(t), nil, nil, nil, apmtest.DiscardTracer) + srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer) err := srv.Start() require.NoError(t, err) defer srv.Stop() @@ -43,29 +42,6 @@ func TestServerClient_Version(t *testing.T) { }, ver) } -func TestServerClient_Status(t *testing.T) { - l := newErrorLogger(t) - statusCtrl := status.NewController(l) - srv := server.New(l, nil, statusCtrl, nil, apmtest.DiscardTracer) - err := srv.Start() - require.NoError(t, err) - defer srv.Stop() - - c := client.New() - err = c.Connect(context.Background()) - require.NoError(t, err) - defer c.Disconnect() - - status, err := c.Status(context.Background()) - require.NoError(t, err) - - assert.Equal(t, &client.AgentStatus{ - Status: client.Healthy, - Message: "", - Applications: []*client.ApplicationStatus{}, - }, status) -} - func newErrorLogger(t *testing.T) *logger.Logger { t.Helper() diff --git a/internal/pkg/basecmd/version/cmd_test.go b/internal/pkg/basecmd/version/cmd_test.go index f7e4ae2e74f..5b21ed252ad 100644 --- a/internal/pkg/basecmd/version/cmd_test.go +++ b/internal/pkg/basecmd/version/cmd_test.go @@ -57,7 +57,7 @@ func TestCmdBinaryOnlyYAML(t *testing.T) { } func TestCmdDaemon(t *testing.T) { - srv := server.New(newErrorLogger(t), nil, nil, nil, apmtest.DiscardTracer) + srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer) require.NoError(t, srv.Start()) defer srv.Stop() @@ -73,7 +73,7 @@ func TestCmdDaemon(t *testing.T) { } func TestCmdDaemonYAML(t *testing.T) { - srv := server.New(newErrorLogger(t), nil, nil, nil, apmtest.DiscardTracer) + srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer) require.NoError(t, srv.Start()) defer srv.Stop() diff --git a/internal/pkg/capabilities/capabilities_test.go b/internal/pkg/capabilities/capabilities_test.go index 9f349836856..fb08455f792 100644 --- a/internal/pkg/capabilities/capabilities_test.go +++ b/internal/pkg/capabilities/capabilities_test.go @@ -16,9 +16,7 
@@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/status" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -35,8 +33,7 @@ func TestLoadCapabilities(t *testing.T) { for _, tc := range testCases { t.Run(tc, func(t *testing.T) { filename := filepath.Join("testdata", fmt.Sprintf("%s-capabilities.yml", tc)) - controller := status.NewController(l) - caps, err := Load(filename, l, controller) + caps, err := Load(filename, l) assert.NoError(t, err) assert.NotNil(t, caps) @@ -84,8 +81,7 @@ func TestInvalidLoadCapabilities(t *testing.T) { for _, tc := range testCases { t.Run(tc, func(t *testing.T) { filename := filepath.Join("testdata", fmt.Sprintf("%s-capabilities.yml", tc)) - controller := status.NewController(l) - caps, err := Load(filename, l, controller) + caps, err := Load(filename, l) assert.NoError(t, err) assert.NotNil(t, caps) @@ -130,15 +126,12 @@ func fixInputsType(mm map[string]interface{}) { } func TestCapabilityManager(t *testing.T) { - l := newErrorLogger(t) - t.Run("filter", func(t *testing.T) { m := getConfig() mgr := &capabilitiesManager{ caps: []Capability{ filterKeywordCap{keyWord: "filter"}, }, - reporter: status.NewController(l).RegisterComponent("test"), } newIn, err := mgr.Apply(m) @@ -163,7 +156,6 @@ func TestCapabilityManager(t *testing.T) { filterKeywordCap{keyWord: "filter"}, blockCap{}, }, - reporter: status.NewController(l).RegisterComponent("test"), } newIn, err := mgr.Apply(m) @@ -188,7 +180,6 @@ func TestCapabilityManager(t *testing.T) { filterKeywordCap{keyWord: "filter"}, blockCap{}, }, - reporter: status.NewController(l).RegisterComponent("test"), } newIn, err := mgr.Apply(m) @@ -213,7 +204,6 @@ func TestCapabilityManager(t *testing.T) { filterKeywordCap{keyWord: "filter"}, keepAsIsCap{}, }, - reporter: status.NewController(l).RegisterComponent("test"), } newIn, err := mgr.Apply(m) @@ -238,7 +228,6 @@ func TestCapabilityManager(t *testing.T) { filterKeywordCap{keyWord: "filter"}, keepAsIsCap{}, }, - reporter: status.NewController(l).RegisterComponent("test"), } newIn, err := mgr.Apply(m) @@ -263,7 +252,6 @@ func TestCapabilityManager(t *testing.T) { filterKeywordCap{keyWord: "filter"}, filterKeywordCap{keyWord: "key"}, }, - reporter: status.NewController(l).RegisterComponent("test"), } newIn, err := mgr.Apply(m) @@ -286,7 +274,6 @@ func TestCapabilityManager(t *testing.T) { filterKeywordCap{keyWord: "key"}, filterKeywordCap{keyWord: "filter"}, }, - reporter: status.NewController(l).RegisterComponent("test"), } newIn, err := mgr.Apply(m) @@ -336,14 +323,3 @@ func getConfig() map[string]string { "key": "val", } } - -func newErrorLogger(t *testing.T) *logger.Logger { - t.Helper() - - loggerCfg := logger.DefaultLoggingConfig() - loggerCfg.Level = logp.ErrorLevel - - log, err := logger.NewFromConfig("", loggerCfg, false) - require.NoError(t, err) - return log -} diff --git a/internal/pkg/capabilities/input_test.go b/internal/pkg/capabilities/input_test.go index fe48fb026e3..0d4892ad137 100644 --- a/internal/pkg/capabilities/input_test.go +++ b/internal/pkg/capabilities/input_test.go @@ -2,15 +2,15 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//nolint:dupl // duplicate code is in test cases package capabilities import ( "fmt" "testing" - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" @@ -18,7 +18,6 @@ import ( ) func TestMultiInput(t *testing.T) { - tr := &testReporter{} l, _ := logger.New("test", false) t.Run("no match", func(t *testing.T) { @@ -122,7 +121,7 @@ func TestMultiInput(t *testing.T) { initialInputs := []string{"system/metrics", "system/logs"} expectedInputs := []string{"system/logs"} - cap, err := newInputsCapability(l, rd, tr) + cap, err := newInputsCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -144,7 +143,8 @@ func TestMultiInput(t *testing.T) { for _, in := range expectedInputs { var typeFound bool - nodes := inputsList.Value().([]transpiler.Node) + nodes, ok := inputsList.Value().([]transpiler.Node) + require.True(t, ok) for _, inputNode := range nodes { typeNode, found := inputNode.Find("type") assert.True(t, found, "type not found") @@ -170,7 +170,7 @@ func TestMultiInput(t *testing.T) { Input: "system/metrics", }}, } - cap, err := newInputsCapability(l, rd, tr) + cap, err := newInputsCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -185,10 +185,9 @@ func TestMultiInput(t *testing.T) { func TestInput(t *testing.T) { l, _ := logger.New("test", false) - tr := &testReporter{} t.Run("invalid rule", func(t *testing.T) { r := &upgradeCapability{} - cap, err := newInputCapability(l, r, tr) + cap, err := newInputCapability(l, r) assert.NoError(t, err, "no error expected") assert.Nil(t, cap, "cap should not be created") }) @@ -198,7 +197,7 @@ func TestInput(t *testing.T) { Type: "allow", Input: "", } - cap, err := newInputCapability(l, r, tr) + cap, err := newInputCapability(l, r) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") }) @@ -250,8 +249,7 @@ func TestInput(t *testing.T) { } func runInputTest(t *testing.T, l *logger.Logger, r *inputCapability, expectedInputs []string, initialInputs []string) { - tr := &testReporter{} - cap, err := newInputCapability(l, r, tr) + cap, err := newInputCapability(l, r) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -311,8 +309,7 @@ func runInputTest(t *testing.T, l *logger.Logger, r *inputCapability, expectedIn } func runMultiInputTest(t *testing.T, l *logger.Logger, rd *ruleDefinitions, expectedInputs []string, initialInputs []string) { - tr := &testReporter{} - cap, err := newInputsCapability(l, rd, tr) + cap, err := newInputsCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -392,8 +389,3 @@ func getInputsMap(tt ...string) map[string]interface{} { return astMap } - -type testReporter struct{} - -func (*testReporter) Update(state.Status, string, map[string]interface{}) {} -func (*testReporter) Unregister() {} diff --git a/internal/pkg/capabilities/output_test.go b/internal/pkg/capabilities/output_test.go index 790b41cff63..ef730859642 100644 --- a/internal/pkg/capabilities/output_test.go +++ b/internal/pkg/capabilities/output_test.go @@ -2,6 +2,7 @@ // or more contributor 
license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//nolint:dupl // duplicate code is in test cases package capabilities import ( @@ -9,6 +10,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" @@ -16,7 +18,6 @@ import ( ) func TestMultiOutput(t *testing.T) { - tr := &testReporter{} l, _ := logger.New("test", false) t.Run("no match", func(t *testing.T) { rd := &ruleDefinitions{ @@ -100,7 +101,7 @@ func TestMultiOutput(t *testing.T) { initialOutputs := []string{"elasticsearch", "logstash"} expectedOutputs := []string{"elasticsearch"} - cap, err := newOutputsCapability(l, rd, tr) + cap, err := newOutputsCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -122,7 +123,8 @@ func TestMultiOutput(t *testing.T) { for _, in := range expectedOutputs { var typeFound bool - nodes := outputsDict.Value().([]transpiler.Node) + nodes, ok := outputsDict.Value().([]transpiler.Node) + require.True(t, ok) for _, outputKeyNode := range nodes { outputNode, ok := outputKeyNode.(*transpiler.Key).Value().(*transpiler.Dict) assert.True(t, ok, "output type key not string") @@ -152,7 +154,7 @@ func TestMultiOutput(t *testing.T) { }}, } - cap, err := newOutputsCapability(l, rd, tr) + cap, err := newOutputsCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -166,11 +168,10 @@ func TestMultiOutput(t *testing.T) { } func TestOutput(t *testing.T) { - tr := &testReporter{} l, _ := logger.New("test", false) t.Run("invalid rule", func(t *testing.T) { r := &upgradeCapability{} - cap, err := newOutputCapability(l, r, tr) + cap, err := newOutputCapability(l, r) assert.NoError(t, err, "no error expected") assert.Nil(t, cap, "cap should not be created") }) @@ -180,7 +181,7 @@ func TestOutput(t *testing.T) { Type: "allow", Output: "", } - cap, err := newOutputCapability(l, r, tr) + cap, err := newOutputCapability(l, r) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") }) @@ -231,8 +232,7 @@ func TestOutput(t *testing.T) { } func runMultiOutputTest(t *testing.T, l *logger.Logger, rd *ruleDefinitions, expectedOutputs []string, initialOutputs []string) { - tr := &testReporter{} - cap, err := newOutputsCapability(l, rd, tr) + cap, err := newOutputsCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -286,8 +286,7 @@ func runMultiOutputTest(t *testing.T, l *logger.Logger, rd *ruleDefinitions, exp } func runOutputTest(t *testing.T, l *logger.Logger, r *outputCapability, expectedOutputs []string, initialOutputs []string) { - tr := &testReporter{} - cap, err := newOutputCapability(l, r, tr) + cap, err := newOutputCapability(l, r) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") diff --git a/internal/pkg/capabilities/upgrade.go b/internal/pkg/capabilities/upgrade.go index 2773f6c9709..067db487e39 100644 --- a/internal/pkg/capabilities/upgrade.go +++ b/internal/pkg/capabilities/upgrade.go @@ -13,11 +13,6 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -const ( - versionKey = "version" - sourceURIKey = 
"source_uri" -) - // NewUpgradeCapability creates capability filter for upgrade. // Available variables: // - version diff --git a/internal/pkg/capabilities/upgrade_test.go b/internal/pkg/capabilities/upgrade_test.go index db6d0f50ab4..40dcf730cca 100644 --- a/internal/pkg/capabilities/upgrade_test.go +++ b/internal/pkg/capabilities/upgrade_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//nolint:dupl // duplicate code is in test cases package capabilities import ( @@ -14,11 +15,10 @@ import ( ) func TestUpgrade(t *testing.T) { - tr := &testReporter{} l, _ := logger.New("test", false) t.Run("invalid rule", func(t *testing.T) { r := &inputCapability{} - cap, err := newUpgradeCapability(l, r, tr) + cap, err := newUpgradeCapability(l, r) assert.NoError(t, err, "no error expected") assert.Nil(t, cap, "cap should not be created") }) @@ -33,7 +33,7 @@ func TestUpgrade(t *testing.T) { }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") }) @@ -47,11 +47,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - ta := &testUpgradeAction{version: "8.0.0"} + ta := map[string]interface{}{ + "version": "8.0.0", + } outAfter, err := cap.Apply(ta) assert.NoError(t, err, "should not be failing") @@ -69,11 +71,13 @@ func TestUpgrade(t *testing.T) { }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - ta := &testUpgradeAction{version: "8.0.0"} + ta := map[string]interface{}{ + "version": "8.0.0", + } outAfter, err := cap.Apply(ta) assert.Error(t, err, "should fail") @@ -90,11 +94,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - ta := &testUpgradeAction{version: "9.0.0"} + ta := map[string]interface{}{ + "version": "8.0.0", + } outAfter, err := cap.Apply(ta) assert.NotEqual(t, ErrBlocked, err, "should not be blocking") @@ -111,11 +117,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - ta := &testUpgradeAction{version: "8.0.0"} + ta := map[string]interface{}{ + "version": "8.0.0", + } outAfter, err := cap.Apply(ta) assert.Equal(t, ErrBlocked, err, "should be blocking") @@ -132,11 +140,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - ta := &testUpgradeAction{version: "8.1.0"} + ta := map[string]interface{}{ + "version": "8.1.0", + } outAfter, err := cap.Apply(ta) assert.Equal(t, ErrBlocked, err, "should be blocking") @@ -153,11 +163,13 @@ func TestUpgrade(t *testing.T) { }, }, 
} - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - ta := &testUpgradeAction{version: "7.157.0"} + ta := map[string]interface{}{ + "version": "7.157.0", + } outAfter, err := cap.Apply(ta) assert.Equal(t, ErrBlocked, err, "should be blocking") @@ -174,11 +186,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - ta := &testUpgradeAction{version: "8.2.0"} + ta := map[string]interface{}{ + "version": "8.2.0", + } outAfter, err := cap.Apply(ta) assert.NotEqual(t, ErrBlocked, err, "should not be blocking") @@ -195,7 +209,7 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -221,13 +235,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - apiAction := &fleetapi.ActionUpgrade{ - Version: "9.0.0", - SourceURI: "http://artifacts.elastic.co", + apiAction := map[string]interface{}{ + "version": "9.0.0", + "source_uri": "http://artifacts.elastic.co", } outAfter, err := cap.Apply(apiAction) @@ -245,13 +259,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - apiAction := fleetapi.ActionUpgrade{ - Version: "9.0.0", - SourceURI: "http://artifacts.elastic.co", + apiAction := map[string]interface{}{ + "version": "9.0.0", + "source_uri": "http://artifacts.elastic.co", } outAfter, err := cap.Apply(apiAction) @@ -269,13 +283,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - apiAction := fleetapi.ActionUpgrade{ - Version: "9.0.0", - SourceURI: "https://artifacts.elastic.co", + apiAction := map[string]interface{}{ + "version": "9.0.0", + "source_uri": "https://artifacts.elastic.co", } outAfter, err := cap.Apply(apiAction) @@ -293,13 +307,13 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") - apiAction := fleetapi.ActionUpgrade{ - Version: "9.0.0", - SourceURI: "http://artifacts.elastic.co", + apiAction := map[string]interface{}{ + "version": "9.0.0", + "source_uri": "http://artifacts.elastic.co", } outAfter, err := cap.Apply(apiAction) @@ -316,7 +330,7 @@ func TestUpgrade(t *testing.T) { }, }, } - cap, err := newUpgradesCapability(l, rd, tr) + cap, err := newUpgradesCapability(l, rd) assert.NoError(t, err, "error not expected, provided eql is valid") assert.NotNil(t, cap, "cap should be created") @@ -328,17 +342,3 @@ func TestUpgrade(t *testing.T) { assert.Equal(t, 
apiAction, outAfter, "action should not be altered") }) } - -type testUpgradeAction struct { - version string -} - -// Version to upgrade to. -func (a *testUpgradeAction) Version() string { - return a.version -} - -// SourceURI for download. -func (a *testUpgradeAction) SourceURI() string { - return "http://artifacts.elastic.co" -} diff --git a/internal/pkg/core/monitoring/beats/beats_monitor.go b/internal/pkg/core/monitoring/beats/beats_monitor.go deleted file mode 100644 index b795c5ecb58..00000000000 --- a/internal/pkg/core/monitoring/beats/beats_monitor.go +++ /dev/null @@ -1,309 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package beats - -import ( - "fmt" - "net/url" - "os" - "path/filepath" - "runtime" - "strings" - "unicode" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" - - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/config" - monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" -) - -const httpPlusPrefix = "http+" -const defaultMonitoringNamespace = "default" - -// Monitor implements the monitoring.Monitor interface providing information -// about beats. -type Monitor struct { - operatingSystem string - config *monitoringConfig.MonitoringConfig - installPath string -} - -// NewMonitor creates a beats monitor. -func NewMonitor(downloadConfig *artifact.Config, monitoringCfg *monitoringConfig.MonitoringConfig, logMetrics bool) *Monitor { - if monitoringCfg == nil { - monitoringCfg = monitoringConfig.DefaultConfig() - monitoringCfg.Pprof = &monitoringConfig.PprofConfig{Enabled: false} - monitoringCfg.HTTP.Buffer = &monitoringConfig.BufferConfig{Enabled: false} - } - monitoringCfg.LogMetrics = logMetrics - - return &Monitor{ - operatingSystem: downloadConfig.OS(), - installPath: downloadConfig.InstallPath, - config: monitoringCfg, - } -} - -// Reload reloads state of the monitoring based on config. -func (b *Monitor) Reload(rawConfig *config.Config) error { - cfg := configuration.DefaultConfiguration() - if err := rawConfig.Unpack(&cfg); err != nil { - return err - } - - if cfg == nil || cfg.Settings == nil || cfg.Settings.MonitoringConfig == nil { - b.config = monitoringConfig.DefaultConfig() - } else { - if cfg.Settings.MonitoringConfig.Pprof == nil { - cfg.Settings.MonitoringConfig.Pprof = b.config.Pprof - } - if cfg.Settings.MonitoringConfig.HTTP.Buffer == nil { - cfg.Settings.MonitoringConfig.HTTP.Buffer = b.config.HTTP.Buffer - } - b.config = cfg.Settings.MonitoringConfig - logMetrics := true - if cfg.Settings.LoggingConfig != nil { - logMetrics = cfg.Settings.LoggingConfig.Metrics.Enabled - } - b.config.LogMetrics = logMetrics - } - - return nil -} - -// Close disables monitoring -func (b *Monitor) Close() { - b.config.Enabled = false - b.config.MonitorMetrics = false - b.config.MonitorLogs = false -} - -// IsMonitoringEnabled returns true if monitoring is enabled. -func (b *Monitor) IsMonitoringEnabled() bool { return b.config.Enabled } - -// MonitoringNamespace returns monitoring namespace configured. 
-func (b *Monitor) MonitoringNamespace() string { - if b.config.Namespace == "" { - return defaultMonitoringNamespace - } - return b.config.Namespace -} - -// WatchLogs returns true if monitoring is enabled and monitor should watch logs. -func (b *Monitor) WatchLogs() bool { return b.config.Enabled && b.config.MonitorLogs } - -// WatchMetrics returns true if monitoring is enabled and monitor should watch metrics. -func (b *Monitor) WatchMetrics() bool { return b.config.Enabled && b.config.MonitorMetrics } - -func (b *Monitor) generateMonitoringEndpoint(spec program.Spec, pipelineID string) string { - return MonitoringEndpoint(spec, b.operatingSystem, pipelineID) -} - -func (b *Monitor) generateLoggingFile(spec program.Spec, pipelineID string) string { - return getLoggingFile(spec, b.operatingSystem, b.installPath, pipelineID) -} - -func (b *Monitor) generateLoggingPath(spec program.Spec, pipelineID string) string { - return filepath.Dir(b.generateLoggingFile(spec, pipelineID)) -} - -func (b *Monitor) ownLoggingPath(spec program.Spec) bool { - // if the spec file defines a custom log path then agent will not take ownership of the logging path - _, ok := spec.LogPaths[b.operatingSystem] - return !ok -} - -// EnrichArgs enriches arguments provided to application, in order to enable -// monitoring -func (b *Monitor) EnrichArgs(spec program.Spec, pipelineID string, args []string, isSidecar bool) []string { - appendix := make([]string, 0, 7) - - monitoringEndpoint := b.generateMonitoringEndpoint(spec, pipelineID) - if monitoringEndpoint != "" { - endpoint := monitoringEndpoint - if isSidecar { - endpoint += "_monitor" - } - appendix = append(appendix, - "-E", "http.enabled=true", - "-E", "http.host="+endpoint, - ) - if b.config.Pprof != nil && b.config.Pprof.Enabled { - appendix = append(appendix, - "-E", "http.pprof.enabled=true", - ) - } - if b.config.HTTP.Buffer != nil && b.config.HTTP.Buffer.Enabled { - appendix = append(appendix, - "-E", "http.buffer.enabled=true", - ) - } - } - - loggingPath := b.generateLoggingPath(spec, pipelineID) - if loggingPath != "" { - logFile := spec.CommandName() - if isSidecar { - logFile += "_monitor" - } - logFile = fmt.Sprintf("%s", logFile) - appendix = append(appendix, - "-E", "logging.files.path="+loggingPath, - "-E", "logging.files.name="+logFile, - "-E", "logging.files.keepfiles=7", - "-E", "logging.files.permission=0640", - "-E", "logging.files.interval=1h", - ) - - if !b.config.LogMetrics { - appendix = append(appendix, - "-E", "logging.metrics.enabled=false", - ) - } - } - - return append(args, appendix...) 
-} - -// Cleanup removes the monitoring drop path. -func (b *Monitor) Cleanup(spec program.Spec, pipelineID string) error { - // do not cleanup logs, they might not be all processed - drop := b.monitoringDrop(spec, pipelineID) - if drop == "" { - return nil - } - - return os.RemoveAll(drop) -} - -// Prepare executes steps in order for monitoring to work correctly -func (b *Monitor) Prepare(spec program.Spec, pipelineID string, uid, gid int) error { - if !b.ownLoggingPath(spec) { - // spec file passes a log path; so it's up to the application to ensure the - // path exists and the write permissions are set so Elastic Agent can read it - return nil - } - - drops := []string{b.generateLoggingPath(spec, pipelineID)} - if drop := b.monitoringDrop(spec, pipelineID); drop != "" { - drops = append(drops, drop) - } - - for _, drop := range drops { - if drop == "" { - continue - } - - _, err := os.Stat(drop) - if err != nil { - if !os.IsNotExist(err) { - return err - } - - // create - if err := os.MkdirAll(drop, 0775); err != nil { - return err - } - } - - if err := changeOwner(drop, uid, gid); err != nil { - return err - } - } - - return nil -} - -// LogPath describes a path where application stores logs. Empty if -// application is not monitorable. -func (b *Monitor) LogPath(spec program.Spec, pipelineID string) string { - if !b.WatchLogs() { - return "" - } - - return b.generateLoggingFile(spec, pipelineID) -} - -// MetricsPath describes a location where application exposes metrics -// collectable by metricbeat. -func (b *Monitor) MetricsPath(spec program.Spec, pipelineID string) string { - if !b.WatchMetrics() { - return "" - } - - return b.generateMonitoringEndpoint(spec, pipelineID) -} - -// MetricsPathPrefixed returns the metrics path prefixed with the http+ prefix. -func (b *Monitor) MetricsPathPrefixed(spec program.Spec, pipelineID string) string { - return httpPlusPrefix + b.MetricsPath(spec, pipelineID) -} - -func (b *Monitor) monitoringDrop(spec program.Spec, pipelineID string) string { - return monitoringDrop(b.generateMonitoringEndpoint(spec, pipelineID)) -} - -func monitoringDrop(path string) (drop string) { - defer func() { - if drop != "" { - // Dir call changes separator to the one used in OS - // '/var/lib' -> '\var\lib\' on windows - baseLen := len(filepath.Dir(drop)) - drop = drop[:baseLen] - } - }() - - if strings.Contains(path, "localhost") { - return "" - } - - path = strings.TrimPrefix(path, httpPlusPrefix) - - // npipe is virtual without a drop - if isNpipe(path) { - return "" - } - - if isWindowsPath(path) { - return path - } - - u, _ := url.Parse(path) - if u == nil || (u.Scheme != "" && u.Scheme != "file" && u.Scheme != "unix") { - return "" - } - - if u.Scheme == "file" { - return strings.TrimPrefix(path, "file://") - } - - if u.Scheme == "unix" { - return strings.TrimPrefix(path, "unix://") - } - - return path -} - -func isNpipe(path string) bool { - return strings.HasPrefix(path, "npipe") || strings.HasPrefix(path, `\\.\pipe\`) } - -func isWindowsPath(path string) bool { - if len(path) < 4 { - return false - } - return unicode.IsLetter(rune(path[0])) && path[1] == ':' -} - -func changeOwner(path string, uid, gid int) error { - if runtime.GOOS == "windows" { - // on windows it always returns the syscall.EWINDOWS error, wrapped in *PathError - return nil - } - - return os.Chown(path, uid, gid) -} diff --git a/internal/pkg/core/monitoring/beats/drop_test.go b/internal/pkg/core/monitoring/beats/drop_test.go deleted file mode 100644 index 5c2f6be7f19..00000000000 ---
a/internal/pkg/core/monitoring/beats/drop_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package beats - -import ( - "testing" -) - -type testCase struct { - Endpoint string - Drop string -} - -func TestMonitoringDrops(t *testing.T) { - cases := []testCase{ - {`/var/lib/drop/abc.sock`, "/var/lib/drop"}, - {`npipe://drop`, ""}, - {`http+npipe://drop`, ""}, - {`\\.\pipe\drop`, ""}, - {`unix:///var/lib/drop/abc.sock`, "/var/lib/drop"}, - {`http+unix:///var/lib/drop/abc.sock`, "/var/lib/drop"}, - {`file:///var/lib/drop/abc.sock`, "/var/lib/drop"}, - {`http://localhost/stats`, ""}, - {`localhost/stats`, ""}, - {`http://localhost:8080/stats`, ""}, - {`localhost:8080/stats`, ""}, - {`http://1.2.3.4/stats`, ""}, - {`http://1.2.3.4:5678/stats`, ""}, - {`1.2.3.4:5678/stats`, ""}, - {`http://hithere.com:5678/stats`, ""}, - {`hithere.com:5678/stats`, ""}, - } - - for _, c := range cases { - t.Run(c.Endpoint, func(t *testing.T) { - drop := monitoringDrop(c.Endpoint) - if drop != c.Drop { - t.Errorf("Case[%s]: Expected '%s', got '%s'", c.Endpoint, c.Drop, drop) - } - }) - } -} diff --git a/internal/pkg/core/monitoring/beats/monitoring.go b/internal/pkg/core/monitoring/beats/monitoring.go deleted file mode 100644 index cb93d74e136..00000000000 --- a/internal/pkg/core/monitoring/beats/monitoring.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package beats - -import ( - "crypto/sha256" - "fmt" - "path/filepath" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - monitoringConfig "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" -) - -const ( - // args: data path, pipeline name, application name - logFileFormat = "%s/logs/%s/%s" - // args: data path, install path, pipeline name, application name - logFileFormatWin = "%s\\logs\\%s\\%s" - - // args: pipeline name, application name - mbEndpointFileFormatWin = `npipe:///%s-%s` - - // args: pipeline name, application name - agentMbEndpointFileFormatWin = `npipe:///elastic-agent` - // agentMbEndpointHTTP is used with cloud and exposes metrics on http endpoint - agentMbEndpointHTTP = "http://%s:%d" -) - -// MonitoringEndpoint is an endpoint where process is exposing its metrics. 
-func MonitoringEndpoint(spec program.Spec, operatingSystem, pipelineID string) string { - if endpoint, ok := spec.MetricEndpoints[operatingSystem]; ok { - return endpoint - } - if operatingSystem == "windows" { - return fmt.Sprintf(mbEndpointFileFormatWin, pipelineID, spec.CommandName()) - } - // unix socket path must be less than 104 characters - path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), pipelineID, spec.CommandName(), spec.CommandName())) - if len(path) < 104 { - return path - } - // place in global /tmp (or /var/tmp on Darwin) to ensure that it's small enough to fit; current path is way too long - // for it to be used, but needs to be unique per Agent (in the case that multiple are running) - return fmt.Sprintf(`unix:///tmp/elastic-agent/%x.sock`, sha256.Sum256([]byte(path))) -} - -func getLoggingFile(spec program.Spec, operatingSystem, installPath, pipelineID string) string { - if path, ok := spec.LogPaths[operatingSystem]; ok { - return path - } - if operatingSystem == "windows" { - return fmt.Sprintf(logFileFormatWin, paths.Home(), pipelineID, spec.CommandName()) - } - return fmt.Sprintf(logFileFormat, paths.Home(), pipelineID, spec.CommandName()) -} - -// AgentMonitoringEndpoint returns endpoint with exposed metrics for agent. -func AgentMonitoringEndpoint(operatingSystem string, cfg *monitoringConfig.MonitoringHTTPConfig) string { - if cfg != nil && cfg.Enabled { - return fmt.Sprintf(agentMbEndpointHTTP, cfg.Host, cfg.Port) - } - - if operatingSystem == "windows" { - return agentMbEndpointFileFormatWin - } - // unix socket path must be less than 104 characters - path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), "elastic-agent")) - if len(path) < 104 { - return path - } - // place in global /tmp to ensure that it's small enough to fit; current path is way too long - // for it to be used, but needs to be unique per Agent (in the case that multiple are running) - return fmt.Sprintf(`unix:///tmp/elastic-agent/%x.sock`, sha256.Sum256([]byte(path))) -} - -// AgentPrefixedMonitoringEndpoint returns endpoint with exposed metrics for agent. -func AgentPrefixedMonitoringEndpoint(operatingSystem string, cfg *monitoringConfig.MonitoringHTTPConfig) string { - return httpPlusPrefix + AgentMonitoringEndpoint(operatingSystem, cfg) -} diff --git a/internal/pkg/core/monitoring/monitor.go b/internal/pkg/core/monitoring/monitor.go deleted file mode 100644 index 2c87e384976..00000000000 --- a/internal/pkg/core/monitoring/monitor.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License.
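Both endpoint builders above rely on the same fallback: unix socket paths must stay under roughly 104 characters, so an over-long descriptive path is replaced by a short deterministic one derived from its SHA-256 hash. A stand-alone sketch of that pattern, for illustration only (the function name and sample paths below are invented, not part of this change):

package main

import (
	"crypto/sha256"
	"fmt"
	"strings"
)

// shortenSocketPath keeps a descriptive socket path when it fits and
// otherwise hashes it into a short path that is still unique per input.
func shortenSocketPath(path string) string {
	if len(path) < 104 {
		return path
	}
	// Hashing keeps the result unique per original path while
	// guaranteeing it fits within the unix socket path limit.
	return fmt.Sprintf("unix:///tmp/elastic-agent/%x.sock", sha256.Sum256([]byte(path)))
}

func main() {
	short := "unix:///tmp/agent/metrics.sock"
	long := "unix://" + strings.Repeat("/very-long-segment", 10) + ".sock"
	fmt.Println(shortenSocketPath(short)) // kept as-is
	fmt.Println(shortenSocketPath(long))  // replaced with a hashed path
}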
- -package monitoring - -import ( - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" -) - -// Monitor is a monitoring interface providing information about the way -// an application is monitored -type Monitor interface { - LogPath(spec program.Spec, pipelineID string) string - MetricsPath(spec program.Spec, pipelineID string) string - MetricsPathPrefixed(spec program.Spec, pipelineID string) string - - Prepare(spec program.Spec, pipelineID string, uid, gid int) error - EnrichArgs(spec program.Spec, pipelineID string, args []string, isSidecar bool) []string - Cleanup(spec program.Spec, pipelineID string) error - Reload(cfg *config.Config) error - IsMonitoringEnabled() bool - MonitoringNamespace() string - WatchLogs() bool - WatchMetrics() bool - Close() -} - -// NewMonitor creates a beats monitor based on a process configuration. -func NewMonitor(cfg *configuration.SettingsConfig) (Monitor, error) { - logMetrics := true - if cfg.LoggingConfig != nil { - logMetrics = cfg.LoggingConfig.Metrics.Enabled - } - return beats.NewMonitor(cfg.DownloadConfig, cfg.MonitoringConfig, logMetrics), nil -} diff --git a/internal/pkg/core/monitoring/noop/noop_monitor.go b/internal/pkg/core/monitoring/noop/noop_monitor.go deleted file mode 100644 index 44e47982455..00000000000 --- a/internal/pkg/core/monitoring/noop/noop_monitor.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package noop - -import ( - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/config" -) - -// Monitor is a monitoring interface providing information about the way -// a beat is monitored -type Monitor struct { -} - -// NewMonitor creates a beats monitor. -func NewMonitor() *Monitor { - return &Monitor{} -} - -// EnrichArgs enriches arguments provided to application, in order to enable -// monitoring -func (b *Monitor) EnrichArgs(_ program.Spec, _ string, args []string, _ bool) []string { - return args -} - -// Cleanup cleans up all drops. -func (b *Monitor) Cleanup(program.Spec, string) error { - return nil -} - -// Close closes the monitor. -func (b *Monitor) Close() {} - -// Prepare executes steps in order for monitoring to work correctly -func (b *Monitor) Prepare(program.Spec, string, int, int) error { - return nil -} - -// LogPath describes a path where application stores logs. Empty if -// application is not monitorable -func (b *Monitor) LogPath(program.Spec, string) string { - return "" -} - -// MetricsPath describes a location where application exposes metrics -// collectable by metricbeat. -func (b *Monitor) MetricsPath(program.Spec, string) string { - return "" -} - -// MetricsPathPrefixed returns the metrics path prefixed with the http+ prefix. -func (b *Monitor) MetricsPathPrefixed(program.Spec, string) string { - return "" -} - -// Reload reloads state based on configuration. -func (b *Monitor) Reload(cfg *config.Config) error { return nil } - -// IsMonitoringEnabled returns true if monitoring is configured.
-func (b *Monitor) IsMonitoringEnabled() bool { return false } - -// WatchLogs return true if monitoring is configured and monitoring logs is enabled. -func (b *Monitor) WatchLogs() bool { return false } - -// WatchMetrics return true if monitoring is configured and monitoring metrics is enabled. -func (b *Monitor) WatchMetrics() bool { return false } - -// MonitoringNamespace returns monitoring namespace configured. -func (b *Monitor) MonitoringNamespace() string { return "default" } diff --git a/internal/pkg/core/monitoring/server/handler.go b/internal/pkg/core/monitoring/server/handler.go deleted file mode 100644 index a3134ed53c9..00000000000 --- a/internal/pkg/core/monitoring/server/handler.go +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "fmt" - "net/http" -) - -type apiError interface { - Status() int -} - -func createHandler(fn func(w http.ResponseWriter, r *http.Request) error) *apiHandler { - return &apiHandler{ - innerFn: fn, - } -} - -type apiHandler struct { - innerFn func(w http.ResponseWriter, r *http.Request) error -} - -// ServeHTTP sets status code based on err returned -func (h *apiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - err := h.innerFn(w, r) - if err != nil { - switch e := err.(type) { // nolint:errorlint // Will need refactor. - case apiError: - w.WriteHeader(e.Status()) - default: - w.WriteHeader(http.StatusInternalServerError) - - } - - writeResponse(w, unexpectedErrorWithReason(err.Error())) - } -} - -func unexpectedErrorWithReason(reason string, args ...interface{}) errResponse { - return errResponse{ - Type: errTypeUnexpected, - Reason: fmt.Sprintf(reason, args...), - } -} diff --git a/internal/pkg/core/monitoring/server/process.go b/internal/pkg/core/monitoring/server/process.go deleted file mode 100644 index 1d1d9c80806..00000000000 --- a/internal/pkg/core/monitoring/server/process.go +++ /dev/null @@ -1,223 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/http" - "strings" - "syscall" - "time" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" - - "github.com/gorilla/mux" - - "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" - "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/beats" -) - -const ( - processIDKey = "processID" - monitoringSuffix = "-monitoring" - separator = "-" - timeout = 10 * time.Second - errTypeUnexpected = "UNEXPECTED" - - httpPlusPrefix = "http+" -) - -var ( - // ErrProgramNotSupported returned when requesting metrics for not supported program. 
- ErrProgramNotSupported = errors.New("specified program is not supported") - invalidChars = map[rune]struct{}{ - '"': {}, - '<': {}, - '>': {}, - '|': {}, - 0: {}, - ':': {}, - '*': {}, - '?': {}, - '\\': {}, - '/': {}, - ';': {}, - } -) - -func processHandler(statsHandler func(http.ResponseWriter, *http.Request) error) func(http.ResponseWriter, *http.Request) error { - return func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - vars := mux.Vars(r) - id, found := vars[processIDKey] - - if !found { - return errorfWithStatus(http.StatusNotFound, "productID not found") - } - - if id == paths.BinaryName { - // proxy stats for elastic agent process - return statsHandler(w, r) - } - - beatsPath := vars["beatsPath"] - if _, ok := beatsPathAllowlist[beatsPath]; !ok { - return errorfWithStatus(http.StatusNotFound, "endpoint not found") - } - - endpoint, err := generateEndpoint(id) - if err != nil { - return err - } - metricsBytes, statusCode, metricsErr := processMetrics(r.Context(), endpoint, beatsPath) - if metricsErr != nil { - return metricsErr - } - - if statusCode > 0 { - w.WriteHeader(statusCode) - } - - fmt.Fprint(w, string(metricsBytes)) - return nil - } -} - -var beatsPathAllowlist = map[string]struct{}{ - "": {}, - "stats": {}, - "state": {}, -} - -func processMetrics(ctx context.Context, endpoint, path string) ([]byte, int, error) { - hostData, err := parseURL(endpoint, "http", "", "", path, "") - if err != nil { - return nil, 0, errorWithStatus(http.StatusInternalServerError, err) - } - - dialer, err := hostData.transport.Make(timeout) - if err != nil { - return nil, 0, errorWithStatus(http.StatusInternalServerError, err) - } - - client := http.Client{ - Timeout: timeout, - Transport: &http.Transport{ - Dial: dialer.Dial, - }, - } - - req, err := http.NewRequest("GET", hostData.uri, nil) - if err != nil { - return nil, 0, errorWithStatus( - http.StatusInternalServerError, - fmt.Errorf("fetching metrics failed: %w", err), - ) - } - - req.Close = true - cctx, cancelFn := context.WithCancel(ctx) - defer cancelFn() - - resp, err := client.Do(req.WithContext(cctx)) - if err != nil { - statusCode := http.StatusInternalServerError - if errors.Is(err, syscall.ENOENT) { - statusCode = http.StatusNotFound - } - return nil, 0, errorWithStatus(statusCode, err) - } - defer resp.Body.Close() - - rb, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, 0, errorWithStatus(http.StatusInternalServerError, err) - } - - return rb, resp.StatusCode, nil -} - -func generateEndpoint(id string) (string, error) { - detail, err := parseID(id) - if err != nil { - return "", err - } - - endpoint := beats.MonitoringEndpoint(detail.spec, artifact.DefaultConfig().OS(), detail.output) - if !strings.HasPrefix(endpoint, httpPlusPrefix) && !strings.HasPrefix(endpoint, "http") { - // add prefix for npipe and unix - endpoint = httpPlusPrefix + endpoint - } - - if detail.isMonitoring { - endpoint += "_monitor" - } - return endpoint, nil -} - -func writeResponse(w http.ResponseWriter, c interface{}) { - bytes, err := json.Marshal(c) - if err != nil { - // json marshal failed - fmt.Fprintf(w, "Not valid json: %v", err) - return - } - - fmt.Fprint(w, string(bytes)) - -} - -type programDetail struct { - output string - binaryName string - isMonitoring bool - spec program.Spec -} - -func parseID(id string) (programDetail, error) { - var detail programDetail - if !isIDValid(id) { - return detail, errorfWithStatus(http.StatusBadRequest, "provided 
ID is not valid") - } - - for p, spec := range program.SupportedMap { - if !strings.HasPrefix(id, p+separator) { - continue - } - - detail.binaryName = p - detail.spec = spec - break - } - - if detail.binaryName == "" { - return detail, errorWithStatus(http.StatusNotFound, ErrProgramNotSupported) - } - - if strings.HasSuffix(id, monitoringSuffix) { - detail.isMonitoring = true - id = strings.TrimSuffix(id, monitoringSuffix) - } - - detail.output = strings.TrimPrefix(id, detail.binaryName+separator) - - return detail, nil -} - -func isIDValid(id string) bool { - for _, c := range id { - if _, found := invalidChars[c]; found { - return false - } - } - - return true -} diff --git a/internal/pkg/core/monitoring/server/process_linux_test.go b/internal/pkg/core/monitoring/server/process_linux_test.go deleted file mode 100644 index c147daf9b29..00000000000 --- a/internal/pkg/core/monitoring/server/process_linux_test.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -//go:build linux -// +build linux - -package server - -import ( - "context" - "net" - "net/http" - "net/http/httptest" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestProcessProxyRequest(t *testing.T) { - sock := "/tmp/elastic-agent-test.sock" - defer os.Remove(sock) - - endpoint := "http+unix://" + sock - server := httptest.NewUnstartedServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Write the path to the client so they can verify the request - // was correct - w.Write([]byte(r.URL.Path)) - })) - - // Mimic subprocesses and listen on a unix socket - l, err := net.Listen("unix", sock) - require.NoError(t, err) - server.Listener = l - server.Start() - defer server.Close() - - for _, path := range []string{"stats", "", "state"} { - respBytes, _, err := processMetrics(context.Background(), endpoint, path) - require.NoError(t, err) - // Verify that the server saw the path we tried to request - assert.Equal(t, "/"+path, string(respBytes)) - } -} diff --git a/internal/pkg/core/monitoring/server/process_test.go b/internal/pkg/core/monitoring/server/process_test.go deleted file mode 100644 index 658f64df893..00000000000 --- a/internal/pkg/core/monitoring/server/process_test.go +++ /dev/null @@ -1,90 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package server - -import ( - "errors" - "net/http" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestParseID(t *testing.T) { - cases := []struct { - Name string - ID string - ExpectedError bool - ExpectedStatusCode int - ExpectedProgram programDetail - }{ - {"path injected id", ".././../etc/passwd", true, http.StatusBadRequest, programDetail{}}, - {"pipe injected id", "first | second", true, http.StatusBadRequest, programDetail{}}, - {"filebeat with suffix", "filebeat;cat demo-default-monitoring", true, http.StatusBadRequest, programDetail{}}, - - {"filebeat correct", "filebeat-default", false, http.StatusBadRequest, programDetail{output: "default", binaryName: "filebeat"}}, - {"filebeat monitor correct", "filebeat-default-monitoring", false, http.StatusBadRequest, programDetail{output: "default", binaryName: "filebeat", isMonitoring: true}}, - - {"mb correct", "metricbeat-default", false, http.StatusBadRequest, programDetail{output: "default", binaryName: "metricbeat"}}, - {"mb monitor correct", "metricbeat-default-monitoring", false, http.StatusBadRequest, programDetail{output: "default", binaryName: "metricbeat", isMonitoring: true}}, - - {"endpoint correct", "endpoint-security-default", false, http.StatusBadRequest, programDetail{output: "default", binaryName: "endpoint-security"}}, - {"endpoint monitor correct", "endpoint-security-default-monitoring", false, http.StatusBadRequest, programDetail{output: "default", binaryName: "endpoint-security", isMonitoring: true}}, - - {"unknown", "unknown-default", true, http.StatusNotFound, programDetail{}}, - {"unknown monitor", "unknown-default-monitoring", true, http.StatusNotFound, programDetail{}}, - } - - for _, tc := range cases { - t.Run(tc.Name, func(t *testing.T) { - pd, err := parseID(tc.ID) - if !tc.ExpectedError { - require.NoError(t, err) - } - - if tc.ExpectedStatusCode > 0 && tc.ExpectedError { - statErr, ok := err.(apiError) // nolint:errorlint // will need refactor. 
- require.True(t, ok) - require.Equal(t, tc.ExpectedStatusCode, statErr.Status()) - } - - require.Equal(t, tc.ExpectedProgram.binaryName, pd.binaryName) - require.Equal(t, tc.ExpectedProgram.output, pd.output) - require.Equal(t, tc.ExpectedProgram.isMonitoring, pd.isMonitoring) - }) - } -} - -func TestStatusErr(t *testing.T) { - cases := map[string]struct { - Error error - ExpectedStatusCode int - }{ - "no error": {nil, 0}, - "normal error": {errors.New("something bad happened"), http.StatusInternalServerError}, - "status bound err - not found": {errorWithStatus(http.StatusNotFound, errors.New("something was not found")), http.StatusNotFound}, - "status bound err - internal": {errorWithStatus(http.StatusInternalServerError, errors.New("something was not found")), http.StatusInternalServerError}, - "status bound err - bad request": {errorWithStatus(http.StatusBadRequest, errors.New("something really bad happened")), http.StatusBadRequest}, - } - - dummyHandler := func(err error) func(w http.ResponseWriter, r *http.Request) error { - return func(w http.ResponseWriter, r *http.Request) error { - return err - } - } - - for name, tc := range cases { - t.Run(name, func(t *testing.T) { - h := createHandler(dummyHandler(tc.Error)) - tw := &testWriter{} - r, err := http.NewRequest("GET", "", nil) - require.NoError(t, err) - - h.ServeHTTP(tw, r) - - require.Equal(t, tc.ExpectedStatusCode, tw.statusCode) - }) - } -} diff --git a/internal/pkg/core/monitoring/server/processes.go b/internal/pkg/core/monitoring/server/processes.go deleted file mode 100644 index 65a1d2c6995..00000000000 --- a/internal/pkg/core/monitoring/server/processes.go +++ /dev/null @@ -1,133 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "net/http" - "strconv" - "strings" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/sorted" -) - -const ( - configuredType = "configured" - internalType = "internal" -) - -type sourceInfo struct { - // Kind is a kind of process e.g configured or internal - // configured - used for user configured processes - // internal - used for monitoring processes - Kind string `json:"kind"` - - // Outputs process is handling. - Outputs []string `json:"outputs"` -} - -type processInfo struct { - // ID is a unique id of the process. - ID string `json:"id"` - - // PID is a current process ID. - PID string `json:"pid"` - - // Binary name e.g filebeat, this does not contain absolute path. 
- Binary string `json:"binary"` - - // Source information - Source sourceInfo `json:"source"` -} - -type processesResponse struct { - Processes []processInfo `json:"processes"` -} - -type errResponse struct { - // Type is a type of error - Type string `json:"type"` - - // Reason is a detailed error message - Reason string `json:"reason"` -} - -type stater interface { - State() map[string]state.State -} - -func processesHandler(routesFetchFn func() *sorted.Set) func(http.ResponseWriter, *http.Request) { - return func(w http.ResponseWriter, _ *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - resp := processesResponse{ - Processes: processesFromRoutes(routesFetchFn), - } - - writeResponse(w, resp) - } -} - -func processesFromRoutes(routesFetchFn func() *sorted.Set) []processInfo { - var processes []processInfo - routes := routesFetchFn() - - for _, k := range routes.Keys() { - op, found := routes.Get(k) - if !found { - continue - } - - s, ok := op.(stater) - if !ok { - continue - } - - states := s.State() - - for app, state := range states { - binaryName, isMonitoring := appNameFromDescriptor(app) - appType := configuredType - if isMonitoring { - appType = internalType - } - - var pid int - if state.ProcessInfo != nil { - pid = state.ProcessInfo.PID - } - - processInfo := processInfo{ - ID: processID(k, binaryName, isMonitoring), - PID: strconv.Itoa(pid), - Binary: binaryName, - Source: sourceInfo{ - Kind: appType, - Outputs: []string{k}, - }, - } - - processes = append(processes, processInfo) - } - } - - return processes -} - -func processID(output, binaryName string, isMonitoring bool) string { - id := binaryName + separator + output - if isMonitoring { - return id + monitoringSuffix - } - - return id -} - -func appNameFromDescriptor(d string) (string, bool) { - // monitoring descriptor contains suffix with tag - // non-monitoring just `binaryname--version` - parts := strings.Split(d, "--") - return parts[0], len(parts) > 2 -} diff --git a/internal/pkg/core/monitoring/server/processes_test.go b/internal/pkg/core/monitoring/server/processes_test.go deleted file mode 100644 index f700274a16b..00000000000 --- a/internal/pkg/core/monitoring/server/processes_test.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License.
- -package server - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "os" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/assert" - - "github.com/elastic/elastic-agent/internal/pkg/core/state" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/process" -) - -func TestProcesses(t *testing.T) { - testRoutes := func(routes map[string]stater) func() *sorted.Set { - set := sorted.NewSet() - for k, s := range routes { - set.Add(k, s) - } - - return func() *sorted.Set { return set } - } - - t.Run("nothing running", func(t *testing.T) { - r := testRoutes(nil) - w := &testWriter{} - fn := processesHandler(r) - fn(w, nil) - - pr := processesResponse{ - Processes: nil, - } - - assert.Equal(t, 1, len(w.responses)) - if !assert.True(t, jsonComparer(w.responses[0], pr)) { - diff := cmp.Diff(pr, w.responses[0]) - t.Logf("Mismatch (-want, +got)\n%s", diff) - } - }) - - t.Run("process running", func(t *testing.T) { - r := testRoutes(map[string]stater{ - "default": &testStater{ - states: map[string]state.State{ - "filebeat--8.0.0": { - ProcessInfo: &process.Info{ - PID: 123, - Process: &os.Process{ - Pid: 123, - }, - }, - Status: state.Configuring, - }, - }, - }, - }) - w := &testWriter{} - fn := processesHandler(r) - fn(w, nil) - - pr := processesResponse{ - Processes: []processInfo{ - { - ID: "filebeat-default", - PID: "123", - Binary: "filebeat", - Source: sourceInfo{Kind: "configured", Outputs: []string{"default"}}, - }, - }, - } - - assert.Equal(t, 1, len(w.responses)) - if !assert.True(t, jsonComparer(w.responses[0], pr)) { - diff := cmp.Diff(w.responses[0], pr) - t.Logf("Mismatch (-want, +got)\n%s", diff) - } - }) - - t.Run("monitoring running", func(t *testing.T) { - r := testRoutes(map[string]stater{ - "default": &testStater{ - states: map[string]state.State{ - "filebeat--8.0.0--tag": { - ProcessInfo: &process.Info{ - PID: 123, - Process: &os.Process{ - Pid: 123, - }, - }, - Status: state.Configuring, - }, - }, - }, - }) - w := &testWriter{} - fn := processesHandler(r) - fn(w, nil) - - pr := processesResponse{ - Processes: []processInfo{ - { - ID: "filebeat-default-monitoring", - PID: "123", - Binary: "filebeat", - Source: sourceInfo{Kind: "internal", Outputs: []string{"default"}}, - }, - }, - } - - assert.Equal(t, 1, len(w.responses)) - if !assert.True(t, jsonComparer(w.responses[0], pr)) { - diff := cmp.Diff(w.responses[0], pr) - t.Logf("Mismatch (-want, +got)\n%s", diff) - } - }) -} - -type testStater struct { - states map[string]state.State -} - -func (s *testStater) State() map[string]state.State { - return s.states -} - -type testWriter struct { - responses []string - statusCode int -} - -func (w *testWriter) Header() http.Header { - return http.Header{} -} - -func (w *testWriter) Write(r []byte) (int, error) { - if w.responses == nil { - w.responses = make([]string, 0) - } - w.responses = append(w.responses, string(r)) - - return len(r), nil -} - -func (w *testWriter) WriteHeader(statusCode int) { - w.statusCode = statusCode -} - -func jsonComparer(expected string, candidate interface{}) bool { - candidateJSON, err := json.Marshal(&candidate) - if err != nil { - fmt.Println(err) - return false - } - - cbytes := make([]byte, 0, len(candidateJSON)) - bbuf := bytes.NewBuffer(cbytes) - if err := json.Compact(bbuf, candidateJSON); err != nil { - fmt.Println(err) - return false - } - - return bytes.Equal([]byte(expected), bbuf.Bytes()) -} diff --git a/internal/pkg/core/monitoring/server/server.go 
b/internal/pkg/core/monitoring/server/server.go deleted file mode 100644 index e5929909158..00000000000 --- a/internal/pkg/core/monitoring/server/server.go +++ /dev/null @@ -1,136 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "fmt" - "net/http" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/gorilla/mux" - "go.elastic.co/apm" - "go.elastic.co/apm/module/apmgorilla" - - "github.com/elastic/elastic-agent-libs/api" - "github.com/elastic/elastic-agent-libs/config" - "github.com/elastic/elastic-agent-libs/monitoring" - "github.com/elastic/elastic-agent-libs/monitoring/report/buffer" - "github.com/elastic/elastic-agent/internal/pkg/sorted" - "github.com/elastic/elastic-agent/pkg/core/logger" -) - -// New creates a new server exposing metrics and process information. -func New( - log *logger.Logger, - endpointConfig api.Config, - ns func(string) *monitoring.Namespace, - routesFetchFn func() *sorted.Set, - enableProcessStats bool, - enableBuffer bool, - tracer *apm.Tracer, -) (*api.Server, error) { - if err := createAgentMonitoringDrop(endpointConfig.Host); err != nil { - // log but ignore - log.Errorf("failed to create monitoring drop: %v", err) - } - - cfg, err := config.NewConfigFrom(endpointConfig) - if err != nil { - return nil, err - } - - return exposeMetricsEndpoint(log, cfg, ns, routesFetchFn, enableProcessStats, enableBuffer, tracer) -} - -func exposeMetricsEndpoint( - log *logger.Logger, - config *config.C, - ns func(string) *monitoring.Namespace, - routesFetchFn func() *sorted.Set, - enableProcessStats bool, - enableBuffer bool, - tracer *apm.Tracer, -) (*api.Server, error) { - r := mux.NewRouter() - if tracer != nil { - r.Use(apmgorilla.Middleware(apmgorilla.WithTracer(tracer))) - } - statsHandler := statsHandler(ns("stats")) - r.Handle("/stats", createHandler(statsHandler)) - - if enableProcessStats { - r.HandleFunc("/processes", processesHandler(routesFetchFn)) - r.Handle("/processes/{processID}", createHandler(processHandler(statsHandler))) - r.Handle("/processes/{processID}/", createHandler(processHandler(statsHandler))) - r.Handle("/processes/{processID}/{beatsPath}", createHandler(processHandler(statsHandler))) - } - - if enableBuffer { - bufferReporter, err := buffer.MakeReporter(config) // beat.Info is not used by buffer reporter - if err != nil { - return nil, fmt.Errorf("unable to create buffer reporter for elastic-agent: %w", err) - } - r.Handle("/buffer", bufferReporter) - } - - mux := http.NewServeMux() - mux.Handle("/", r) - - return api.New(log, mux, config) -} - -func createAgentMonitoringDrop(drop string) error { - if drop == "" || runtime.GOOS == "windows" { - return nil - } - - path := strings.TrimPrefix(drop, "unix://") - if strings.HasSuffix(path, ".sock") { - path = filepath.Dir(path) - } - - _, err := os.Stat(path) - if err != nil { - if !os.IsNotExist(err) { - return err - } - - // create - if err := os.MkdirAll(path, 0775); err != nil { - return err - } - } - - return os.Chown(path, os.Geteuid(), os.Getegid()) -} - -func errorWithStatus(status int, err error) *statusError { - return &statusError{ - err: err, - status: status, - } -} - -func errorfWithStatus(status int, msg string, args ...string) *statusError { - err := fmt.Errorf(msg, args) - return errorWithStatus(status, err) -} - -// StatusError holds 
the correlation between an error and a status -type statusError struct { - err error - status int -} - -func (s *statusError) Status() int { - return s.status -} - -func (s *statusError) Error() string { - return s.err.Error() -} diff --git a/internal/pkg/core/monitoring/server/stats.go b/internal/pkg/core/monitoring/server/stats.go deleted file mode 100644 index 366da268aad..00000000000 --- a/internal/pkg/core/monitoring/server/stats.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/elastic/elastic-agent-libs/monitoring" -) - -func statsHandler(ns *monitoring.Namespace) func(http.ResponseWriter, *http.Request) error { - return func(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - - data := monitoring.CollectStructSnapshot( - ns.GetRegistry(), - monitoring.Full, - false, - ) - - bytes, err := json.Marshal(data) - var content string - if err != nil { - content = fmt.Sprintf("Not valid json: %v", err) - } else { - content = string(bytes) - } - fmt.Fprint(w, content) - - return nil - } -} diff --git a/internal/pkg/core/monitoring/server/url.go b/internal/pkg/core/monitoring/server/url.go deleted file mode 100644 index c4d6507cfec..00000000000 --- a/internal/pkg/core/monitoring/server/url.go +++ /dev/null @@ -1,208 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package server - -import ( - "fmt" - "net" - "net/url" - "strings" - - "github.com/elastic/elastic-agent-libs/transport/dialer" -) - -type hostData struct { - transport dialer.Builder // The transport builder to use when creating the connection. - - uri string // The full URI that should be used in connections. - sanitizedURI string // A sanitized version of the URI without credentials. - - // Parts of the URI. - host string // The host and possibly port. - user string // Username - password string // Password -} - -// parseURL returns a hostData object from a raw 'host' value and a series of -// defaults that are added to the URL if not present in the rawHost value. -// Values from the rawHost take precedence over the defaults. -func parseURL(rawHost, scheme, user, pass, path, query string) (hostData, error) { - u, transport, err := getURL(rawHost, scheme, user, pass, path, query) - - if err != nil { - return hostData{}, err - } - - return newHostDataFromURLWithTransport(transport, u), nil -} - -// newHostDataFromURLWithTransport allows specifying what kind of transport to use in conjunction with the -// URL; this is useful if you use a combined scheme like "http+unix://" or "http+npipe".
-func newHostDataFromURLWithTransport(transport dialer.Builder, u *url.URL) hostData { - var user, pass string - if u.User != nil { - user = u.User.Username() - pass, _ = u.User.Password() - } - - host := u.Host - if strings.HasSuffix(u.Scheme, "unix") || strings.HasSuffix(u.Scheme, "npipe") { - host = u.Path - } - - return hostData{ - transport: transport, - uri: u.String(), - sanitizedURI: redactURLCredentials(u).String(), - host: host, - user: user, - password: pass, - } -} - -// getURL constructs a URL from the rawHost value and adds the provided user, -// password, path, and query params if one was not set in the rawURL value. -func getURL( - rawURL, scheme, username, password, path, query string, -) (*url.URL, dialer.Builder, error) { - - if parts := strings.SplitN(rawURL, "://", 2); len(parts) != 2 { - // Add scheme. - rawURL = fmt.Sprintf("%s://%s", scheme, rawURL) - } - - var t dialer.Builder - - u, err := url.Parse(rawURL) - if err != nil { - return nil, t, fmt.Errorf("error parsing URL: %w", err) - } - - // discover the transport to use to communicate with the host if we have a combined scheme. - // possible values are mb.TransportTCP, mb.transportUnix or mb.TransportNpipe. - switch u.Scheme { - case "http+unix": - t = dialer.NewUnixDialerBuilder(u.Path) - u.Path = "" - u.Scheme = "http" //nolint:goconst // it's not worth making it const, name of http will not change - u.Host = "unix" - case "http+npipe": - p := u.Path - u.Path = "" - u.Scheme = "http" - u.Host = "npipe" - - if p == "" && u.Host != "" { - p = u.Host - } - - // cleanup of all possible prefixes - p = strings.TrimPrefix(p, "/pipe") - p = strings.TrimPrefix(p, `\\.\pipe`) - p = strings.TrimPrefix(p, "\\") - p = strings.TrimPrefix(p, "/") - - segs := strings.SplitAfterN(p, "/", 2) - if len(segs) == 2 { - p = strings.TrimSuffix(segs[0], "/") - u.Path = "/" + segs[1] - } - - p = `\\.\pipe\` + strings.Replace(p, "/", "\\", -1) - t = dialer.NewNpipeDialerBuilder(p) - default: - t = dialer.NewDefaultDialerBuilder() - } - - setURLUser(u, username, password) - - if !strings.HasSuffix(u.Scheme, "unix") && !strings.HasSuffix(u.Scheme, "npipe") { - if u.Host == "" { - return nil, t, fmt.Errorf("error parsing URL: empty host") - } - - // Validate the host. The port is optional. - host, _, err := net.SplitHostPort(u.Host) - if err != nil { - if strings.Contains(err.Error(), "missing port") { - host = u.Host - } else { - return nil, t, fmt.Errorf("error parsing URL: %w", err) - } - } - if host == "" { - return nil, t, fmt.Errorf("error parsing URL: empty host") - } - } - - if u.Path == "" && path != "" { - // The path given in the host config takes precedence over the - // default path. - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - u.Path = path - } - - //Adds the query params in the url - u, err = setQueryParams(u, query) - return u, t, err -} - -// setURLUser set the user credentials in the given URL. If the username or -// password is not set in the URL then the default is used (if provided). 
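The combined schemes handled by getURL above follow one convention: the segment before '+' is the protocol spoken over the connection, and the segment after it selects the transport. A self-contained illustration of that split, not the deleted implementation itself (the sample endpoints are invented):

package main

import (
	"fmt"
	"net/url"
)

func main() {
	for _, raw := range []string{
		"http+unix:///var/lib/agent/metrics.sock",
		"http+npipe:///agent-pipe",
		"http://127.0.0.1:6791/stats",
	} {
		u, err := url.Parse(raw)
		if err != nil {
			panic(err)
		}
		switch u.Scheme {
		case "http+unix":
			// HTTP is spoken, but bytes travel over a unix socket at u.Path.
			fmt.Println("unix transport, socket:", u.Path)
		case "http+npipe":
			// HTTP is spoken, but bytes travel over a named pipe.
			fmt.Println("npipe transport, pipe:", u.Path)
		default:
			// Plain scheme: ordinary TCP.
			fmt.Println("tcp transport, host:", u.Host)
		}
	}
}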
-func setURLUser(u *url.URL, defaultUser, defaultPass string) { - var user, pass string - var userIsSet, passIsSet bool - if u.User != nil { - user = u.User.Username() - if user != "" { - userIsSet = true - } - pass, passIsSet = u.User.Password() - } - - if !userIsSet && defaultUser != "" { - userIsSet = true - user = defaultUser - } - - if !passIsSet && defaultPass != "" { - passIsSet = true - pass = defaultPass - } - - if passIsSet { - u.User = url.UserPassword(user, pass) - } else if userIsSet { - u.User = url.User(user) - } -} - -// setQueryParams adds the query params to existing query parameters overwriting any -// keys that already exist. -func setQueryParams(u *url.URL, query string) (*url.URL, error) { - q := u.Query() - params, err := url.ParseQuery(query) - if err != nil { - return u, err - } - for key, values := range params { - for _, v := range values { - q.Set(key, v) - } - } - u.RawQuery = q.Encode() - return u, nil - -} - -// redactURLCredentials returns the URL as a string with the username and -// password redacted. -func redactURLCredentials(u *url.URL) *url.URL { - redacted := *u - redacted.User = nil - return &redacted -} diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go index d92586aa0a3..000e2fd36fa 100644 --- a/pkg/component/fake/main.go +++ b/pkg/component/fake/main.go @@ -80,7 +80,7 @@ func run() error { } case err := <-c.Errors(): if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { - fmt.Fprintf(os.Stderr, "GRPC client error: %s", err) + fmt.Fprintf(os.Stderr, "GRPC client error: %+v\n", err) } } } @@ -148,7 +148,7 @@ type fakeInput struct { stateMsg string } -func newFakeInput(unit *client.Unit, cfg inputConfig) (*fakeInput, error) { +func newFakeInput(unit *client.Unit, cfg inputConfig) *fakeInput { i := &fakeInput{ unit: unit, cfg: cfg, @@ -158,7 +158,7 @@ func newFakeInput(unit *client.Unit, cfg inputConfig) (*fakeInput, error) { unit.RegisterAction(&stateSetterAction{i}) unit.RegisterAction(&killAction{}) _ = unit.UpdateState(i.state, i.stateMsg, nil) - return i, nil + return i } func (f *fakeInput) Unit() *client.Unit { @@ -243,7 +243,7 @@ func newRunningUnit(unit *client.Unit) (runningUnit, error) { } switch cfg.Type { case fake: - return newFakeInput(unit, cfg) + return newFakeInput(unit, cfg), nil } return nil, fmt.Errorf("unknown unit config type: %s", cfg.Type) } diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index 41a62557ea8..f496a01100e 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -25,6 +25,10 @@ import ( "github.com/elastic/elastic-agent/pkg/component" ) +const ( + exeExt = ".exe" +) + var ( fakeInputSpec = component.InputSpec{ Name: "fake", @@ -151,7 +155,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } comp := component.Component{ ID: "fake-default", @@ -268,7 +272,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } comp := component.Component{ ID: "fake-default", @@ -390,7 +394,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } comp := component.Component{ ID: "fake-default", 
@@ -544,7 +548,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } comp := component.Component{ ID: "fake-default", @@ -586,15 +590,19 @@ func TestManager_FakeInput_ActionState(t *testing.T) { if unit.State == client.UnitStateFailed { subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) } else if unit.State == client.UnitStateHealthy { - actionCtx, actionCancel := context.WithTimeout(context.Background(), 3*time.Second) - _, err := m.PerformAction(actionCtx, comp.Units[0], "set_state", map[string]interface{}{ - "state": client.UnitStateDegraded, - "message": "Action Set Degraded", - }) - actionCancel() - if err != nil { - subErrCh <- err - } + // must be called in a separate go routine because it cannot block receiving from the + // subscription channel + go func() { + actionCtx, actionCancel := context.WithTimeout(context.Background(), 15*time.Second) + _, err := m.PerformAction(actionCtx, comp.Units[0], "set_state", map[string]interface{}{ + "state": client.UnitStateDegraded, + "message": "Action Set Degraded", + }) + actionCancel() + if err != nil { + subErrCh <- err + } + }() } else if unit.State == client.UnitStateDegraded { // action set it to degraded subErrCh <- nil @@ -666,7 +674,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } comp := component.Component{ ID: "fake-default", @@ -705,8 +713,6 @@ func TestManager_FakeInput_Restarts(t *testing.T) { if state.State == client.UnitStateFailed { if !killed { subErrCh <- fmt.Errorf("component failed: %s", state.Message) - } else { - // expected to go to failed as it was killed with the action } } else { unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] @@ -714,8 +720,6 @@ func TestManager_FakeInput_Restarts(t *testing.T) { if unit.State == client.UnitStateFailed { if !killed { subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) - } else { - // expected to go to failed as it was killed with the action } } else if unit.State == client.UnitStateHealthy { // force the input to exit and it should be restarted @@ -805,7 +809,7 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } comp := component.Component{ ID: "fake-default", @@ -922,7 +926,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } comp := component.Component{ ID: "fake-default", @@ -1042,7 +1046,7 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { binaryPath := filepath.Join("..", "fake", "fake") if runtime.GOOS == component.Windows { - binaryPath += ".exe" + binaryPath += exeExt } runtimeSpec := component.InputRuntimeSpec{ InputType: "fake", From 9b68ea4bfa1a614026fa7c1c4cc044825967b766 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 28 Jul 2022 11:37:22 -0400 Subject: [PATCH 10/49] [v2] Merge July 27th main into v2 feature branch (#789) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [Automation] Update elastic stack version to 8.4.0-40cff009 for testing (#557) Co-authored-by: apmmachine * 
[Automation] Update elastic stack version to 8.4.0-5e6770b1 for testing (#564) Co-authored-by: apmmachine * Fix regression and use comma separated values (#560) Fix regression from https://github.com/elastic/elastic-agent/pull/509 * Change in Jenkinsfile will trigger k8s run (#568) * [Automation] Update elastic stack version to 8.4.0-da5a1c6d for testing (#573) Co-authored-by: apmmachine * Add `@metadata.input_id` and `@metadata.stream_id` when injecting streams (#527) These 2 values are going to be used in the shipper to identify where an event came from in order to apply processors accordingly. Also, added test cases for the processor to verify the change and updated test cases with the new processor. * Add filemod times to contents of diagnostics collect command (#570) * Add filemod times to contents of diagnostics collect command Add filemod times to the files and directories in the zip archive. Log files (and sub dirs) will use the modtime returned by the fileinfo for the source. Others will use the timestamp from when the zip is created. * Fix linter * [Automation] Update elastic stack version to 8.4.0-b13123ee for testing (#581) Co-authored-by: apmmachine * Fix Agent upgrade 8.2->8.3 (#578) * Fix Agent upgrade 8.2->8.3 * Improve the upgrade encryption handling. Add .yml files cleanup. * Rollback ActionUpgrade to action_id, add MarkerActionUpgrade adapter struct for marker serialization compatibility * Update containerd (#577) * [Automation] Update elastic stack version to 8.4.0-4fe26f2a for testing (#591) Co-authored-by: apmmachine * Set explicit ExitTimeOut for MacOS agent launchd plist (#594) * Set explicit ExitTimeOut for MacOS agent launchd plist * [Automation] Update elastic stack version to 8.4.0-2e32a640 for testing (#599) Co-authored-by: apmmachine * ci: enable build notifications as GitHub issues (#595) * status identifies failing component, fleet gateway may report degraded, liveness endpoint added (#569) * Add liveness endpoint Add /liveness route to metrics server. This route will report the status from pkg/core/status. fleet-gateway will now report a degraded state if a checkin fails. This may not propagate to fleet-server as a failed checkin means communications between the agent and the server are not working. It may also lead to the server reporting degraded for up to 30s (fleet-server polling time) when the agent is able to successfully connect. * linter fix * add nolint directive * Linter fix * Review feedback, add doc strings * Rename noop controller file to _test file * [Automation] Update elastic stack version to 8.4.0-722a7d79 for testing (#607) Co-authored-by: apmmachine * ci: enable flaky test detector (#605) * [Automation] Update elastic stack version to 8.4.0-210dd487 for testing (#620) Co-authored-by: apmmachine * mergify: remove backport automation for non active branches (#615) * chore: use elastic-agent profile to run the E2E tests (#610) * [Automation] Update elastic stack version to 8.4.0-a6aa9f3b for testing (#631) Co-authored-by: apmmachine * add macros pointing to new agent's repo and fix old macro calls (#458) * Add mount of /etc/machine-id for managed Agent in k8s (#530) * Set hostPID=true for managed agent in k8s (#528) * Set hostPID=true for managed agent in k8s * Add comment on hostPID.
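The /liveness route described in #569 is a plain HTTP status probe, so any HTTP client can consume it. A minimal client-side check, as a sketch only (the host and port below are placeholders, not values defined by this change):

package main

import (
	"fmt"
	"net/http"
	"time"
)

func main() {
	client := http.Client{Timeout: 5 * time.Second}
	// A 2xx response means the agent considers itself healthy; a failed
	// check-in surfaces as a non-2xx status instead.
	resp, err := client.Get("http://localhost:6791/liveness")
	if err != nil {
		fmt.Println("agent not reachable:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println("liveness status:", resp.StatusCode)
}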
* [Automation] Update elastic stack version to 8.4.0-86cc80f3 for testing (#648) Co-authored-by: apmmachine * Update elastic-agent-libs version: includes restriction on default VerificationMode to `full` (#521) * update version * mage fmt update * update dependency * update changelog * redact sensitive information in diagnostics collect command (#566) * Support Cloudbeat regex input type (#638) * support input type with regex * Update supported.go * Changing the regex to support backward compatible * Disable flaky test download test (#641) * [Automation] Update elastic stack version to 8.4.0-3d206b5d for testing (#656) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-3ad82aa8 for testing (#661) Co-authored-by: apmmachine * jjbb: exclude allowed branches, tags and PRs (#658) cosmetic change in the description and boolean based * Update elastic-agent-project-board.yml (#649) * ci: fix labels that clashes with the Orka workers (#659) * [Automation] Update elastic stack version to 8.4.0-03bd6f3f for testing (#668) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-533f1e30 for testing (#675) Co-authored-by: apmmachine * Osquerybeat: Fix osquerybeat is not running with logstash output (#674) * [Automation] Update elastic stack version to 8.4.0-d0a4da44 for testing (#684) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-dd98ded4 for testing (#703) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-164d9a10 for testing (#705) Co-authored-by: apmmachine * Add missing license headers (#711) * [Automation] Update elastic stack version to 8.4.0-00048b66 for testing (#713) Co-authored-by: apmmachine * Allow - in eql variable names (#710) * fix to allow dashes in variable names in EQL expressions extend eql to allow the '-' char to appear in variable names, i.e., ${data.some-var} and additional test cases to eql, the transpiler, and the k8s provider to verify this works. Note that the bug was caused by the EQL limitation, the other test cases were added when attempting to find it. * Regenerate grammar with antlr 4.7.1, add CHANGELOG * Fix linter issue * Fix typo * Fix transpiler to allow : in dynamic variables. (#680) Fix transpiler regex to allow ':' characters in dynamic variables so that users can input "${dynamic.lookup|'fallback.here'}". Co-authored-by: Aleksandr Maus * Fix for the filebeat spec file picking up packetbeat inputs (#700) * Reproduce filebeat picking up packetbeat inputs * Filebeat: filter inputs as first input transform. Move input filtering to be the first input transformation that occurs in the filebeat spec file. Fixes https://github.com/elastic/elastic-agent/issues/427. * Update changelog. * [Automation] Update elastic stack version to 8.4.0-3cd57abb for testing (#724) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-a324b98b for testing (#727) Co-authored-by: apmmachine * ci: run on MacOS12 (#696) * [Automation] Update elastic stack version to 8.4.0-31315ca3 for testing (#732) Co-authored-by: apmmachine * fix typo on package command (#734) This commit fixes the typo in the package command on the README.md. * Allow / to be used in variable names (#718) * Allow the / character to be used in variable names. Allow / to be used in variable names from dynamic providers and eql expressions. Ensure that k8s providers can provide variables with slashes in their names.
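Taken together, the variable-name fixes above (#710, #680, #718) widen the set of characters accepted inside ${...} references to include '-', ':' and '/'. A rough stand-alone approximation of the widened matching; the authoritative grammar is the generated EQL parser, not this regex, and the regex deliberately ignores fallback expressions like |'fallback.here':

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Approximate character class for variable references after the fixes:
	// letters, digits, '.', '_', '-', '/' and ':' inside ${...}.
	ref := regexp.MustCompile(`\$\{[\w./:-]+\}`)
	for _, s := range []string{"${data.some-var}", "${kubernetes.pod/name}", "${dynamic.lookup}"} {
		fmt.Println(s, "matches:", ref.MatchString(s))
	}
}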
* run antlr4
* Fix tests
* Fix Elastic Agent non-fleet broken upgrade between 8.3.x releases (#701)
* Fix Elastic Agent non-fleet broken upgrade between 8.3.x releases
* Migrate the vault directory on Linux and Windows to the top directory of the agent, so it can be shared without needing the upgrade handler call, for example with side-by-side install/upgrade from .rpm/.deb
* Extend the vault to allow read-only open, useful when the vault at a particular location needs only to be read, not created.
* Correct the typo in the log messages
* Update lint-flagged function comment with 'unused'; it was flagged with 'deadcode' on the previous run
* Address code review feedback
* Add missing import for linux utz
* Change vault path from Top() to Config(); this is a better location, next to fleet.enc, based on the install/upgrade testing with .rpm/.deb installs
* Fix the missing state migration for .rpm/.deb upgrade. The post-install script now performs the migration and creates the symlink after that.
* Fix typo in the postinstall script
* Update the vault migration code, add the agent configuration match check with the agent secret
* [Automation] Update elastic stack version to 8.4.0-31269fd2 for testing (#746)
Co-authored-by: apmmachine
* wrap errors and fix some docs typos and conventions (#743)
* automate the ironbank docker context generation (#679)
* Update README.md
Add M1 variables to export to be able to build AMD images
* fix flaky (#730)
* Add filestream ID on standalone kubernetes manifest (#742)
This commit adds unique IDs for the filestream inputs used by the Kubernetes integration in the Elastic-Agent standalone Kubernetes configuration/manifest file.
* Alter github action to run on different OSs (#769)
Alter the linter action to run on different OSs instead of on linux with the $GOOS env var.
* [Automation] Update elastic stack version to 8.4.0-d058e92f for testing (#771)
Co-authored-by: apmmachine
* elastic-agent manifests: add comments; add cloudnative team as a codeowner for the k8s manifests (#708)
* managed elastic-agent: add comments; add cloudnative team as a codeowner for the k8s manifests
Signed-off-by: Tetiana Kravchenko
* add comments to the standalone elastic-agent, similar to the documentation we have https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html
Signed-off-by: Tetiana Kravchenko
* Apply suggestions from code review
Co-authored-by: Michael Katsoulis
Co-authored-by: Andrew Gizas
* remove comment for FLEET_ENROLLMENT_TOKEN; use Needed everywhere instead of Required
Signed-off-by: Tetiana Kravchenko
* rephrase regarding accessing kube-state-metrics when third-party tools, like kube-rbac-proxy, are used
Signed-off-by: Tetiana Kravchenko
* run make check
Signed-off-by: Tetiana Kravchenko
* keep manifests in sync to pass ci check
Signed-off-by: Tetiana Kravchenko
* add info on where to find FLEET_URL and FLEET_ENROLLMENT_TOKEN
Signed-off-by: Tetiana Kravchenko
* add links to elastic-agent documentation
Signed-off-by: Tetiana Kravchenko
* update comment on FLEET_ENROLLMENT_TOKEN
Signed-off-by: Tetiana Kravchenko
Co-authored-by: Michael Katsoulis
Co-authored-by: Andrew Gizas
* [Elastic-Agent] Added source uri reloading (#686)
* Update will clean up unneeded artifacts. (#752)
* Update will clean up unneeded artifacts.
The update process will clean up unneeded artifacts. When an update starts, all artifacts that do not have the current version number in their names will be removed. If artifact retrieval fails, downloaded artifacts are removed.
On a successful upgrade, all contents of the downloads dir will be removed.
* Clean up linter warnings
* Wrap errors
* cleanup tests
* Fix passed version
* Use os.RemoveAll
* ci: propagate e2e-testing errors (#695)
* [Release] add-backport-next (#784)
* Update go.sum.
* Fix upgrade.
* Fix the upgrade artifact reload.
* Fix lint in coordinator.
Co-authored-by: apmmachine <58790750+apmmachine@users.noreply.github.com>
Co-authored-by: apmmachine
Co-authored-by: Pier-Hugues Pellerin
Co-authored-by: Denis Rechkunov
Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com>
Co-authored-by: Aleksandr Maus
Co-authored-by: Victor Martinez
Co-authored-by: Manuel de la Peña
Co-authored-by: Anderson Queiroz
Co-authored-by: Daniel Araujo Almeida
Co-authored-by: Mariana Dima
Co-authored-by: ofiriro3
Co-authored-by: Julien Lind
Co-authored-by: Craig MacKenzie
Co-authored-by: Tiago Queiroz
Co-authored-by: Pierre HILBERT
Co-authored-by: Tetiana Kravchenko
Co-authored-by: Michael Katsoulis
Co-authored-by: Andrew Gizas
Co-authored-by: Michal Pristas
Co-authored-by: Elastic Machine
---
.ci/Jenkinsfile | 23 +- .ci/jobs/elastic-agent-mbp.yml | 5 +- .github/CODEOWNERS | 4 +- .../workflows/elastic-agent-project-board.yml | 2 +- .github/workflows/golangci-lint.yml | 14 +- .gitignore | 1 - .mergify.yml | 31 +- CHANGELOG.asciidoc | 215 +- CHANGELOG.next.asciidoc | 326 +-- NOTICE.txt | 16 +- README.md | 11 +- .../elastic-agent-managed-kubernetes.yaml | 38 +- .../elastic-agent-managed-daemonset.yaml | 31 +- .../elastic-agent-managed-role.yaml | 7 +- .../elastic-agent-standalone-kubernetes.yaml | 109 +- ...-agent-standalone-daemonset-configmap.yaml | 91 +- .../elastic-agent-standalone-daemonset.yaml | 11 + .../elastic-agent-standalone-role.yaml | 7 +- dev-tools/packaging/files/ironbank/LICENSE | 280 +++ .../files/ironbank/config/docker-entrypoint | 11 + dev-tools/packaging/packages.yml | 1812 ++++++++--------- .../templates/ironbank/Dockerfile.tmpl | 90 + .../templates/ironbank/README.md.tmpl | 43 + .../ironbank/hardening_manifest.yaml.tmpl | 68 + .../templates/linux/postinstall.sh.tmpl | 38 + go.mod | 7 +- go.sum | 18 +- .../handlers/handler_action_policy_change.go | 2 +- internal/pkg/agent/application/application.go | 4 +- .../application/coordinator/coordinator.go | 20 +- .../agent/application/coordinator/handler.go | 47 + .../pkg/agent/application/info/agent_id.go | 16 +- .../agent/application/info/agent_metadata.go | 7 +- .../agent/application/paths/paths_linux.go | 2 +- .../agent/application/paths/paths_windows.go | 2 +- .../pkg/agent/application/secret/secret.go | 42 +- .../application/upgrade/artifact/config.go | 5 +- .../artifact/download/http/downloader_test.go | 13 +- .../pkg/agent/application/upgrade/cleanup.go | 36 + .../agent/application/upgrade/cleanup_test.go | 44 + .../agent/application/upgrade/step_mark.go | 77 +- .../pkg/agent/application/upgrade/upgrade.go | 175 +- internal/pkg/agent/cleaner/cleaner.go | 111 + internal/pkg/agent/cleaner/cleaner_test.go | 68 + internal/pkg/agent/cmd/diagnostics_test.go | 81 +- internal/pkg/agent/install/svc.go | 63 +- .../pkg/agent/migration/migrate_secret.go | 163 ++ .../agent/migration/migrate_secret_test.go | 387 ++++ .../pkg/agent/storage/encrypted_disk_store.go | 3 +- internal/pkg/agent/transpiler/rules.go | 53 + internal/pkg/agent/transpiler/rules_test.go | 108 + internal/pkg/agent/transpiler/vars.go | 2 +- internal/pkg/agent/transpiler/vars_test.go | 30 +- internal/pkg/agent/vault/seed.go | 27 + internal/pkg/agent/vault/seed_test.go |
43 +- internal/pkg/agent/vault/vault_darwin.go | 8 +- internal/pkg/agent/vault/vault_linux.go | 28 +- internal/pkg/agent/vault/vault_options.go | 28 + internal/pkg/agent/vault/vault_windows.go | 29 +- .../providers/kubernetes/node_test.go | 18 +- .../providers/kubernetes/pod_test.go | 46 +- .../providers/kubernetes/service_test.go | 16 +- internal/pkg/eql/Eql.g4 | 2 +- internal/pkg/eql/eql_test.go | 23 +- internal/pkg/eql/parser/EqlLexer.interp | 2 +- internal/pkg/eql/parser/eql_lexer.go | 155 +- internal/pkg/fileutil/fileutil.go | 46 + magefile.go | 109 + testing/environments/snapshot.yml | 4 +- 69 files changed, 3896 insertions(+), 1558 deletions(-) create mode 100644 dev-tools/packaging/files/ironbank/LICENSE create mode 100644 dev-tools/packaging/files/ironbank/config/docker-entrypoint create mode 100644 dev-tools/packaging/templates/ironbank/Dockerfile.tmpl create mode 100644 dev-tools/packaging/templates/ironbank/README.md.tmpl create mode 100644 dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl create mode 100644 dev-tools/packaging/templates/linux/postinstall.sh.tmpl create mode 100644 internal/pkg/agent/application/coordinator/handler.go create mode 100644 internal/pkg/agent/application/upgrade/cleanup.go create mode 100644 internal/pkg/agent/application/upgrade/cleanup_test.go create mode 100644 internal/pkg/agent/cleaner/cleaner.go create mode 100644 internal/pkg/agent/cleaner/cleaner_test.go create mode 100644 internal/pkg/agent/migration/migrate_secret.go create mode 100644 internal/pkg/agent/migration/migrate_secret_test.go create mode 100644 internal/pkg/agent/vault/vault_options.go create mode 100644 internal/pkg/fileutil/fileutil.go diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index bcacd5e1be3..78078f79358 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -50,7 +50,7 @@ pipeline { dir("${BASE_DIR}"){ setEnvVar('ONLY_DOCS', isGitRegionMatch(patterns: [ '.*\\.(asciidoc|md)' ], shouldMatchAll: true).toString()) setEnvVar('PACKAGING_CHANGES', isGitRegionMatch(patterns: [ '(^dev-tools/packaging/.*|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) - setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc)' ], shouldMatchAll: false).toString()) + setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) } } } @@ -79,7 +79,7 @@ pipeline { axes { axis { name 'PLATFORM' - values 'ubuntu-20.04 && immutable', 'aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'darwin && orka && x86_64' + values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'macos12 && x86_64' } } stages { @@ -164,7 +164,7 @@ pipeline { } } environment { - ARCH = "${PLATFORM.equals('aarch64') ? 'arm64' : 'amd64'}" + ARCH = "${PLATFORM.contains('aarch64') ? 
'arm64' : 'amd64'}" DEV = true EXTERNAL = true } @@ -175,7 +175,7 @@ pipeline { withMageEnv(){ dir("${BASE_DIR}"){ withPackageEnv("${PLATFORM}") { - cmd(label: 'Go package', script: 'mage package') + cmd(label: 'Go package', script: 'mage package ironbank') uploadPackagesToGoogleBucket( credentialsId: env.JOB_GCS_EXT_CREDENTIALS, repo: env.REPO, @@ -219,7 +219,7 @@ pipeline { axes { axis { name 'K8S_VERSION' - values "v1.24.0, v1.23.6, v1.22.9, v1.21.12" + values "v1.24.0", "v1.23.6", "v1.22.9", "v1.21.12" } } stages { @@ -251,18 +251,23 @@ pipeline { } steps { // TODO: what's the testMatrixFile to be used if any - runE2E(testMatrixFile: '', + runE2E(testMatrixFile: '.ci/.e2e-tests-for-elastic-agent.yaml', beatVersion: "${env.BEAT_VERSION}-SNAPSHOT", elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", gitHubCheckName: "e2e-tests", gitHubCheckRepo: env.REPO, - gitHubCheckSha1: env.GIT_BASE_COMMIT) + gitHubCheckSha1: env.GIT_BASE_COMMIT, + propagate: true, + wait: true) } } } post { cleanup { - notifyBuildResult(prComment: true) + notifyBuildResult(prComment: true, + analyzeFlakey: !isTag(), jobName: getFlakyJobName(withBranch: (isPR() ? env.CHANGE_TARGET : env.BRANCH_NAME)), + githubIssue: isBranch() && currentBuild.currentResult != "SUCCESS", + githubLabels: 'Team:Elastic-Agent-Control-Plane') } } } @@ -274,7 +279,7 @@ def isCodeCoverageEnabled() { def withPackageEnv(platform, Closure body) { if (isUnix()) { - if (platform.contains('macosx')) { + if (isDarwin()) { withPackageDarwinEnv() { body() } diff --git a/.ci/jobs/elastic-agent-mbp.yml b/.ci/jobs/elastic-agent-mbp.yml index f3772fd3855..8947d15880a 100644 --- a/.ci/jobs/elastic-agent-mbp.yml +++ b/.ci/jobs/elastic-agent-mbp.yml @@ -2,7 +2,7 @@ - job: name: "elastic-agent/elastic-agent-mbp" display-name: elastic-agent - description: "POC to isolate elastic agent from beats" + description: "Elastic agent" project-type: multibranch script-path: .ci/Jenkinsfile scm: @@ -12,6 +12,7 @@ discover-pr-forks-trust: permission discover-pr-origin: merge-current discover-tags: true + head-filter-regex: '(main|7\.17|8\.\d+|PR-.*|v\d+\.\d+\.\d+)' notification-context: 'fleet-ci' repo: elastic-agent repo-owner: elastic @@ -39,4 +40,4 @@ timeout: 100 timeout: '15' use-author: true - wipe-workspace: 'True' + wipe-workspace: true diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index bc147bf0680..d8bc0072d7b 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,2 +1,4 @@ -# Team responsable for Fleet Server +# Team responsible for Fleet Server * @elastic/elastic-agent-control-plane + +/deploy/kubernetes @elastic/obs-cloudnative-monitoring diff --git a/.github/workflows/elastic-agent-project-board.yml b/.github/workflows/elastic-agent-project-board.yml index 1b296620b09..e6add0d093c 100644 --- a/.github/workflows/elastic-agent-project-board.yml +++ b/.github/workflows/elastic-agent-project-board.yml @@ -14,7 +14,7 @@ jobs: with: headers: '{"GraphQL-Features": "projects_next_graphql"}' query: | - mutation add_to_project($projectid:String!,$contentid:String!) { + mutation add_to_project($projectid:[ID!]!,$contentid:ID!) 
{ updateIssue(input: {id:$contentid, projectIds:$projectid}) { clientMutationId } diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 79a22cbabc5..8079fe1c673 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -14,18 +14,10 @@ jobs: golangci: strategy: matrix: - include: - - GOOS: windows - - GOOS: linux - - GOOS: darwin + os: [ ubuntu-latest, macos-latest, windows-latest ] name: lint - runs-on: ubuntu-latest + runs-on: ${{ matrix.os }} steps: - - name: Echo details - env: - GOOS: ${{ matrix.GOOS }} - run: echo Go GOOS=$GOOS - - uses: actions/checkout@v2 # Uses Go version from the repository. @@ -38,8 +30,6 @@ jobs: go-version: "${{ steps.goversion.outputs.version }}" - name: golangci-lint - env: - GOOS: ${{ matrix.GOOS }} uses: golangci/golangci-lint-action@v2 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version diff --git a/.gitignore b/.gitignore index 3939307f99c..9940bf5068e 100644 --- a/.gitignore +++ b/.gitignore @@ -60,4 +60,3 @@ pkg/component/fake/fake # VSCode /.vscode - diff --git a/.mergify.yml b/.mergify.yml index 6e1e3c5f651..3fe46362854 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -181,55 +181,42 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.0 branch - conditions: - - merged - - label=backport-v8.0.0 - actions: - backport: - assignees: - - "{{ author }}" - branches: - - "8.0" - labels: - - "backport" - title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.1 branch + - name: backport patches to 8.2 branch conditions: - merged - - label=backport-v8.1.0 + - label=backport-v8.2.0 actions: backport: assignees: - "{{ author }}" branches: - - "8.1" + - "8.2" labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.2 branch + - name: backport patches to 8.3 branch conditions: - merged - - label=backport-v8.2.0 + - label=backport-v8.3.0 actions: backport: assignees: - "{{ author }}" branches: - - "8.2" + - "8.3" labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" - - name: backport patches to 8.3 branch + - name: backport patches to 8.4 branch conditions: - merged - - label=backport-v8.3.0 + - label=backport-v8.4.0 actions: backport: assignees: - "{{ author }}" branches: - - "8.3" + - "8.4" labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index fad7186655f..a2b19fb1a90 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -1,71 +1,74 @@ // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. 
-:issue: https://github.com/elastic/beats/issues/ -:pull: https://github.com/elastic/beats/pull/ +:issue-beats: https://github.com/elastic/beats/issues/ +:pull-beats: https://github.com/elastic/beats/pull/ + +:issue: https://github.com/elastic/elastic-agent/issues/ +:pull: https://github.com/elastic/elastic-agent/pull/ [[release-notes-7.9.0]] === Elastic Agent version 7.9.0 ==== Breaking changes -- Change fleet.yml structure, causes upgraded agent to register as new agent {pull}19248[19248] -- Remove obfuscation of fleet.yml, causes re-enroll of agent to Fleet {pull}19678[19678] -- Rename enroll --ca_sha256 to --ca-sha256 {pull}19900[19900] -- Rename enroll --certificate_authorities to --certificate-authorities {pull}19900[19900] -- Don't build 32 bits version of Elastic Agent. {issue}25533[25533] +- Change fleet.yml structure, causes upgraded agent to register as new agent {pull-beats}[19248] +- Remove obfuscation of fleet.yml, causes re-enroll of agent to Fleet {pull-beats}[19678] +- Rename enroll --ca_sha256 to --ca-sha256 {pull-beats}[19900] +- Rename enroll --certificate_authorities to --certificate-authorities {pull-beats}[19900] +- Don't build 32 bits version of Elastic Agent. {issue-beats}[25533] ==== Bugfixes -- Fix install service script for windows {pull}18814[18814] -- Properly stops subprocess on shutdown {pull}19567[19567] -- Forward revision number of the configuration to the endpoint. {pull}19759[19759] -- Remove support for logs type and use logfile {pull}19761[19761] -- Avoid comparing uncomparable types on enroll {issue}19976[19976] -- Fix issues with merging of elastic-agent.yml and fleet.yml {pull}20026[20026] -- Unzip failures on Windows 8/Windows server 2012 {pull}20088[20088] -- Fix failing unit tests on windows {pull}20127[20127] -- Prevent closing closed reader {pull}20214[20214] -- Improve GRPC stop to be more relaxed {pull}20118[20118] -- Fix Windows service installation script {pull}20203[20203] -- Fix timeout issue stopping service applications {pull}20256[20256] -- Fix incorrect hash when upgrading agent {pull}22322[22322] -- Fix refresh of monitoring configuration {pull}23619[23619] -- Fixed nil pointer during unenroll {pull}23609[23609] -- Fixed reenroll scenario {pull}23686[23686] -- Fixed Monitoring filebeat and metricbeat not connecting to Agent over GRPC {pull}23843[23843] -- Fixed make status readable in the log. {pull}23849[23849] -- Windows agent doesn't uninstall with a lowercase `c:` drive in the path {pull}23998[23998] -- Fix reloading of log level for services {pull}24055[24055] -- Fix: Successfully installed and enrolled agent running standalone{pull}24128[24128] -- Make installer atomic on windows {pull}24253[24253] -- Remove installed services on agent uninstall {pull}24151[24151] -- Fix failing installation on windows 7 {pull}24387[24387] -- Fix capabilities resolution in inspect command {pull}24346[24346] -- Fix windows installer during enroll {pull}24343[24343] -- Logging to file disabled on enroll {issue}24173[24173] -- Prevent uninstall failures on empty config {pull}24838[24838] -- Fix issue with FLEET_CA not being used with Fleet Server in container {pull}26529[26529] +- Fix install service script for windows {pull-beats}[18814] +- Properly stops subprocess on shutdown {pull-beats}[19567] +- Forward revision number of the configuration to the endpoint. 
{pull-beats}[19759] +- Remove support for logs type and use logfile {pull-beats}[19761] +- Avoid comparing uncomparable types on enroll {issue-beats}[19976] +- Fix issues with merging of elastic-agent.yml and fleet.yml {pull-beats}[20026] +- Unzip failures on Windows 8/Windows server 2012 {pull-beats}[20088] +- Fix failing unit tests on windows {pull-beats}[20127] +- Prevent closing closed reader {pull-beats}[20214] +- Improve GRPC stop to be more relaxed {pull-beats}[20118] +- Fix Windows service installation script {pull-beats}[20203] +- Fix timeout issue stopping service applications {pull-beats}[20256] +- Fix incorrect hash when upgrading agent {pull-beats}[22322] +- Fix refresh of monitoring configuration {pull-beats}[23619] +- Fixed nil pointer during unenroll {pull-beats}[23609] +- Fixed reenroll scenario {pull-beats}[23686] +- Fixed Monitoring filebeat and metricbeat not connecting to Agent over GRPC {pull-beats}[23843] +- Fixed make status readable in the log. {pull-beats}[23849] +- Windows agent doesn't uninstall with a lowercase `c:` drive in the path {pull-beats}[23998] +- Fix reloading of log level for services {pull-beats}[24055] +- Fix: Successfully installed and enrolled agent running standalone{pull-beats}[24128] +- Make installer atomic on windows {pull-beats}[24253] +- Remove installed services on agent uninstall {pull-beats}[24151] +- Fix failing installation on windows 7 {pull-beats}[24387] +- Fix capabilities resolution in inspect command {pull-beats}[24346] +- Fix windows installer during enroll {pull-beats}[24343] +- Logging to file disabled on enroll {issue-beats}[24173] +- Prevent uninstall failures on empty config {pull-beats}[24838] +- Fix issue with FLEET_CA not being used with Fleet Server in container {pull-beats}[26529] ==== New features -- Change monitoring defaults for agent {pull}18927[18927] -- Agent verifies packages before using them {pull}18876[18876] -- Change stream.* to dataset.* fields {pull}18967[18967] -- Agent now runs the GRPC server and spawned application connect by to Agent {pull}18973[18973] -- Rename input.type logs to logfile {pull}19360[19360] -- Agent now installs/uninstalls Elastic Endpoint {pull}19248[19248] -- Agent now downloads Elastic Endpoint {pull}19503[19503] -- Refuse invalid stream values in configuration {pull}19587[19587] -- Agent now load balances across multiple Kibana instances {pull}19628[19628] -- Configuration cleanup {pull}19848[19848] -- Agent now sends its own logs to elasticsearch {pull}19811[19811] -- Add --insecure option to enroll command {pull}19900[19900] -- Will retry to enroll if the server return a 429. 
{pull}19918[19811] -- Add --staging option to enroll command {pull}20026[20026] -- Add `event.dataset` to all events {pull}20076[20076] -- Send datastreams fields {pull}20416[20416] -- Agent supports capabilities definition {pull}23848[23848] -- Restart process on output change {pull}24907[24907] +- Change monitoring defaults for agent {pull-beats}[18927] +- Agent verifies packages before using them {pull-beats}[18876] +- Change stream.* to dataset.* fields {pull-beats}[18967] +- Agent now runs the GRPC server and spawned application connect by to Agent {pull-beats}[18973] +- Rename input.type logs to logfile {pull-beats}[19360] +- Agent now installs/uninstalls Elastic Endpoint {pull-beats}[19248] +- Agent now downloads Elastic Endpoint {pull-beats}[19503] +- Refuse invalid stream values in configuration {pull-beats}[19587] +- Agent now load balances across multiple Kibana instances {pull-beats}[19628] +- Configuration cleanup {pull-beats}[19848] +- Agent now sends its own logs to elasticsearch {pull-beats}[19811] +- Add --insecure option to enroll command {pull-beats}[19900] +- Will retry to enroll if the server return a 429. {pull-beats}[19811] +- Add --staging option to enroll command {pull-beats}[20026] +- Add `event.dataset` to all events {pull-beats}[20076] +- Send datastreams fields {pull-beats}[20416] +- Agent supports capabilities definition {pull-beats}[23848] +- Restart process on output change {pull-beats}[24907] === Docs @@ -75,61 +78,61 @@ === Elastic Agent version 7.8.0 ==== Breaking changes -- Rename agent to elastic-agent {pull}17391[17391] +- Rename agent to elastic-agent {pull-beats}[17391] ==== Bugfixes -- Fixed tests on windows {pull}16922[16922] -- Fixed installers for SNAPSHOTs and windows {pull}17077[17077] -- Fixed merge of config {pull}17399[17399] -- Handle abs paths on windows correctly {pull}17461[17461] -- Improved cancellation of agent {pull}17318[17318] -- Fixed process spawning on Windows {pull}17751[17751] -- Fix issues when running `mage package` for all the platforms. {pull}17767[17767] -- Rename the User-Agent string from Beats Agent to Elastic Agent. {pull}17765[17765] -- Remove the kbn-version on each request to the Kibana API. {pull}17764[17764] -- Fixed injected log path to monitoring beat {pull}17833[17833] -- Make sure that the Elastic Agent connect over TLS in cloud. {pull}17843[17843] -- Moved stream.* fields to top of event {pull}17858[17858] -- Use /tmp for default monitoring endpoint location for libbeat {pull}18131[18131] -- Use default output by default {pull}18091[18091] -- Fix panic and flaky tests for the Agent. {pull}18135[18135] -- Fix default configuration after enroll {pull}18232[18232] -- Fix make sure the collected logs or metrics include streams information. {pull}18261[18261] -- Fix version to 7.8 {pull}18286[18286] -- Fix an issue where the checkin_frequency, jitter, and backoff options where not configurable. {pull}17843[17843] -- Ensure that the beats uses the params prefer_v2_templates on bulk request. {pull}18318[18318] -- Stop monitoring on config change {pull}18284[18284] -- Enable more granular control of monitoring {pull}18346[18346] -- Fix jq: command not found {pull}18408[18408] -- Avoid Chown on windows {pull}18512[18512] -- Clean action store after enrolling to new configuration {pull}18656[18656] -- Avoid watching monitor logs {pull}18723[18723] -- Correctly report platform and family. 
{issue}18665[18665] -- Guard against empty stream.datasource and namespace {pull}18769[18769] -- Fix install service script for windows {pull}18814[18814] +- Fixed tests on windows {pull-beats}[16922] +- Fixed installers for SNAPSHOTs and windows {pull-beats}[17077] +- Fixed merge of config {pull-beats}[17399] +- Handle abs paths on windows correctly {pull-beats}[17461] +- Improved cancellation of agent {pull-beats}[17318] +- Fixed process spawning on Windows {pull-beats}[17751] +- Fix issues when running `mage package` for all the platforms. {pull-beats}[17767] +- Rename the User-Agent string from Beats Agent to Elastic Agent. {pull-beats}[17765] +- Remove the kbn-version on each request to the Kibana API. {pull-beats}[17764] +- Fixed injected log path to monitoring beat {pull-beats}[17833] +- Make sure that the Elastic Agent connect over TLS in cloud. {pull-beats}[17843] +- Moved stream.* fields to top of event {pull-beats}[17858] +- Use /tmp for default monitoring endpoint location for libbeat {pull-beats}[18131] +- Use default output by default {pull-beats}[18091] +- Fix panic and flaky tests for the Agent. {pull-beats}[18135] +- Fix default configuration after enroll {pull-beats}[18232] +- Fix make sure the collected logs or metrics include streams information. {pull-beats}[18261] +- Fix version to 7.8 {pull-beats}[18286] +- Fix an issue where the checkin_frequency, jitter, and backoff options where not configurable. {pull-beats}[17843] +- Ensure that the beats uses the params prefer_v2_templates on bulk request. {pull-beats}[18318] +- Stop monitoring on config change {pull-beats}[18284] +- Enable more granular control of monitoring {pull-beats}[18346] +- Fix jq: command not found {pull-beats}[18408] +- Avoid Chown on windows {pull-beats}[18512] +- Clean action store after enrolling to new configuration {pull-beats}[18656] +- Avoid watching monitor logs {pull-beats}[18723] +- Correctly report platform and family. {issue-beats}[18665] +- Guard against empty stream.datasource and namespace {pull-beats}[18769] +- Fix install service script for windows {pull-beats}[18814] ==== New features -- Generate index name in a format type-dataset-namespace {pull}16903[16903] -- OS agnostic default configuration {pull}17016[17016] -- Introduced post install hooks {pull}17241[17241] -- Support for config constraints {pull}17112[17112] -- Introduced `mage demo` command {pull}17312[17312] -- Display the stability of the agent at enroll and start. {pull}17336[17336] -- Expose stream.* variables in events {pull}17468[17468] -- Monitoring configuration reloadable {pull}17855[17855] -- Pack ECS metadata to request payload send to fleet {pull}17894[17894] -- Allow CLI overrides of paths {pull}17781[17781] -- Enable Filebeat input: S3, Azureeventhub, cloudfoundry, httpjson, netflow, o365audit. {pull}17909[17909] -- Configurable log level {pull}18083[18083] -- Use data subfolder as default for process logs {pull}17960[17960] -- Enable introspecting configuration {pull}18124[18124] -- Follow home path for all config files {pull}18161[18161] -- Do not require unnecessary configuration {pull}18003[18003] -- Use nested objects so fleet can handle metadata correctly {pull}18234[18234] -- Enable debug log level for Metricbeat and Filebeat when run under the Elastic Agent. 
{pull}17935[17935] -- Pick up version from libbeat {pull}18350[18350] -- More clear output of inspect command {pull}18405[18405] -- When not port are specified and the https is used fallback to 443 {pull}18844[18844] -- Basic upgrade process {pull}21002[21002] +- Generate index name in a format type-dataset-namespace {pull-beats}[16903] +- OS agnostic default configuration {pull-beats}[17016] +- Introduced post install hooks {pull-beats}[17241] +- Support for config constraints {pull-beats}[17112] +- Introduced `mage demo` command {pull-beats}[17312] +- Display the stability of the agent at enroll and start. {pull-beats}[17336] +- Expose stream.* variables in events {pull-beats}[17468] +- Monitoring configuration reloadable {pull-beats}[17855] +- Pack ECS metadata to request payload send to fleet {pull-beats}[17894] +- Allow CLI overrides of paths {pull-beats}[17781] +- Enable Filebeat input: S3, Azureeventhub, cloudfoundry, httpjson, netflow, o365audit. {pull-beats}[17909] +- Configurable log level {pull-beats}[18083] +- Use data subfolder as default for process logs {pull-beats}[17960] +- Enable introspecting configuration {pull-beats}[18124] +- Follow home path for all config files {pull-beats}[18161] +- Do not require unnecessary configuration {pull-beats}[18003] +- Use nested objects so fleet can handle metadata correctly {pull-beats}[18234] +- Enable debug log level for Metricbeat and Filebeat when run under the Elastic Agent. {pull-beats}[17935] +- Pick up version from libbeat {pull-beats}[18350] +- More clear output of inspect command {pull-beats}[18405] +- When not port are specified and the https is used fallback to 443 {pull-beats}[18844] +- Basic upgrade process {pull-beats}[21002] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2c3e563cf21..2361baf73f5 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -1,179 +1,189 @@ // Use these for links to issue and pulls. Note issues and pulls redirect one to // each other on Github, so don't worry too much on using the right prefix. -:issue: https://github.com/elastic/beats/issues/ -:pull: https://github.com/elastic/beats/pull/ +:issue-beats: https://github.com/elastic/beats/issues/ +:pull-beats: https://github.com/elastic/beats/pull/ + +:issue: https://github.com/elastic/elastic-agent/issues/ +:pull: https://github.com/elastic/elastic-agent/pull/ === Elastic Agent version HEAD ==== Breaking changes -- Docker container is not run as root by default. {pull}21213[21213] -- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull}24713[24713] -- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull}25186[25186] -- Remove the `--kibana-url` from `install` and `enroll` command. {pull}25529[25529] -- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull}25723[25723] -- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull}28006[28006] -- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull}28165[28165] -- Remove username/password for fleet-server authentication. {pull}29458[29458] +- Docker container is not run as root by default. {pull-beats}[21213] +- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull-beats}[24713] +- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. 
{pull-beats}[25186] +- Remove the `--kibana-url` from `install` and `enroll` command. {pull-beats}[25529] +- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull-beats}[25723] +- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull-beats}[28006] +- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull-beats}[28165] +- Remove username/password for fleet-server authentication. {pull-beats}[29458] ==== Bugfixes -- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull}20779[20779] -- Thread safe sorted set {pull}21290[21290] -- Copy Action store on upgrade {pull}21298[21298] -- Include inputs in action store actions {pull}21298[21298] -- Fix issue where inputs without processors defined would panic {pull}21628[21628] -- Prevent reporting ecs version twice {pull}21616[21616] -- Partial extracted beat result in failure to spawn beat {issue}21718[21718] -- Use symlink path for reexecutions {pull}21835[21835] -- Use ML_SYSTEM to detect if agent is running as a service {pull}21884[21884] -- Use local temp instead of system one {pull}21883[21883] -- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull}21932[21932] -- Fix issue with named pipes on Windows 7 {pull}21931[21931] -- Fix missing elastic_agent event data {pull}21994[21994] -- Ensure shell wrapper path exists before writing wrapper on install {pull}22144[22144] -- Fix deb/rpm packaging for Elastic Agent {pull}22153[22153] -- Fix composable input processor promotion to fix duplicates {pull}22344[22344] -- Fix sysv init files for deb/rpm installation {pull}22543[22543] -- Fix shell wrapper for deb/rpm packaging {pull}23038[23038] -- Fixed parsing of npipe URI {pull}22978[22978] -- Select default agent policy if no enrollment token provided. {pull}23973[23973] -- Remove artifacts on transient download errors {pull}23235[23235] -- Support for linux/arm64 {pull}23479[23479] -- Skip top level files when unziping archive during upgrade {pull}23456[23456] -- Do not take ownership of Endpoint log path {pull}23444[23444] -- Fixed fetching DBus service PID {pull}23496[23496] -- Fix issue of missing log messages from filebeat monitor {pull}23514[23514] -- Increase checkin grace period to 30 seconds {pull}23568[23568] -- Fix libbeat from reporting back degraded on config update {pull}23537[23537] -- Rewrite check if agent is running with admin rights on Windows {pull}23970[23970] -- Fix issues with dynamic inputs and conditions {pull}23886[23886] -- Fix bad substitution of API key. {pull}24036[24036] -- Fix docker enrollment issue related to Fleet Server change. {pull}24155[24155] -- Improve log on failure of Endpoint Security installation. {pull}24429[24429] -- Verify communication to Kibana before updating Fleet client. {pull}24489[24489] -- Fix nil pointer when null is generated as list item. {issue}23734[23734] -- Add support for filestream input. {pull}24820[24820] -- Add check for URL set when cert and cert key. 
{pull}24904[24904] -- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull}24981[24981] -- Respect host configuration for exposed processes endpoint {pull}25114[25114] -- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull}25137[25137] -- Fixed: limit for retries to Kibana configurable {issue}25063[25063] -- Fix issue with status and inspect inside of container {pull}25204[25204] -- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull}25149[25149] -- Reduce log level for listener cleanup to debug {pull}25274 -- Passing in policy id to container command works {pull}25352[25352] -- Reduce log level for listener cleanup to debug {pull}25274[25274] -- Delay the restart of application when a status report of failure is given {pull}25339[25339] -- Don't log when upgrade capability doesn't apply {pull}25386[25386] -- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue}25371[25371] -- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue}24453[24453] -- Fix AckBatch to do nothing when no actions passed {pull}25562[25562] -- Add error log entry when listener creation fails {issue}23483[23482] -- Handle case where policy doesn't contain Fleet connection information {pull}25707[25707] -- Fix fleet-server.yml spec to not overwrite existing keys {pull}25741[25741] -- Agent sends wrong log level to Endpoint {issue}25583[25583] -- Fix startup with failing configuration {pull}26057[26057] -- Change timestamp in elatic-agent-json.log to use UTC {issue}25391[25391] -- Fix add support for Logstash output. {pull}24305[24305] -- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull}26583[26583] -- Fix issue where proxy enrollment options broke enrollment command. {pull}26749[26749] -- Remove symlink.prev from previously failed upgrade {pull}26785[26785] -- Fix apm-server supported outputs not being in sync with supported output types. {pull}26885[26885] -- Set permissions during installation {pull}26665[26665] -- Disable monitoring during fleet-server bootstrapping. {pull}27222[27222] -- Fix issue with atomic extract running in K8s {pull}27396[27396] -- Fix issue with install directory in state path in K8s {pull}27396[27396] -- Disable monitoring during fleet-server bootstrapping. {pull}27222[27222] -- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue}27670[27670] {pull}27671[27671] -- Add validation for certificate flags to ensure they are absolute paths. {pull}27779[27779] -- Migrate state on upgrade {pull}27825[27825] -- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue}25449[25449] -- Ignore ErrNotExists when fixing permissions. {issue}27836[27836] {pull}27846[27846] -- Snapshot artifact lookup will use agent.download proxy settings. {issue}27903[27903] {pull}27904[27904] -- Fix lazy acker to only add new actions to the batch. {pull}27981[27981] -- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull}28260[28260] -- Fix agent configuration overwritten by default fleet config. {pull}29297[29297] -- Allow agent containers to use basic auth to create a service token. {pull}29651[29651] -- Fix issue where a failing artifact verification does not remove the bad artifact. 
{pull}30281[30281] -- Reduce Elastic Agent shut down time by stopping processes concurrently {pull}29650[29650] +- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull-beats}[20779] +- Thread safe sorted set {pull-beats}[21290] +- Copy Action store on upgrade {pull-beats}[21298] +- Include inputs in action store actions {pull-beats}[21298] +- Fix issue where inputs without processors defined would panic {pull-beats}[21628] +- Prevent reporting ecs version twice {pull-beats}[21616] +- Partial extracted beat result in failure to spawn beat {issue-beats}[21718] +- Use symlink path for reexecutions {pull-beats}[21835] +- Use ML_SYSTEM to detect if agent is running as a service {pull-beats}[21884] +- Use local temp instead of system one {pull-beats}[21883] +- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull-beats}[21932] +- Fix issue with named pipes on Windows 7 {pull-beats}[21931] +- Fix missing elastic_agent event data {pull-beats}[21994] +- Ensure shell wrapper path exists before writing wrapper on install {pull-beats}[22144] +- Fix deb/rpm packaging for Elastic Agent {pull-beats}[22153] +- Fix composable input processor promotion to fix duplicates {pull-beats}[22344] +- Fix sysv init files for deb/rpm installation {pull-beats}[22543] +- Fix shell wrapper for deb/rpm packaging {pull-beats}[23038] +- Fixed parsing of npipe URI {pull-beats}[22978] +- Select default agent policy if no enrollment token provided. {pull-beats}[23973] +- Remove artifacts on transient download errors {pull-beats}[23235] +- Support for linux/arm64 {pull-beats}[23479] +- Skip top level files when unziping archive during upgrade {pull-beats}[23456] +- Do not take ownership of Endpoint log path {pull-beats}[23444] +- Fixed fetching DBus service PID {pull-beats}[23496] +- Fix issue of missing log messages from filebeat monitor {pull-beats}[23514] +- Increase checkin grace period to 30 seconds {pull-beats}[23568] +- Fix libbeat from reporting back degraded on config update {pull-beats}[23537] +- Rewrite check if agent is running with admin rights on Windows {pull-beats}[23970] +- Fix issues with dynamic inputs and conditions {pull-beats}[23886] +- Fix bad substitution of API key. {pull-beats}[24036] +- Fix docker enrollment issue related to Fleet Server change. {pull-beats}[24155] +- Improve log on failure of Endpoint Security installation. {pull-beats}[24429] +- Verify communication to Kibana before updating Fleet client. {pull-beats}[24489] +- Fix nil pointer when null is generated as list item. {issue-beats}[23734] +- Add support for filestream input. {pull-beats}[24820] +- Add check for URL set when cert and cert key. 
{pull-beats}[24904] +- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull-beats}[24981] +- Respect host configuration for exposed processes endpoint {pull-beats}[25114] +- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull-beats}[25137] +- Fixed: limit for retries to Kibana configurable {issue-beats}[25063] +- Fix issue with status and inspect inside of container {pull-beats}[25204] +- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull-beats}[25149] +- Reduce log level for listener cleanup to debug {pull-beats} +- Passing in policy id to container command works {pull-beats}[25352] +- Reduce log level for listener cleanup to debug {pull-beats}[25274] +- Delay the restart of application when a status report of failure is given {pull-beats}[25339] +- Don't log when upgrade capability doesn't apply {pull-beats}[25386] +- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue-beats}[25371] +- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue-beats}[24453] +- Fix AckBatch to do nothing when no actions passed {pull-beats}[25562] +- Add error log entry when listener creation fails {issue-beats}[23482] +- Handle case where policy doesn't contain Fleet connection information {pull-beats}[25707] +- Fix fleet-server.yml spec to not overwrite existing keys {pull-beats}[25741] +- Agent sends wrong log level to Endpoint {issue-beats}[25583] +- Fix startup with failing configuration {pull-beats}[26057] +- Change timestamp in elatic-agent-json.log to use UTC {issue-beats}[25391] +- Fix add support for Logstash output. {pull-beats}[24305] +- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull-beats}[26583] +- Fix issue where proxy enrollment options broke enrollment command. {pull-beats}[26749] +- Remove symlink.prev from previously failed upgrade {pull-beats}[26785] +- Fix apm-server supported outputs not being in sync with supported output types. {pull-beats}[26885] +- Set permissions during installation {pull-beats}[26665] +- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] +- Fix issue with atomic extract running in K8s {pull-beats}[27396] +- Fix issue with install directory in state path in K8s {pull-beats}[27396] +- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] +- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue-beats}[27670] {pull-beats}[27671] +- Add validation for certificate flags to ensure they are absolute paths. {pull-beats}[27779] +- Migrate state on upgrade {pull-beats}[27825] +- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue-beats}[25449] +- Ignore ErrNotExists when fixing permissions. {issue-beats}[27836] {pull-beats}[27846] +- Snapshot artifact lookup will use agent.download proxy settings. {issue-beats}[27903] {pull-beats}[27904] +- Fix lazy acker to only add new actions to the batch. {pull-beats}[27981] +- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull-beats}[28260] +- Fix agent configuration overwritten by default fleet config. {pull-beats}[29297] +- Allow agent containers to use basic auth to create a service token. {pull-beats}[29651] +- Fix issue where a failing artifact verification does not remove the bad artifact. 
{pull-beats}[30281] +- Reduce Elastic Agent shut down time by stopping processes concurrently {pull-beats}[29650] - Move `context cancelled` error from fleet gateway into debug level. {pull}187[187] - Update library containerd to 1.5.10. {pull}186[186] - Add fleet-server to output of elastic-agent inspect output command (and diagnostic bundle). {pull}243[243] - Update API calls that the agent makes to Kibana when running the container command. {pull}253[253] - diagnostics collect log names are fixed on Windows machines, command will ignore failures. AgentID is included in diagnostics(and diagnostics collect) output. {issue}81[81] {issue}92[92] {issue}190[190] {pull}262[262] - Collects stdout and stderr of applications run as a process and logs them. {issue}[88] +- Remove VerificationMode option to empty string. Default value is `full`. {issue}[184] +- diagnostics collect file mod times are set. {pull}570[570] +- Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] +- Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] +- Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] +- Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] +- Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] ==== New features -- Prepare packaging for endpoint and asc files {pull}20186[20186] -- Improved version CLI {pull}20359[20359] -- Enroll CLI now restarts running daemon {pull}20359[20359] -- Add restart CLI cmd {pull}20359[20359] -- Add new `synthetics/*` inputs to run Heartbeat {pull}20387[20387] -- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue}20312[20312] {pull}20713[20713] -- Add `docker` composable dynamic provider. {pull}20842[20842] -- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. {pull}20839[20839] -- Add support for EQL based condition on inputs {pull}20994[20994] -- Send `fleet.host.id` to Endpoint Security {pull}21042[21042] -- Add `install` and `uninstall` subcommands {pull}21206[21206] -- Use new form of fleet API paths {pull}21478[21478] -- Add `kubernetes` composable dynamic provider. 
{pull}21480[21480] -- Send updating state {pull}21461[21461] -- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull}21543[21543] -- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull}21425[21425] -- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull}21599[21599] -- Update `install` command to perform enroll before starting Elastic Agent {pull}21772[21772] -- Update `fleet.kibana.path` from a POLICY_CHANGE {pull}21804[21804] -- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull}21694[21694] -- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull}22352[22352] -- Ship `endpoint-security` logs to elasticsearch {pull}22526[22526] -- Log level reloadable from fleet {pull}22690[22690] -- Push log level downstream {pull}22815[22815] -- Add metrics collection for Agent {pull}22793[22793] -- Add support for Fleet Server {pull}23736[23736] -- Add support for enrollment with local bootstrap of Fleet Server {pull}23865[23865] -- Add TLS support for Fleet Server {pull}24142[24142] -- Add support for Fleet Server running under Elastic Agent {pull}24220[24220] -- Add CA support to Elastic Agent docker image {pull}24486[24486] -- Add k8s secrets provider for Agent {pull}24789[24789] -- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull}24817[24817] -- Add status subcommand {pull}24856[24856] -- Add leader_election provider for k8s {pull}24267[24267] -- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull}25083[25083] -- Keep http and logging config during enroll {pull}25132[25132] -- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull}25150[25150] -- Use `filestream` input for internal log collection. {pull}25660[25660] -- Enable agent to send custom headers to kibana/ES {pull}26275[26275] -- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue}21121[21121] {pull}26394[26394] {pull}26548[26548] -- Add proxy support to artifact downloader and communication with fleet server. {pull}25219[25219] -- Add proxy support to enroll command. {pull}26514[26514] -- Enable configuring monitoring namespace {issue}26439[26439] -- Communicate with Fleet Server over HTTP2. {pull}26474[26474] -- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue}26758[26758] {pull}26828[26828] -- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull}26801[26801] -- Increase Agent's mem limits in k8s. {pull}27153[27153] -- Add new --enroll-delay option for install and enroll commands. {pull}27118[27118] -- Add link to troubleshooting guide on fatal exits. {issue}26367[26367] {pull}27236[27236] -- Agent now adapts the beats queue size based on output settings. {issue}26638[26638] {pull}27429[27429] -- Support ephemeral containers in Kubernetes dynamic provider. {issue}27020[#27020] {pull}27707[27707] -- Add complete k8s metadata through composable provider. {pull}27691[27691] -- Add diagnostics command to gather beat metadata. {pull}28265[28265] -- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull}28461[28461] -- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull}28096[28096] -- Enable pprof endpoints for beats processes. 
Allow pprof endpoints for elastic-agent if enabled. {pull}28983[28983] -- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull}28798[28798] -- Allow pprof endpoints for elastic-agent or beats if enabled. {pull}28983[28983] {pull}29155[29155] -- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull}29128[29128] -- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull}23139[23139] -- Add results of inspect output command into archive produced by diagnostics collect. {pull}29902[29902] -- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull}30087[30087] -- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull}30289[30289] -- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull}30462[30462] -- Add action_input_type for the .fleet-actions-results {pull}30562[30562] -- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull}30471[30471] +- Prepare packaging for endpoint and asc files {pull-beats}[20186] +- Improved version CLI {pull-beats}[20359] +- Enroll CLI now restarts running daemon {pull-beats}[20359] +- Add restart CLI cmd {pull-beats}[20359] +- Add new `synthetics/*` inputs to run Heartbeat {pull-beats}[20387] +- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue-beats}[20312] {pull-beats}[20713] +- Add `docker` composable dynamic provider. {pull-beats}[20842] +- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. {pull-beats}[20839] +- Add support for EQL based condition on inputs {pull-beats}[20994] +- Send `fleet.host.id` to Endpoint Security {pull-beats}[21042] +- Add `install` and `uninstall` subcommands {pull-beats}[21206] +- Use new form of fleet API paths {pull-beats}[21478] +- Add `kubernetes` composable dynamic provider. 
{pull-beats}[21480] +- Send updating state {pull-beats}[21461] +- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull-beats}[21543] +- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull-beats}[21425] +- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull-beats}[21599] +- Update `install` command to perform enroll before starting Elastic Agent {pull-beats}[21772] +- Update `fleet.kibana.path` from a POLICY_CHANGE {pull-beats}[21804] +- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull-beats}[21694] +- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull-beats}[22352] +- Ship `endpoint-security` logs to elasticsearch {pull-beats}[22526] +- Log level reloadable from fleet {pull-beats}[22690] +- Push log level downstream {pull-beats}[22815] +- Add metrics collection for Agent {pull-beats}[22793] +- Add support for Fleet Server {pull-beats}[23736] +- Add support for enrollment with local bootstrap of Fleet Server {pull-beats}[23865] +- Add TLS support for Fleet Server {pull-beats}[24142] +- Add support for Fleet Server running under Elastic Agent {pull-beats}[24220] +- Add CA support to Elastic Agent docker image {pull-beats}[24486] +- Add k8s secrets provider for Agent {pull-beats}[24789] +- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull-beats}[24817] +- Add status subcommand {pull-beats}[24856] +- Add leader_election provider for k8s {pull-beats}[24267] +- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull-beats}[25083] +- Keep http and logging config during enroll {pull-beats}[25132] +- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull-beats}[25150] +- Use `filestream` input for internal log collection. {pull-beats}[25660] +- Enable agent to send custom headers to kibana/ES {pull-beats}[26275] +- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue-beats}[21121] {pull-beats}[26394] {pull-beats}[26548] +- Add proxy support to artifact downloader and communication with fleet server. {pull-beats}[25219] +- Add proxy support to enroll command. {pull-beats}[26514] +- Enable configuring monitoring namespace {issue-beats}[26439] +- Communicate with Fleet Server over HTTP2. {pull-beats}[26474] +- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue-beats}[26758] {pull-beats}[26828] +- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull-beats}[26801] +- Increase Agent's mem limits in k8s. {pull-beats}[27153] +- Add new --enroll-delay option for install and enroll commands. {pull-beats}[27118] +- Add link to troubleshooting guide on fatal exits. {issue-beats}[26367] {pull-beats}[27236] +- Agent now adapts the beats queue size based on output settings. {issue-beats}[26638] {pull-beats}[27429] +- Support ephemeral containers in Kubernetes dynamic provider. {issue-beats}[#27020] {pull-beats}[27707] +- Add complete k8s metadata through composable provider. {pull-beats}[27691] +- Add diagnostics command to gather beat metadata. {pull-beats}[28265] +- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull-beats}[28461] +- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull-beats}[28096] +- Enable pprof endpoints for beats processes. 
Allow pprof endpoints for elastic-agent if enabled. {pull-beats}[28983] +- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull-beats}[28798] +- Allow pprof endpoints for elastic-agent or beats if enabled. {pull-beats}[28983] {pull-beats}[29155] +- Add `--fleet-server-es-ca-trusted-fingerprint` flag to allow agent/fleet-server to work with Elasticsearch clusters using self-signed certs. {pull-beats}[29128] +- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull-beats}[23139] +- Add results of inspect output command into archive produced by diagnostics collect. {pull-beats}[29902] +- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d` (see the sketch after this changelog excerpt). {pull-beats}[30087] +- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull-beats}[30289] +- Update docker/distribution dependency library to fix a security issue concerning OCI Manifest Type Confusion. {pull-beats}[30462] +- Add `action_input_type` for the `.fleet-actions-results` {pull-beats}[30562] +- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. The diagnostics collect command will gather metrics-buffer data if enabled. {pull-beats}[30471] - Update ack response schema and processing, add retrier for acks {pull}200[200] - Enhance error messages and logs for process start {pull}225[225] -- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue}29774[29774] {pull}226[226] +- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue-beats}[29774] {pull}226[226] - Add Elastic APM instrumentation {pull}180[180] - Agent can be built for `darwin/arm64`. When it's built for both `darwin/arm64` and `darwin/amd64` a universal binary is also built and packaged. {pull}203[203] - Add support for Cloudbeat. {pull}179[179] @@ -183,3 +193,7 @@ - Save the agent configuration and the state encrypted on the disk. {issue}535[535] {pull}398[398] - Bump node.js version for heartbeat/synthetics to 16.15.0 - Support scheduled actions and cancellation of pending actions. {issue}393[393] {pull}419[419] +- Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] +- Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] +- Redact sensitive information on diagnostics collect command. {issue}241[241] {pull}566[566] +- Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. {issue}427[427] {pull}700[700]
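A minimal sketch of one such external input file under `{path.config}/inputs.d`, as referenced in the changelog entry above. The file name, `id`, dataset, and log path are hypothetical, and exact keys can vary between agent versions; each YAML file in that folder holds a list of inputs that is merged into the running standalone policy:

```yaml
# {path.config}/inputs.d/nginx.yml (hypothetical example)
- type: logfile                  # collect plain log files
  id: nginx-access-logs          # any unique identifier for this input
  use_output: default            # route events to the policy's default output
  streams:
    - data_stream:
        dataset: nginx.access    # assumed dataset name, for illustration only
      paths:
        - /var/log/nginx/access.log
  # EQL-based conditions (also added in this changelog) can gate an input:
  condition: ${host.platform} == 'linux'
```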
diff --git a/NOTICE.txt b/NOTICE.txt index 56f82316620..1451a7531c1 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1061,11 +1061,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.2.3 +Version: v0.2.6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.6/LICENSE: Apache License Version 2.0, January 2004 @@ -6576,11 +6576,11 @@ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/containerd/containerd -Version: v1.5.10 +Version: v1.5.13 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v1.5.10/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v1.5.13/LICENSE: Apache License @@ -14276,11 +14276,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : go.uber.org/goleak -Version: v1.1.11 +Version: v1.1.12 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.1.11/LICENSE: +Contents of probable licence file $GOMODCACHE/go.uber.org/goleak@v1.1.12/LICENSE: The MIT License (MIT) @@ -15343,11 +15343,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : gopkg.in/yaml.v3 -Version: v3.0.0-20210107192922-496545a6307b +Version: v3.0.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.0-20210107192922-496545a6307b/LICENSE: +Contents of probable licence file $GOMODCACHE/gopkg.in/yaml.v3@v3.0.1/LICENSE: This project is covered by two different licenses: MIT and Apache. diff --git a/README.md b/README.md index 25aff95042e..2c0dbe31f69 100644 --- a/README.md +++ b/README.md @@ -10,6 +10,13 @@ Prerequisites: - [Docker](https://docs.docker.com/get-docker/) - [X-pack](https://github.com/elastic/beats/tree/main/x-pack) to pre-exist in the parent folder of the local Git repository checkout +If you are on a Mac with an M1 chip, remember to export the following Docker variables to be able to build for AMD64: +``` +export DOCKER_BUILDKIT=0 +export COMPOSE_DOCKER_CLI_BUILD=0 +export DOCKER_DEFAULT_PLATFORM=linux/amd64 +``` + On Linux systems where you cannot run Docker as the root user, follow the [linux-postinstall steps](https://docs.docker.com/engine/install/linux-postinstall/) ### Testing docker container @@ -17,7 +24,7 @@ On Linux systems where you cannot run Docker as the root user, follow the linux Running Elastic Agent in a docker container is a common use case.
To build the Elastic Agent and create a Docker image, run the following command: ``` -DEV=true SNAPSHOT=true PLATFORMS=linux/amd64 TYPES=docker mage package +DEV=true SNAPSHOT=true PLATFORMS=linux/amd64 PACKAGES=docker mage package ``` If you are in the 7.13 branch, this will create the `docker.elastic.co/beats/elastic-agent:7.13.0-SNAPSHOT` image in your local environment. You can now use this image, for example, to test the container with the stack in elastic-package: @@ -45,7 +52,7 @@ for the standard variant. 1. Build elastic-agent: ```bash -DEV=true PLATFORMS=linux/amd64 TYPES=docker mage package +DEV=true PLATFORMS=linux/amd64 PACKAGES=docker mage package ``` Use the environment variables `GOHOSTOS` and `GOHOSTARCH` to set the PLATFORMS variable accordingly, e.g. diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 882e7b46e21..1e2403f47a2 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,31 +15,41 @@ spec: labels: app: elastic-agent spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent hostNetwork: true + # 'hostPID: true' enables the Elastic Security integration to observe all process exec events on the host. + # Sharing the host process ID namespace gives visibility of all processes running on the same host. + hostPID: true dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent image: docker.elastic.co/beats/elastic-agent:8.3.0 env: + # Set to 1 for enrollment into Fleet server.
If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL value: "1" - # Set to true in case of insecure or unverified HTTP + # Set to true to communicate with Fleet with either insecure HTTP or unverified HTTPS - name: FLEET_INSECURE value: "true" - # The ip:port pair of fleet server + # Fleet Server URL to enroll the Elastic Agent into + # FLEET_URL can be found in Kibana, go to Management > Fleet > Settings - name: FLEET_URL value: "https://fleet-server:8220" - # If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed + # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) + # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN value: "" - name: KIBANA_HOST value: "http://kibana:5601" + # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_USERNAME value: "elastic" + # The basic authentication password used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_PASSWORD value: "changeme" - name: NODE_NAME @@ -85,6 +96,9 @@ spec: - name: etcsysmd mountPath: /hostfs/etc/systemd readOnly: true + - name: etc-mid + mountPath: /etc/machine-id + readOnly: true volumes: - name: proc hostPath: @@ -98,21 +112,32 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd + # Mount /etc/machine-id from the host to determine host ID + # Needed for Elastic Security integration + - name: etc-mid + hostPath: + path: /etc/machine-id + type: File --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -170,6 +195,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -201,11 +227,12 @@ rules: - jobs - cronjobs verbs: [ "get", "list", "watch" ] - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -213,6 +240,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -222,7 +250,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index 097d9786e03..c3c679efa36 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,31 +15,41 @@ 
spec: labels: app: elastic-agent spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent hostNetwork: true + # 'hostPID: true' enables the Elastic Security integration to observe all process exec events on the host. + # Sharing the host process ID namespace gives visibility of all processes running on the same host. + hostPID: true dnsPolicy: ClusterFirstWithHostNet containers: - name: elastic-agent image: docker.elastic.co/beats/elastic-agent:%VERSION% env: + # Set to 1 for enrollment into Fleet server. If not set, Elastic Agent is run in standalone mode - name: FLEET_ENROLL value: "1" - # Set to true in case of insecure or unverified HTTP + # Set to true to communicate with Fleet with either insecure HTTP or unverified HTTPS - name: FLEET_INSECURE value: "true" - # The ip:port pair of fleet server + # Fleet Server URL to enroll the Elastic Agent into + # FLEET_URL can be found in Kibana, go to Management > Fleet > Settings - name: FLEET_URL value: "https://fleet-server:8220" - # If left empty KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed + # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) + # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN value: "" - name: KIBANA_HOST value: "http://kibana:5601" + # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_USERNAME value: "elastic" + # The basic authentication password used to connect to Kibana and retrieve a service_token to enable Fleet - name: KIBANA_FLEET_PASSWORD value: "changeme" - name: NODE_NAME @@ -85,6 +96,9 @@ spec: - name: etcsysmd mountPath: /hostfs/etc/systemd readOnly: true + - name: etc-mid + mountPath: /etc/machine-id + readOnly: true volumes: - name: proc hostPath: @@ -98,18 +112,29 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd + # Mount /etc/machine-id from the host to determine host ID + # Needed for Elastic Security integration + - name: etc-mid + hostPath: + path: /etc/machine-id + type: File diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml index 0ef5b850782..0d961215f4e 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml @@ -13,6 +13,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -44,11 +45,12 @@ rules: - jobs - cronjobs verbs: [ "get", "list", "watch" ] - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] 
resources: - clusterrolebindings @@ -56,6 +58,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -65,7 +68,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index ab360f19bcb..0984f0dc8ac 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -63,7 +64,9 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s - # If `https` is used to access `kube-state-metrics`, then to all `kubernetes.state_*` datasets should be added: + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt @@ -76,6 +79,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_daemonset type: metrics @@ -85,6 +94,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -94,6 +109,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_job type: metrics @@ -103,6 +124,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - 
/var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_node type: metrics @@ -112,6 +139,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolume type: metrics @@ -121,6 +154,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolumeclaim type: metrics @@ -130,6 +169,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_pod type: metrics @@ -139,6 +184,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_replicaset type: metrics @@ -148,6 +199,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_resourcequota type: metrics @@ -157,6 +214,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_service type: metrics @@ -166,6 +229,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC 
authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_statefulset type: metrics @@ -175,6 +244,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_storageclass type: metrics @@ -184,6 +259,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - name: system-logs type: logfile use_output: default @@ -227,6 +308,7 @@ data: fields: ecs.version: 1.12.0 - name: container-log + id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -252,6 +334,7 @@ data: paths: - /var/log/containers/*${kubernetes.container.id}.log - name: audit-log + id: audit-log type: filestream use_output: default meta: @@ -415,7 +498,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-controller-manager' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'kube-controller-manager' - data_stream: dataset: kubernetes.scheduler @@ -428,7 +511,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-scheduler' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler' - data_stream: dataset: kubernetes.proxy @@ -437,7 +520,7 @@ data: - proxy hosts: - 'localhost:10249' - # Openshift: + # On Openshift port should be adjusted: # - 'localhost:29101' period: 10s - data_stream: @@ -557,6 +640,8 @@ spec: labels: app: elastic-agent-standalone spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule @@ -571,10 +656,14 @@ spec: "-e", ] env: + # The basic authentication username used to connect to Elasticsearch + # This user needs the privileges required to publish events to Elasticsearch. 
- name: ES_USERNAME value: "elastic" + # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD value: "" + # The Elasticsearch host to communicate with - name: ES_HOST value: "" - name: NODE_NAME @@ -642,18 +731,23 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd @@ -714,6 +808,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -745,11 +840,12 @@ rules: - nodes/stats verbs: - get - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -757,6 +853,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -766,7 +863,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent-standalone - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent-standalone diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 6894f32bbe4..7048bf22adb 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,3 +1,4 @@ +# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -63,7 +64,9 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s - # If `https` is used to access `kube-state-metrics`, then to all `kubernetes.state_*` datasets should be added: + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token # ssl.certificate_authorities: # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt @@ -76,6 +79,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_daemonset type: metrics @@ -85,6 +94,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: 
/var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_deployment type: metrics @@ -94,6 +109,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_job type: metrics @@ -103,6 +124,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_node type: metrics @@ -112,6 +139,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolume type: metrics @@ -121,6 +154,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_persistentvolumeclaim type: metrics @@ -130,6 +169,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_pod type: metrics @@ -139,6 +184,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_replicaset type: metrics @@ -148,6 +199,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access 
`kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_resourcequota type: metrics @@ -157,6 +214,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_service type: metrics @@ -166,6 +229,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_statefulset type: metrics @@ -175,6 +244,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - data_stream: dataset: kubernetes.state_storageclass type: metrics @@ -184,6 +259,12 @@ data: hosts: - 'kube-state-metrics:8080' period: 10s + # Openshift: + # if to access `kube-state-metrics` are used third party tools, like kube-rbac-proxy or similar, that perform RBAC authorization + # and/or tls termination, then configuration below should be considered: + # bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + # ssl.certificate_authorities: + # - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt - name: system-logs type: logfile use_output: default @@ -227,6 +308,7 @@ data: fields: ecs.version: 1.12.0 - name: container-log + id: container-log-${kubernetes.pod.name}-${kubernetes.container.id} type: filestream use_output: default meta: @@ -252,6 +334,7 @@ data: paths: - /var/log/containers/*${kubernetes.container.id}.log - name: audit-log + id: audit-log type: filestream use_output: default meta: @@ -415,7 +498,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-controller-manager' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'kube-controller-manager' - data_stream: dataset: kubernetes.scheduler @@ -428,7 +511,7 @@ data: period: 10s ssl.verification_mode: none condition: ${kubernetes.labels.component} == 'kube-scheduler' - # Openshift: + # On Openshift condition should be adjusted: # condition: ${kubernetes.labels.app} == 'openshift-kube-scheduler' - data_stream: dataset: kubernetes.proxy @@ -437,7 +520,7 @@ data: 
- proxy hosts: - 'localhost:10249' - # Openshift: + # On Openshift port should be adjusted: # - 'localhost:29101' period: 10s - data_stream: diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 2a0f23107f1..0bf131ec8ea 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -14,6 +14,8 @@ spec: labels: app: elastic-agent-standalone spec: + # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. + # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: - key: node-role.kubernetes.io/master effect: NoSchedule @@ -28,10 +30,14 @@ spec: "-e", ] env: + # The basic authentication username used to connect to Elasticsearch + # This user needs the privileges required to publish events to Elasticsearch. - name: ES_USERNAME value: "elastic" + # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD value: "" + # The Elasticsearch host to communicate with - name: ES_HOST value: "" - name: NODE_NAME @@ -99,18 +105,23 @@ spec: - name: varlog hostPath: path: /var/log + # Needed for cloudbeat - name: etc-kubernetes hostPath: path: /etc/kubernetes + # Needed for cloudbeat - name: var-lib hostPath: path: /var/lib + # Needed for cloudbeat - name: passwd hostPath: path: /etc/passwd + # Needed for cloudbeat - name: group hostPath: path: /etc/group + # Needed for cloudbeat - name: etcsysmd hostPath: path: /etc/systemd diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml index b253f0520fe..8a644f3aadf 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml @@ -13,6 +13,7 @@ rules: - pods - services - configmaps + # Needed for cloudbeat - serviceaccounts - persistentvolumes - persistentvolumeclaims @@ -44,11 +45,12 @@ rules: - nodes/stats verbs: - get - # required for apiserver + # Needed for apiserver - nonResourceURLs: - "/metrics" verbs: - get + # Needed for cloudbeat - apiGroups: ["rbac.authorization.k8s.io"] resources: - clusterrolebindings @@ -56,6 +58,7 @@ rules: - rolebindings - roles verbs: ["get", "list", "watch"] + # Needed for cloudbeat - apiGroups: ["policy"] resources: - podsecuritypolicies @@ -65,7 +68,7 @@ apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: elastic-agent-standalone - # should be the namespace where elastic-agent is running + # Should be the namespace where elastic-agent is running namespace: kube-system labels: k8s-app: elastic-agent-standalone diff --git a/dev-tools/packaging/files/ironbank/LICENSE b/dev-tools/packaging/files/ironbank/LICENSE new file mode 100644 index 00000000000..ef2739c152e --- /dev/null +++ b/dev-tools/packaging/files/ironbank/LICENSE @@ -0,0 +1,280 @@ +ELASTIC LICENSE AGREEMENT + +PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH +CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF +THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") +THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, +CERTAIN OF THE 
ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY +INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU +ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE +WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE +GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON +BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL +AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF +SUCH ENTITY. + +Posted Date: April 20, 2018 + +This Agreement is entered into by and between Elasticsearch BV ("Elastic") and +You, or the legal entity on behalf of whom You are acting (as applicable, +"You"). + +1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE +SOFTWARE + + 1.1 Object Code End User License. Subject to the terms and conditions of + Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and + for so long as you are not in breach of any provision of this Agreement, a + License to the Basic Features and Functions of the Elastic Software. + + 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. 
Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. 
TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. + This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. 
DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. + + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. + + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. + + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. + + +GOVERNMENT END USER ADDENDUM TO THE ELASTIC LICENSE AGREEMENT + + This ADDENDUM TO THE ELASTIC LICENSE AGREEMENT (this "Addendum") applies +only to U.S. Federal Government, State Government, and Local Government +entities ("Government End Users") of the Elastic Software. This Addendum is +subject to, and hereby incorporated into, the Elastic License Agreement, +which is being entered into as of even date herewith, by Elastic and You (the +"Agreement"). 
This Addendum sets forth additional terms and conditions +related to Your use of the Elastic Software. Capitalized terms not defined in +this Addendum have the meaning set forth in the Agreement. + + 1. LIMITED LICENSE TO DISTRIBUTE (DSOP ONLY). Subject to the terms and +conditions of the Agreement (including this Addendum), Elastic grants the +Department of Defense Enterprise DevSecOps Initiative (DSOP) a royalty-free, +non-exclusive, non-transferable, limited license to reproduce and distribute +the Elastic Software solely through a software distribution repository +controlled and managed by DSOP, provided that DSOP: (i) distributes the +Elastic Software complete and unmodified, inclusive of the Agreement +(including this Addendum) and (ii) does not remove or alter any proprietary +legends or notices contained in the Elastic Software. + + 2. CHOICE OF LAW. The choice of law and venue provisions set forth shall +prevail over those set forth in Section 5 of the Agreement. + + "For U.S. Federal Government Entity End Users. This Agreement and any + non-contractual obligation arising out of or in connection with it, is + governed exclusively by U.S. Federal law. To the extent permitted by + federal law, the laws of the State of Delaware (excluding Delaware choice + of law rules) will apply in the absence of applicable federal law. + + For State and Local Government Entity End Users. This Agreement and any + non-contractual obligation arising out of or in connection with it, is + governed exclusively by the laws of the state in which you are located + without reference to conflict of laws. Furthermore, the Parties agree that + the Uniform Computer Information Transactions Act or any version thereof, + adopted by any state in any form ('UCITA'), shall not apply to this + Agreement and, to the extent that UCITA is applicable, the Parties agree to + opt out of the applicability of UCITA pursuant to the opt-out provision(s) + contained therein." + + 3. ELASTIC LICENSE MODIFICATION. Section 5 of the Agreement is hereby +amended to replace + + "This Agreement may be modified by Elastic from time to time, and any + such modifications will be effective upon the "Posted Date" set forth at + the top of the modified Agreement." + + with: + + "This Agreement may be modified by Elastic from time to time; provided, + however, that any such modifications shall apply only to Elastic Software + that is installed after the "Posted Date" set forth at the top of the + modified Agreement." + +V100820.0 diff --git a/dev-tools/packaging/files/ironbank/config/docker-entrypoint b/dev-tools/packaging/files/ironbank/config/docker-entrypoint new file mode 100644 index 00000000000..7ebe21745f4 --- /dev/null +++ b/dev-tools/packaging/files/ironbank/config/docker-entrypoint @@ -0,0 +1,11 @@ +#!/bin/bash + +set -eo pipefail + +# For information on the environment variables that can be passed into the container, +# run the following command to see the available options: +# +# `./elastic-agent container --help` +# + +elastic-agent container "$@" diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index 0c2e0da906e..c02c0596e0e 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -1,910 +1,902 @@ ---- - -# This file contains the package specifications for both Community Beats and -# Official Beats. The shared section contains YAML anchors that are used to -# define common parts of the package in order to not repeat ourselves.
- -shared: - - &common - name: '{{.BeatName}}' - service_name: '{{.BeatServiceName}}' - os: '{{.GOOS}}' - arch: '{{.PackageArch}}' - vendor: '{{.BeatVendor}}' - version: '{{ beat_version }}' - license: '{{.BeatLicense}}' - url: '{{.BeatURL}}' - description: '{{.BeatDescription}}' - - # agent specific - # Deb/RPM spec for community beats. - - &deb_rpm_agent_spec - <<: *common - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /usr/share/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.reference.yml: - source: 'elastic-agent.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.yml: - source: 'elastic-agent.yml' - mode: 0600 - config: true - /etc/{{.BeatName}}/.elastic-agent.active.commit: - content: > - {{ commit }} - mode: 0644 - /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: - source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} - mode: 0755 - /usr/bin/{{.BeatName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl' - mode: 0755 - /lib/systemd/system/{{.BeatServiceName}}.service: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl' - mode: 0644 - /etc/init.d/{{.BeatServiceName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/elastic-agent.init.sh.tmpl' - mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - # MacOS pkg spec for community beats. - - &macos_agent_pkg_spec - <<: *common - extra_vars: - # OS X 10.11 El Capitan is the oldest supported by Go 1.14. 
- # https://golang.org/doc/go1.14#ports - min_supported_osx_version: 10.11 - identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' - install_path: /Library/Application Support - pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.elastic-agent.tmpl' - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.reference.yml: - source: 'elastic-agent.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/elastic-agent.yml: - source: 'elastic-agent.yml' - mode: 0600 - config: true - /etc/{{.BeatName}}/.elastic-agent.active.commit: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - - &agent_binary_files - '{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - .build_hash.txt: - content: > - {{ commit }} - mode: 0644 - 'elastic-agent.reference.yml': - source: 'elastic-agent.reference.yml' - mode: 0644 - 'elastic-agent.yml': - source: 'elastic-agent.yml' - mode: 0600 - config: true - '.elastic-agent.active.commit': - content: > - {{ commit }} - mode: 0644 - - # Binary package spec (tar.gz for linux/darwin) for community beats. - - &agent_binary_spec - <<: *common - files: - <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/components': - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - - # Binary package spec (zip for windows) for community beats. 
- - &agent_windows_binary_spec - <<: *common - files: - <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/components': - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.zip/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true - - - &agent_docker_spec - <<: *agent_binary_spec - extra_vars: - from: 'ubuntu:20.04' - buildFrom: 'ubuntu:20.04' - dockerfile: 'Dockerfile.elastic-agent.tmpl' - docker_entrypoint: 'docker-entrypoint.elastic-agent.tmpl' - user: '{{ .BeatName }}' - linux_capabilities: '' - image_name: '' - beats_install_path: "install" - files: - 'elastic-agent.yml': - source: 'elastic-agent.docker.yml' - mode: 0600 - config: true - '.elastic-agent.active.commit': - content: > - {{ commit }} - mode: 0644 - 'data/cloud_downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0755 - 'data/cloud_downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': - source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' - mode: 0755 - - - &agent_docker_arm_spec - <<: *agent_docker_spec - extra_vars: - from: 'arm64v8/ubuntu:20.04' - buildFrom: 'arm64v8/ubuntu:20.04' - - - &agent_docker_cloud_spec - <<: *agent_docker_spec - extra_vars: - image_name: '{{.BeatName}}-cloud' - repository: 'docker.elastic.co/beats-ci' - - - &agent_docker_complete_spec - <<: *agent_docker_spec - extra_vars: - image_name: '{{.BeatName}}-complete' - - # Deb/RPM spec for community beats. - - &deb_rpm_spec - <<: *common - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /etc/{{.BeatName}}/fields.yml: - source: fields.yml - mode: 0644 - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /usr/share/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /usr/share/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - /usr/share/{{.BeatName}}/kibana: - source: _meta/kibana.generated - mode: 0644 - /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: - source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} - mode: 0755 - /usr/bin/{{.BeatName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl' - mode: 0755 - /lib/systemd/system/{{.BeatServiceName}}.service: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl' - mode: 0644 - /etc/init.d/{{.BeatServiceName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl' - mode: 0755 - - # MacOS pkg spec for community beats. 
- - &macos_beat_pkg_spec - <<: *common - extra_vars: - # OS X 10.8 Mountain Lion is the oldest supported by Go 1.10. - # https://golang.org/doc/go1.10#ports - min_supported_osx_version: 10.8 - identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' - install_path: /Library/Application Support - pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl' - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: - content: > - {{ commit }} - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' - mode: 0644 - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: - source: _meta/kibana.generated - mode: 0644 - /etc/{{.BeatName}}/fields.yml: - source: fields.yml - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' - mode: 0644 - /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - - - &binary_files - '{{.BeatName}}{{.BinaryExt}}': - source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - mode: 0755 - fields.yml: - source: fields.yml - mode: 0644 - LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' - mode: 0644 - NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' - mode: 0644 - README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' - mode: 0644 - .build_hash.txt: - content: > - {{ commit }} - mode: 0644 - '{{.BeatName}}.reference.yml': - source: '{{.BeatName}}.reference.yml' - mode: 0644 - '{{.BeatName}}.yml': - source: '{{.BeatName}}.yml' - mode: 0600 - config: true - kibana: - source: _meta/kibana.generated - mode: 0644 - - # Binary package spec (tar.gz for linux/darwin) for community beats. - - &binary_spec - <<: *common - files: - <<: *binary_files - - # Binary package spec (zip for windows) for community beats. 
- - &windows_binary_spec - <<: *common - files: - <<: *binary_files - install-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl' - mode: 0755 - uninstall-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl' - mode: 0755 - - - &docker_spec - <<: *binary_spec - extra_vars: - from: 'ubuntu:20.04' - buildFrom: 'ubuntu:20.04' - user: '{{ .BeatName }}' - linux_capabilities: '' - files: - '{{.BeatName}}.yml': - source: '{{.BeatName}}.docker.yml' - mode: 0600 - config: true - - - &docker_arm_spec - <<: *docker_spec - extra_vars: - from: 'arm64v8/ubuntu:20.04' - buildFrom: 'arm64v8/ubuntu:20.04' - - - &docker_ubi_spec - extra_vars: - image_name: '{{.BeatName}}-ubi8' - from: 'docker.elastic.co/ubi8/ubi-minimal' - - - &docker_arm_ubi_spec - extra_vars: - image_name: '{{.BeatName}}-ubi8' - from: 'registry.access.redhat.com/ubi8/ubi-minimal:8.2' - - - &elastic_docker_spec - extra_vars: - repository: 'docker.elastic.co/beats' - - # - # License modifiers for Apache 2.0 - # - - &apache_license_for_binaries - license: "ASL 2.0" - files: - LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - - &apache_license_for_deb_rpm - license: "ASL 2.0" - files: - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - - &apache_license_for_macos_pkg - license: "ASL 2.0" - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' - mode: 0644 - - # - # License modifiers for the Elastic License - # - - &elastic_license_for_binaries - license: "Elastic License" - files: - LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - - - &elastic_license_for_deb_rpm - license: "Elastic License" - files: - /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - - - &elastic_license_for_macos_pkg - license: "Elastic License" - files: - /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' - mode: 0644 - -# specs is a list of named packaging "flavors". -specs: - # Community Beats - community_beat: - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - - - os: linux - types: [docker] - spec: - <<: *docker_spec - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - - # Elastic Beat with Apache License (OSS) and binary taken the current - # directory. 
- elastic_beat_oss: - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *apache_license_for_deb_rpm - name: '{{.BeatName}}-oss' - - - os: linux - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' - - # Elastic Beat with Elastic License and binary taken the current directory. - elastic_beat_xpack: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *elastic_license_for_deb_rpm - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - # Elastic Beat with Elastic License and binary taken the current directory. - elastic_beat_xpack_reduced: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
- elastic_beat_xpack_separate_binaries: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *windows_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: darwin - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_spec - <<: *elastic_license_for_deb_rpm - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
- elastic_beat_agent_binaries: - ### - # Elastic Licensed Packages - ### - - os: windows - types: [zip] - spec: - <<: *agent_windows_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: darwin - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - types: [deb, rpm] - spec: - <<: *deb_rpm_agent_spec - <<: *elastic_license_for_deb_rpm - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *agent_docker_complete_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Cloud specific docker image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *agent_docker_spec - <<: *agent_docker_cloud_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *agent_docker_spec - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *agent_docker_complete_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Cloud specific docker image - - os: linux - arch: arm64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *agent_docker_arm_spec - <<: *agent_docker_cloud_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux 
- arch: arm64 - types: [docker] - spec: - <<: *agent_docker_arm_spec - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - # Elastic Beat with Elastic License and binary taken from the x-pack dir. - elastic_beat_agent_demo_binaries: - ### - # Elastic Licensed Packages - ### - - - os: linux - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - types: [docker] - spec: - <<: *agent_docker_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *agent_binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} +--- + +# This file contains the package specifications for both Community Beats and +# Official Beats. The shared section contains YAML anchors that are used to +# define common parts of the package in order to not repeat ourselves. + +shared: + - &common + name: '{{.BeatName}}' + service_name: '{{.BeatServiceName}}' + os: '{{.GOOS}}' + arch: '{{.PackageArch}}' + vendor: '{{.BeatVendor}}' + version: '{{ beat_version }}' + license: '{{.BeatLicense}}' + url: '{{.BeatURL}}' + description: '{{.BeatDescription}}' + + # agent specific + # Deb/RPM spec for community beats. 
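+  # All of the specs in this section are assembled from the &common anchor
+  # above via YAML merge keys: `<<: *common` copies every key of the &common
+  # mapping into the spec, and keys set locally override the merged ones.
+  # A minimal sketch of the mechanism, with illustrative names that are not
+  # part of this file:
+  #
+  #   defaults: &defaults
+  #     mode: 0644
+  #     config: false
+  #   custom:
+  #     <<: *defaults     # inherit every key from &defaults
+  #     mode: 0755        # a local key overrides the merged value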
+ - &deb_rpm_agent_spec + <<: *common + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/postinstall.sh.tmpl' + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /usr/share/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.reference.yml: + source: 'elastic-agent.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.yml: + source: 'elastic-agent.yml' + mode: 0600 + config: true + /etc/{{.BeatName}}/.elastic-agent.active.commit: + content: > + {{ commit }} + mode: 0644 + /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: + source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} + mode: 0755 + /usr/bin/{{.BeatName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.sh.tmpl' + mode: 0755 + /lib/systemd/system/{{.BeatServiceName}}.service: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/elastic-agent.unit.tmpl' + mode: 0644 + /etc/init.d/{{.BeatServiceName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/elastic-agent.init.sh.tmpl' + mode: 0755 + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + # MacOS pkg spec for community beats. + - &macos_agent_pkg_spec + <<: *common + extra_vars: + # OS X 10.11 El Capitan is the oldest supported by Go 1.14. 
+ # https://golang.org/doc/go1.14#ports + min_supported_osx_version: 10.11 + identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' + install_path: /Library/Application Support + pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.elastic-agent.tmpl' + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.reference.yml: + source: 'elastic-agent.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/elastic-agent.yml: + source: 'elastic-agent.yml' + mode: 0600 + config: true + /etc/{{.BeatName}}/.elastic-agent.active.commit: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /etc/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/components: + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + - &agent_binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + .build_hash.txt: + content: > + {{ commit }} + mode: 0644 + 'elastic-agent.reference.yml': + source: 'elastic-agent.reference.yml' + mode: 0644 + 'elastic-agent.yml': + source: 'elastic-agent.yml' + mode: 0600 + config: true + '.elastic-agent.active.commit': + content: > + {{ commit }} + mode: 0644 + + # Binary package spec (tar.gz for linux/darwin) for community beats. + - &agent_binary_spec + <<: *common + files: + <<: *agent_binary_files + 'data/{{.BeatName}}-{{ commit_short }}/components': + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + + # Binary package spec (zip for windows) for community beats. 
+ - &agent_windows_binary_spec + <<: *common + files: + <<: *agent_binary_files + 'data/{{.BeatName}}-{{ commit_short }}/components': + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.zip/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + - &agent_docker_spec + <<: *agent_binary_spec + extra_vars: + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' + dockerfile: 'Dockerfile.elastic-agent.tmpl' + docker_entrypoint: 'docker-entrypoint.elastic-agent.tmpl' + user: '{{ .BeatName }}' + linux_capabilities: '' + image_name: '' + beats_install_path: "install" + files: + 'elastic-agent.yml': + source: 'elastic-agent.docker.yml' + mode: 0600 + config: true + '.elastic-agent.active.commit': + content: > + {{ commit }} + mode: 0644 + 'data/cloud_downloads/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/metricbeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0755 + 'data/cloud_downloads/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz': + source: '{{.AgentDropPath}}/archives/{{.GOOS}}-{{.AgentArchName}}.tar.gz/filebeat-{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}-{{.GOOS}}-{{.AgentArchName}}.tar.gz' + mode: 0755 + + - &agent_docker_arm_spec + <<: *agent_docker_spec + extra_vars: + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' + + - &agent_docker_cloud_spec + <<: *agent_docker_spec + extra_vars: + image_name: '{{.BeatName}}-cloud' + repository: 'docker.elastic.co/beats-ci' + + - &agent_docker_complete_spec + <<: *agent_docker_spec + extra_vars: + image_name: '{{.BeatName}}-complete' + + # Deb/RPM spec for community beats. + - &deb_rpm_spec + <<: *common + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' + files: + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /etc/{{.BeatName}}/fields.yml: + source: fields.yml + mode: 0644 + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /usr/share/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /usr/share/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: + source: '{{.BeatName}}.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.yml: + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + /usr/share/{{.BeatName}}/kibana: + source: _meta/kibana.generated + mode: 0644 + /usr/share/{{.BeatName}}/bin/{{.BeatName}}-god: + source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} + mode: 0755 + /usr/bin/{{.BeatName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl' + mode: 0755 + /lib/systemd/system/{{.BeatServiceName}}.service: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl' + mode: 0644 + /etc/init.d/{{.BeatServiceName}}: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl' + mode: 0755 + + # MacOS pkg spec for community beats. 
+ - &macos_beat_pkg_spec + <<: *common + extra_vars: + # OS X 10.8 Mountain Lion is the oldest supported by Go 1.10. + # https://golang.org/doc/go1.10#ports + min_supported_osx_version: 10.8 + identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' + install_path: /Library/Application Support + pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' + post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl' + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: + content: > + {{ commit }} + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' + mode: 0644 + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: + source: _meta/kibana.generated + mode: 0644 + /etc/{{.BeatName}}/fields.yml: + source: fields.yml + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: + source: '{{.BeatName}}.reference.yml' + mode: 0644 + /etc/{{.BeatName}}/{{.BeatName}}.yml: + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + + - &binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + fields.yml: + source: fields.yml + mode: 0644 + LICENSE.txt: + source: '{{ repo.RootDir }}/LICENSE.txt' + mode: 0644 + NOTICE.txt: + source: '{{ repo.RootDir }}/NOTICE.txt' + mode: 0644 + README.md: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + mode: 0644 + .build_hash.txt: + content: > + {{ commit }} + mode: 0644 + '{{.BeatName}}.reference.yml': + source: '{{.BeatName}}.reference.yml' + mode: 0644 + '{{.BeatName}}.yml': + source: '{{.BeatName}}.yml' + mode: 0600 + config: true + kibana: + source: _meta/kibana.generated + mode: 0644 + + # Binary package spec (tar.gz for linux/darwin) for community beats. + - &binary_spec + <<: *common + files: + <<: *binary_files + + # Binary package spec (zip for windows) for community beats. 
+ - &windows_binary_spec + <<: *common + files: + <<: *binary_files + install-service-{{.BeatName}}.ps1: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl' + mode: 0755 + uninstall-service-{{.BeatName}}.ps1: + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl' + mode: 0755 + + - &docker_spec + <<: *binary_spec + extra_vars: + from: 'ubuntu:20.04' + buildFrom: 'ubuntu:20.04' + user: '{{ .BeatName }}' + linux_capabilities: '' + files: + '{{.BeatName}}.yml': + source: '{{.BeatName}}.docker.yml' + mode: 0600 + config: true + + - &docker_arm_spec + <<: *docker_spec + extra_vars: + from: 'arm64v8/ubuntu:20.04' + buildFrom: 'arm64v8/ubuntu:20.04' + + - &docker_ubi_spec + extra_vars: + image_name: '{{.BeatName}}-ubi8' + from: 'docker.elastic.co/ubi8/ubi-minimal' + + - &docker_arm_ubi_spec + extra_vars: + image_name: '{{.BeatName}}-ubi8' + from: 'registry.access.redhat.com/ubi8/ubi-minimal:8.2' + + - &elastic_docker_spec + extra_vars: + repository: 'docker.elastic.co/beats' + + # + # License modifiers for Apache 2.0 + # + - &apache_license_for_binaries + license: "ASL 2.0" + files: + LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + - &apache_license_for_deb_rpm + license: "ASL 2.0" + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + - &apache_license_for_macos_pkg + license: "ASL 2.0" + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/APACHE-LICENSE-2.0.txt' + mode: 0644 + + # + # License modifiers for the Elastic License + # + - &elastic_license_for_binaries + license: "Elastic License" + files: + LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + + - &elastic_license_for_deb_rpm + license: "Elastic License" + files: + /usr/share/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + + - &elastic_license_for_macos_pkg + license: "Elastic License" + files: + /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: + source: '{{ repo.RootDir }}/dev-tools/licenses/ELASTIC-LICENSE.txt' + mode: 0644 + +# specs is a list of named packaging "flavors". +specs: + # Community Beats + community_beat: + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + + - os: linux + types: [docker] + spec: + <<: *docker_spec + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + + # Elastic Beat with Apache License (OSS) and binary taken the current + # directory. 
+ elastic_beat_oss: + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *apache_license_for_deb_rpm + name: '{{.BeatName}}-oss' + + - os: linux + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *apache_license_for_binaries + name: '{{.BeatName}}-oss' + + # Elastic Beat with Elastic License and binary taken the current directory. + elastic_beat_xpack: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *elastic_license_for_deb_rpm + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *docker_arm_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + # Elastic Beat with Elastic License and binary taken the current directory. + elastic_beat_xpack_reduced: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + + # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
+ elastic_beat_xpack_separate_binaries: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *windows_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: darwin + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_spec + <<: *elastic_license_for_deb_rpm + files: + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *docker_arm_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: aix + types: [tgz] + spec: + <<: *binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Elastic Beat with Elastic License and binary taken from the x-pack dir. 
+ elastic_beat_agent_binaries: + ### + # Elastic Licensed Packages + ### + - os: windows + types: [zip] + spec: + <<: *agent_windows_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: darwin + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + - os: linux + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + - os: linux + types: [deb, rpm] + spec: + <<: *deb_rpm_agent_spec + <<: *elastic_license_for_deb_rpm + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Complete image gets a 'complete' variant for synthetics and other large + # packages too big to fit in the main image + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *agent_docker_complete_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Cloud specific docker image + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *elastic_docker_spec + <<: *agent_docker_spec + <<: *agent_docker_cloud_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *agent_docker_spec + <<: *docker_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Complete image gets a 'complete' variant for synthetics and other large + # packages too big to fit in the main image + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *agent_docker_complete_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + # Cloud specific docker image + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *elastic_docker_spec + <<: *agent_docker_arm_spec + <<: *agent_docker_cloud_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *agent_docker_arm_spec + <<: *docker_arm_ubi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + 
'{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: aix + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 + + + # Elastic Beat with Elastic License and binary taken from the x-pack dir. + elastic_beat_agent_demo_binaries: + ### + # Elastic Licensed Packages + ### + + - os: linux + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: linux + types: [docker] + spec: + <<: *agent_docker_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + + - os: aix + types: [tgz] + spec: + <<: *agent_binary_spec + <<: *elastic_license_for_binaries + files: + '{{.BeatName}}{{.BinaryExt}}': + source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} diff --git a/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl new file mode 100644 index 00000000000..04c4dfde930 --- /dev/null +++ b/dev-tools/packaging/templates/ironbank/Dockerfile.tmpl @@ -0,0 +1,90 @@ +################################################################################ +# Build stage 0 +# Extract Elastic Agent and make various file manipulations. +################################################################################ +ARG BASE_REGISTRY=registry1.dsop.io +ARG BASE_IMAGE=ironbank/redhat/ubi/ubi8 +ARG BASE_TAG=8.6 + +FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG} as prep_files + +ARG ELASTIC_STACK={{ beat_version }} +ARG ELASTIC_PRODUCT=elastic-agent +ARG OS_AND_ARCH=linux-x86_64 + +RUN mkdir /usr/share/${ELASTIC_PRODUCT} +WORKDIR /usr/share/${ELASTIC_PRODUCT} +COPY --chown=1000:0 ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz . +RUN tar --strip-components=1 -zxf ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz \ + && rm ${ELASTIC_PRODUCT}-${ELASTIC_STACK}-${OS_AND_ARCH}.tar.gz + +# Support arbitrary user ids +# Ensure that group permissions are the same as user permissions. +# This will help when relying on GID-0 to run Kibana, rather than UID-1000. +# OpenShift does this, for example. +# REF: https://docs.okd.io/latest/openshift_images/create-images.html +RUN chmod -R g=u /usr/share/${ELASTIC_PRODUCT} + +# Create auxiliary folders and assigning default permissions. +RUN mkdir -p /usr/share/${ELASTIC_PRODUCT}/data /usr/share/${ELASTIC_PRODUCT}/logs && \ + chown -R root:root /usr/share/${ELASTIC_PRODUCT} && \ + find /usr/share/${ELASTIC_PRODUCT} -type d -exec chmod 0750 {} \; && \ + find /usr/share/${ELASTIC_PRODUCT} -type f -exec chmod 0640 {} \; && \ + chmod 0750 /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT} && \ + chmod 0770 /usr/share/${ELASTIC_PRODUCT}/data /usr/share/${ELASTIC_PRODUCT}/logs + +################################################################################ +# Build stage 1 +# Copy prepared files from the previous stage and complete the image. 
+################################################################################
+FROM ${BASE_REGISTRY}/${BASE_IMAGE}:${BASE_TAG}
+
+ARG ELASTIC_PRODUCT=elastic-agent
+
+COPY LICENSE /licenses/elastic-${ELASTIC_PRODUCT}
+
+# Add a dumb init process
+COPY tinit /tinit
+RUN chmod +x /tinit
+
+# Bring in product from the initial stage.
+COPY --from=prep_files --chown=1000:0 /usr/share/${ELASTIC_PRODUCT} /usr/share/${ELASTIC_PRODUCT}
+WORKDIR /usr/share/${ELASTIC_PRODUCT}
+RUN ln -s /usr/share/${ELASTIC_PRODUCT} /opt/${ELASTIC_PRODUCT}
+
+ENV ELASTIC_CONTAINER="true"
+RUN ln -s /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT} /usr/bin/${ELASTIC_PRODUCT}
+
+# Support arbitrary user ids
+# Ensure gid 0 write permissions for OpenShift.
+RUN chmod -R g+w /usr/share/${ELASTIC_PRODUCT}
+
+# The config file ("${ELASTIC_PRODUCT}.yml") must only be writable by root and the root group;
+# this is needed in configurations where the container has to run as root.
+RUN chown root:root /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT}.yml \
+  && chmod go-w /usr/share/${ELASTIC_PRODUCT}/${ELASTIC_PRODUCT}.yml
+
+# Remove the suid bit everywhere to mitigate "Stack Clash"
+RUN find / -xdev -perm -4000 -exec chmod u-s {} +
+
+# Provide a non-root user to run the process.
+RUN groupadd --gid 1000 ${ELASTIC_PRODUCT} && useradd --uid 1000 --gid 1000 --groups 0 --home-dir /usr/share/${ELASTIC_PRODUCT} --no-create-home ${ELASTIC_PRODUCT}
+
+# Elastic Agent permissions
+RUN find /usr/share/elastic-agent/data -type d -exec chmod 0770 {} \; && \
+  find /usr/share/elastic-agent/data -type f -exec chmod 0660 {} \; && \
+  chmod +x /usr/share/elastic-agent/data/elastic-agent-*/elastic-agent
+
+COPY jq /usr/local/bin
+RUN chown root:root /usr/local/bin/jq && chmod 0755 /usr/local/bin/jq
+
+COPY config/docker-entrypoint /usr/local/bin/docker-entrypoint
+RUN chmod 755 /usr/local/bin/docker-entrypoint
+
+USER ${ELASTIC_PRODUCT}
+ENV ELASTIC_PRODUCT=${ELASTIC_PRODUCT}
+
+ENTRYPOINT ["/tinit", "--", "/usr/local/bin/docker-entrypoint"]
+CMD [""]
+
+HEALTHCHECK --interval=10s --timeout=5s --start-period=1m --retries=5 CMD test -w '/tmp/elastic-agent/elastic-agent.sock'
diff --git a/dev-tools/packaging/templates/ironbank/README.md.tmpl b/dev-tools/packaging/templates/ironbank/README.md.tmpl
new file mode 100644
index 00000000000..271fdb8c0d7
--- /dev/null
+++ b/dev-tools/packaging/templates/ironbank/README.md.tmpl
@@ -0,0 +1,43 @@
+# elastic-agent
+
+**elastic-agent** is a single, unified way to add monitoring for logs, metrics, and other types of data to each host. A single agent makes it easier and faster to deploy monitoring across your infrastructure. The agent’s single, unified configuration makes it easier to add integrations for new data sources.
+
+For more information about elastic-agent, please visit
+https://www.elastic.co/guide/en/ingest-management/7.17/index.html.
+
+---
+
+**NOTE**
+
+This functionality is in beta and is subject to change. The design and code are less mature than official GA features and are provided as-is with no warranties. Beta features are not subject to the support SLA of official GA features.
+
+---
+
+### Installation instructions
+
+Please follow the documentation on [Quick start](https://www.elastic.co/guide/en/fleet/{{ .MajorMinor }}/fleet-elastic-agent-quick-start.html).
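+
+As a quick illustration of how the resulting image is started: the entrypoint
+wraps the `elastic-agent container` subcommand, so configuration is passed via
+environment variables. A minimal sketch follows; the registry path and tag are
+derived from this repository's hardening manifest and base registry (verify
+them before use), and the Fleet URL and token are placeholders you must
+replace. Run `elastic-agent container --help` for the full list of supported
+variables.
+
+```bash
+# Enroll the containerized agent into an existing Fleet setup.
+# FLEET_URL and FLEET_ENROLLMENT_TOKEN are placeholder values.
+docker run --rm \
+  -e FLEET_ENROLL=1 \
+  -e FLEET_URL="https://your-fleet-server:8220" \
+  -e FLEET_ENROLLMENT_TOKEN="<enrollment-token>" \
+  registry1.dsop.io/ironbank/elastic/beats/elastic-agent:{{ beat_version }}
+```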
+
+### Where to file issues and PRs
+
+- [Issues](https://github.com/elastic/elastic-agent/issues)
+- [PRs](https://github.com/elastic/elastic-agent/pulls)
+
+### DoD Restrictions
+
+### Where to get help
+
+- [elastic-agent Discuss Forums](https://discuss.elastic.co/tags/c/elastic-stack/beats/28/elastic-agent)
+- [elastic-agent Documentation](https://www.elastic.co/guide/en/ingest-management/current/index.html)
+
+### Still need help?
+
+You can learn more about the Elastic Community and how to get more help by
+visiting the [Elastic Community](https://www.elastic.co/community) page.
+
+This software is governed by the [Elastic
+License](https://github.com/elastic/beats/blob/{{ .MajorMinor }}/licenses/ELASTIC-LICENSE.txt),
+and includes the full set of [free
+features](https://www.elastic.co/subscriptions).
+
+View the detailed release notes
+[here](https://www.elastic.co/guide/en/beats/libbeat/current/release-notes-{{ beat_version }}.html).
diff --git a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl
new file mode 100644
index 00000000000..3c753caa0fb
--- /dev/null
+++ b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl
@@ -0,0 +1,68 @@
+---
+apiVersion: v1
+
+# The repository name in registry1, excluding /ironbank/
+name: "elastic/beats/elastic-agent"
+
+# List of tags to push for the repository in registry1
+# The most specific version should be the first tag and will be shown
+# on ironbank.dsop.io
+tags:
+- "{{ beat_version }}"
+- "latest"
+
+# Build args passed to Dockerfile ARGs
+args:
+  BASE_IMAGE: "redhat/ubi/ubi8"
+  BASE_TAG: "8.6"
+  ELASTIC_STACK: "{{ beat_version }}"
+  ELASTIC_PRODUCT: "elastic-agent"
+
+# Docker image labels
+labels:
+  org.opencontainers.image.title: "elastic-agent"
+  ## Human-readable description of the software packaged in the image
+  org.opencontainers.image.description: "elastic-agent is a single, unified way to add monitoring for logs, metrics, and other types of data to each host"
+  ## License(s) under which contained software is distributed
+  org.opencontainers.image.licenses: "Elastic License"
+  ## URL to find more information on the image
+  org.opencontainers.image.url: "https://www.elastic.co/products/beats/elastic-agent"
+  ## Name of the distributing entity, organization or individual
+  org.opencontainers.image.vendor: "Elastic"
+  org.opencontainers.image.version: "{{ beat_version }}"
+  ## Keywords to help with search (ex. 
"cicd,gitops,golang") + mil.dso.ironbank.image.keywords: "log,metrics,monitoring,observabilty,o11y,oblt,beats,elastic,elasticsearch,golang" + ## This value can be "opensource" or "commercial" + mil.dso.ironbank.image.type: "commercial" + ## Product the image belongs to for grouping multiple images + mil.dso.ironbank.product.name: "beats" + +# List of resources to make available to the offline build context +resources: + - filename: "elastic-agent-{{ beat_version }}-linux-x86_64.tar.gz" + url: "/elastic-agent-{{ beat_version }}-linux-x86_64.tar.gz" + validation: + type: "sha512" + value: "" + - filename: tinit + url: https://github.com/krallin/tini/releases/download/v0.19.0/tini-amd64 + validation: + type: sha256 + value: 93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c + - filename: jq + url: https://github.com/stedolan/jq/releases/download/jq-1.6/jq-linux64 + validation: + type: sha256 + value: af986793a515d500ab2d35f8d2aecd656e764504b789b66d7e1a0b727a124c44 + +# List of project maintainers +maintainers: + - email: "nassim.kammah@elastic.co" + name: "Nassim Kammah" + username: "nassim.kammah" + - email: "ivan.fernandez@elastic.co" + name: "Ivan Fernandez Calvo" + username: "ivan.fernandez" + - email: "victor.martinez@elastic.co" + name: "Victor Martinez" + username: "victor.martinez" diff --git a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl new file mode 100644 index 00000000000..083ebb91060 --- /dev/null +++ b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +set -e + +symlink="/usr/share/elastic-agent/bin/elastic-agent" +old_agent_dir="$( dirname "$(readlink -f -- "$symlink")" )" + +commit_hash="{{ commit_short }}" + +yml_path="$old_agent_dir/state.yml" +enc_path="$old_agent_dir/state.enc" + +new_agent_dir="$( dirname "$old_agent_dir")/elastic-agent-$commit_hash" + +if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then + echo "migrate state from $old_agent_dir to $new_agent_dir" + + if test -f "$yml_path"; then + echo "found "$yml_path", copy to "$new_agent_dir"." + cp "$yml_path" "$new_agent_dir" + fi + + if test -f "$enc_path"; then + echo "found "$enc_path", copy to "$new_agent_dir"." 
+ cp "$enc_path" "$new_agent_dir" + fi + + if test -f "$symlink"; then + echo "found symlink $symlink, unlink" + unlink "$symlink" + fi + + echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" + ln -s "$new_agent_dir/elastic-agent" "$symlink" +fi + +systemctl daemon-reload 2> /dev/null +exit 0 diff --git a/go.mod b/go.mod index 7751b77f60c..267e46602f2 100644 --- a/go.mod +++ b/go.mod @@ -13,7 +13,7 @@ require ( github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 - github.com/elastic/elastic-agent-libs v0.2.3 + github.com/elastic/elastic-agent-libs v0.2.6 github.com/elastic/go-licenser v0.4.0 github.com/elastic/go-sysinfo v1.7.1 github.com/elastic/go-ucfg v0.8.5 @@ -61,7 +61,7 @@ require ( github.com/armon/go-radix v1.0.0 // indirect github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e // indirect github.com/cenkalti/backoff/v4 v4.1.1 // indirect - github.com/containerd/containerd v1.5.10 // indirect + github.com/containerd/containerd v1.5.13 // indirect github.com/cyphar/filepath-securejoin v0.2.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dnephin/pflag v1.0.7 // indirect @@ -115,6 +115,7 @@ require ( go.elastic.co/apm/v2 v2.0.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect + go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect @@ -127,7 +128,7 @@ require ( google.golang.org/genproto v0.0.0-20220426171045-31bebdecfb46 // indirect google.golang.org/grpc/examples v0.0.0-20220304170021-431ea809a767 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect diff --git a/go.sum b/go.sum index 8ccda348f78..67d8b0f1cc1 100644 --- a/go.sum +++ b/go.sum @@ -92,7 +92,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -229,6 +229,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod 
h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -250,8 +251,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.2/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.10 h1:3cQ2uRVCkJVcx5VombsE7105Gl9Wrl7ORAO3+4+ogf4= -github.com/containerd/containerd v1.5.10/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ= +github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= +github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -385,8 +386,8 @@ github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 h1:8sGoTlgXRCesR1+FjBv8YY5CyVhNSDjXlo4uq5q1RGM= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= -github.com/elastic/elastic-agent-libs v0.2.3 h1:GY8M0fxOs/GBY2nIB+JOB91aoD72S87iEcm2qVGFUqI= -github.com/elastic/elastic-agent-libs v0.2.3/go.mod h1:1xDLBhIqBIjhJ7lr2s+xRFFkQHpitSp8q2zzv1Dqg+s= +github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= +github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY= github.com/elastic/go-elasticsearch/v7 v7.16.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= @@ -1251,8 +1252,9 @@ go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.1.11 h1:wy28qYRKZgnJTxGxvye5/wgWr1EKjmUDGYox5mGlRlI= go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= +go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= +go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod 
h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= @@ -1553,6 +1555,7 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= @@ -1876,8 +1879,9 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/gotestsum v1.7.0 h1:RwpqwwFKBAa2h+F6pMEGpE707Edld0etUD3GhqqhDNc= diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go index a3f4ff0b3ea..0913b484712 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_policy_change.go @@ -159,7 +159,7 @@ func (h *PolicyChange) handleFleetServerHosts(ctx context.Context, c *config.Con errors.TypeNetwork, errors.M("hosts", h.config.Fleet.Client.Hosts)) } // discard body for proper cancellation and connection reuse - io.Copy(ioutil.Discard, resp.Body) + _, _ = io.Copy(ioutil.Discard, resp.Body) resp.Body.Close() reader, err := fleetToReader(h.agentInfo, h.config) diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index d5bc6a182d8..bf0d0fd6444 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -70,7 +70,7 @@ func New( return nil, fmt.Errorf("failed to load configuration: %w", err) } - upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig) + upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig, agentInfo) runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) if err != nil { @@ -121,7 +121,7 @@ func New( return nil, errors.New(err, "failed to initialize composable controller") } - coord 
:= coordinator.New(log, specs, reexec, upgrader, runtime, configMgr, composable, caps, compModifiers...) + coord := coordinator.New(log, agentInfo, specs, reexec, upgrader, runtime, configMgr, composable, caps, compModifiers...) if managed != nil { // the coordinator requires the config manager as well as in managed-mode the config manager requires the // coordinator, so it must be set here once the coordinator is created diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index dac48400179..906b9af2d64 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -42,6 +42,9 @@ type UpgradeManager interface { // Upgradeable returns true if can be upgraded. Upgradeable() bool + // Reload reloads the configuration for the upgrade manager. + Reload(rawConfig *config.Config) error + // Upgrade upgrades running agent. Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) } @@ -129,7 +132,8 @@ type StateFetcher interface { // // All configuration changes, update variables, and upgrade actions are managed and controlled by the coordinator. type Coordinator struct { - logger *logger.Logger + logger *logger.Logger + agentInfo *info.AgentInfo specs component.RuntimeSpecs @@ -150,9 +154,10 @@ type Coordinator struct { } // New creates a new coordinator. -func New(logger *logger.Logger, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { +func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator { return &Coordinator{ logger: logger, + agentInfo: agentInfo, specs: specs, reexecMgr: reexecMgr, upgradeMgr: upgradeMgr, @@ -429,6 +434,10 @@ func (c *Coordinator) processConfig(ctx context.Context, cfg *config.Config) (er } } + if err := c.upgradeMgr.Reload(cfg); err != nil { + return fmt.Errorf("failed to reload upgrade manager configuration: %w", err) + } + c.state.config = cfg c.state.ast = rawAst @@ -505,10 +514,9 @@ type coordinatorState struct { message string overrideState *coordinatorOverrideState - config *config.Config - ast *transpiler.AST - vars []*transpiler.Vars - components []component.Component + config *config.Config + ast *transpiler.AST + vars []*transpiler.Vars } type coordinatorOverrideState struct { diff --git a/internal/pkg/agent/application/coordinator/handler.go b/internal/pkg/agent/application/coordinator/handler.go new file mode 100644 index 00000000000..22130d1a776 --- /dev/null +++ b/internal/pkg/agent/application/coordinator/handler.go @@ -0,0 +1,47 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package coordinator + +import ( + "encoding/json" + "net/http" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" +) + +// LivenessResponse is the response body for the liveness endpoint. 
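+// The same JSON shape is written for both outcomes: HTTP 200 when the agent
+// is healthy and HTTP 503 otherwise.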
+type LivenessResponse struct {
+	ID         string    `json:"id"`
+	Status     string    `json:"status"`
+	Message    string    `json:"message"`
+	UpdateTime time.Time `json:"update_timestamp"`
+}
+
+// ServeHTTP is an HTTP Handler for the coordinator.
+// Response code is 200 for a healthy agent, and 503 otherwise.
+// Response body is a JSON object that contains the agent ID, status, message, and the last status update time.
+func (c *Coordinator) ServeHTTP(wr http.ResponseWriter, req *http.Request) {
+	s := c.State()
+	lr := LivenessResponse{
+		ID:      c.agentInfo.AgentID(),
+		Status:  s.State.String(),
+		Message: s.Message,
+
+		// TODO(blakerouse): Coordinator should be changed to store the last timestamp that the state has changed.
+		UpdateTime: time.Now().UTC(),
+	}
+	status := http.StatusOK
+	if s.State != client.Healthy {
+		status = http.StatusServiceUnavailable
+	}
+
+	wr.Header().Set("Content-Type", "application/json")
+	wr.WriteHeader(status)
+	enc := json.NewEncoder(wr)
+	if err := enc.Encode(lr); err != nil {
+		c.logger.Errorf("Unable to encode liveness response: %v", err)
+	}
+}
diff --git a/internal/pkg/agent/application/info/agent_id.go b/internal/pkg/agent/application/info/agent_id.go
index e0a6c64acbe..8056fd0cce1 100644
--- a/internal/pkg/agent/application/info/agent_id.go
+++ b/internal/pkg/agent/application/info/agent_id.go
@@ -71,7 +71,7 @@ func getInfoFromStore(s ioStore, logLevel string) (*persistentAgentInfo, error)
 	agentConfigFile := paths.AgentConfigFile()
 	reader, err := s.Load()
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to load from ioStore: %w", err)
 	}
 
 	// reader is closed by this function
@@ -203,20 +203,20 @@ func loadAgentInfo(forceUpdate bool, logLevel string, createAgentID bool) (*pers
 	agentConfigFile := paths.AgentConfigFile()
 	diskStore := storage.NewEncryptedDiskStore(agentConfigFile)
 
-	agentinfo, err := getInfoFromStore(diskStore, logLevel)
+	agentInfo, err := getInfoFromStore(diskStore, logLevel)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("could not get agent info from store: %w", err)
 	}
 
-	if agentinfo != nil && !forceUpdate && (agentinfo.ID != "" || !createAgentID) {
-		return agentinfo, nil
+	if agentInfo != nil && !forceUpdate && (agentInfo.ID != "" || !createAgentID) {
+		return agentInfo, nil
 	}
 
-	if err := updateID(agentinfo, diskStore); err != nil {
-		return nil, err
+	if err := updateID(agentInfo, diskStore); err != nil {
+		return nil, fmt.Errorf("could not update agent ID on disk store: %w", err)
 	}
 
-	return agentinfo, nil
+	return agentInfo, nil
 }
 
 func updateID(agentInfo *persistentAgentInfo, s ioStore) error {
diff --git a/internal/pkg/agent/application/info/agent_metadata.go b/internal/pkg/agent/application/info/agent_metadata.go
index a532487a446..49afeca9dc7 100644
--- a/internal/pkg/agent/application/info/agent_metadata.go
+++ b/internal/pkg/agent/application/info/agent_metadata.go
@@ -10,10 +10,11 @@ import (
 	"runtime"
 	"strings"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/release"
 	"github.com/elastic/go-sysinfo"
 	"github.com/elastic/go-sysinfo/types"
+
+	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
+	"github.com/elastic/elastic-agent/internal/pkg/release"
 )
 
 // ECSMeta is a collection of agent related metadata in ECS compliant object form.
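// A minimal sketch of how the liveness handler added in coordinator/handler.go
// above can be exercised with net/http and net/http/httptest; the mux path and
// the coord value (a wired *coordinator.Coordinator) are illustrative
// assumptions:
//
//	mux := http.NewServeMux()
//	mux.Handle("/liveness", coord) // *Coordinator implements http.Handler
//	srv := httptest.NewServer(mux)
//	defer srv.Close()
//	resp, err := http.Get(srv.URL + "/liveness")
//	if err == nil {
//		var lr coordinator.LivenessResponse
//		_ = json.NewDecoder(resp.Body).Decode(&lr) // 200 when healthy, 503 otherwise
//		resp.Body.Close()
//	}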
@@ -123,7 +124,7 @@ const (
 func Metadata() (*ECSMeta, error) {
 	agentInfo, err := NewAgentInfo(false)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to create new agent info: %w", err)
 	}
 
 	meta, err := agentInfo.ECSMetadata()
diff --git a/internal/pkg/agent/application/paths/paths_linux.go b/internal/pkg/agent/application/paths/paths_linux.go
index 22faeb5f75a..37cc57c33af 100644
--- a/internal/pkg/agent/application/paths/paths_linux.go
+++ b/internal/pkg/agent/application/paths/paths_linux.go
@@ -14,5 +14,5 @@ const defaultAgentVaultPath = "vault"
 
 // AgentVaultPath is the directory that contains all the files for the value
 func AgentVaultPath() string {
-	return filepath.Join(Home(), defaultAgentVaultPath)
+	return filepath.Join(Config(), defaultAgentVaultPath)
 }
diff --git a/internal/pkg/agent/application/paths/paths_windows.go b/internal/pkg/agent/application/paths/paths_windows.go
index 2fc6fd008a0..0b81aa2061b 100644
--- a/internal/pkg/agent/application/paths/paths_windows.go
+++ b/internal/pkg/agent/application/paths/paths_windows.go
@@ -42,5 +42,5 @@ func ArePathsEqual(expected, actual string) bool {
 
 // AgentVaultPath is the directory that contains all the files for the value
 func AgentVaultPath() string {
-	return filepath.Join(Home(), defaultAgentVaultPath)
+	return filepath.Join(Config(), defaultAgentVaultPath)
 }
diff --git a/internal/pkg/agent/application/secret/secret.go b/internal/pkg/agent/application/secret/secret.go
index edce9eda174..bd2ee546454 100644
--- a/internal/pkg/agent/application/secret/secret.go
+++ b/internal/pkg/agent/application/secret/secret.go
@@ -2,10 +2,12 @@
 // or more contributor license agreements. Licensed under the Elastic License;
 // you may not use this file except in compliance with the Elastic License.
 
+// Package secret manages application secrets.
 package secret
 
 import (
 	"encoding/json"
+	"fmt"
 	"runtime"
 	"sync"
 	"time"
@@ -52,7 +54,7 @@ func Create(key string, opts ...OptionFunc) error {
 	options := applyOptions(opts...)
 	v, err := vault.New(options.vaultPath)
 	if err != nil {
-		return err
+		return fmt.Errorf("could not create new vault: %w", err)
 	}
 	defer v.Close()
 
@@ -80,12 +82,7 @@ func Create(key string, opts ...OptionFunc) error {
 		CreatedOn: time.Now().UTC(),
 	}
 
-	b, err := json.Marshal(secret)
-	if err != nil {
-		return err
-	}
-
-	return v.Set(key, b)
+	return set(v, key, secret)
 }
 
 // GetAgentSecret read the agent secret from the vault
@@ -93,10 +90,17 @@ func GetAgentSecret(opts ...OptionFunc) (secret Secret, err error) {
 	return Get(agentSecretKey, opts...)
 }
 
+// SetAgentSecret saves the agent secret to the vault.
+// This is needed for migration from 8.3.0-8.3.2 to higher versions.
+func SetAgentSecret(secret Secret, opts ...OptionFunc) error {
+	return Set(agentSecretKey, secret, opts...)
+}
+
 // Get reads the secret key from the vault
 func Get(key string, opts ...OptionFunc) (secret Secret, err error) {
 	options := applyOptions(opts...)
-	v, err := vault.New(options.vaultPath)
+	// open the vault readonly; this will not create the vault directory or the seed if it was not created before
+	v, err := vault.New(options.vaultPath, vault.WithReadonly(true))
 	if err != nil {
 		return secret, err
 	}
@@ -111,12 +115,32 @@ func Get(key string, opts ...OptionFunc) (secret Secret, err error) {
 	return secret, err
 }
 
+// Set saves the secret key to the vault
+func Set(key string, secret Secret, opts ...OptionFunc) error {
+	options := applyOptions(opts...)
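+	// Unlike Get above, Set opens the vault read-write, so a missing vault
+	// directory and seed are created on first use.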
+ v, err := vault.New(options.vaultPath) + if err != nil { + return fmt.Errorf("could not create new vault: %w", err) + } + defer v.Close() + return set(v, key, secret) +} + +func set(v *vault.Vault, key string, secret Secret) error { + b, err := json.Marshal(secret) + if err != nil { + return fmt.Errorf("could not marshal secret: %w", err) + } + + return v.Set(key, b) +} + // Remove removes the secret key from the vault func Remove(key string, opts ...OptionFunc) error { options := applyOptions(opts...) v, err := vault.New(options.vaultPath) if err != nil { - return err + return fmt.Errorf("could not create new vault: %w", err) } defer v.Close() diff --git a/internal/pkg/agent/application/upgrade/artifact/config.go b/internal/pkg/agent/application/upgrade/artifact/config.go index c190c02d239..6db38fa612c 100644 --- a/internal/pkg/agent/application/upgrade/artifact/config.go +++ b/internal/pkg/agent/application/upgrade/artifact/config.go @@ -17,6 +17,9 @@ const ( darwin = "darwin" linux = "linux" windows = "windows" + + // DefaultSourceURI is the default source URI for downloading artifacts. + DefaultSourceURI = "https://artifacts.elastic.co/downloads/" ) // Config is a configuration used for verifier and downloader @@ -56,7 +59,7 @@ func DefaultConfig() *Config { transport.Timeout = 10 * time.Minute return &Config{ - SourceURI: "https://artifacts.elastic.co/downloads/", + SourceURI: DefaultSourceURI, TargetDirectory: paths.Downloads(), InstallPath: paths.Install(), HTTPTransportSettings: transport, diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go index 5a6762b40fc..11784e2d0f5 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader_test.go @@ -65,9 +65,9 @@ func TestDownloadBodyError(t *testing.T) { } require.GreaterOrEqual(t, len(log.info), 1, "download error not logged at info level") - assert.Equal(t, log.info[len(log.info)-1].record, "download from %s failed at %s @ %sps: %s") + assert.True(t, containsMessage(log.info, "download from %s failed at %s @ %sps: %s")) require.GreaterOrEqual(t, len(log.warn), 1, "download error not logged at warn level") - assert.Equal(t, log.warn[len(log.warn)-1].record, "download from %s failed at %s @ %sps: %s") + assert.True(t, containsMessage(log.warn, "download from %s failed at %s @ %sps: %s")) } func TestDownloadLogProgressWithLength(t *testing.T) { @@ -208,3 +208,12 @@ func (f *recordLogger) Warnf(record string, args ...interface{}) { defer f.lock.Unlock() f.warn = append(f.warn, logMessage{record, args}) } + +func containsMessage(logs []logMessage, msg string) bool { + for _, item := range logs { + if item.record == msg { + return true + } + } + return false +} diff --git a/internal/pkg/agent/application/upgrade/cleanup.go b/internal/pkg/agent/application/upgrade/cleanup.go new file mode 100644 index 00000000000..5e0618dfe78 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/cleanup.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
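+//
+// A minimal sketch of the intended call to preUpgradeCleanup defined below;
+// the version literal and the log value are illustrative assumptions:
+//
+//	// remove stale artifacts, keeping only files whose names contain "8.4.0"
+//	if err := preUpgradeCleanup("8.4.0"); err != nil {
+//		log.Errorf("cleanup failed: %v", err)
+//	}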
+ +package upgrade + +import ( + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/hashicorp/go-multierror" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" +) + +// preUpgradeCleanup will remove files that do not have the passed version number from the downloads directory. +func preUpgradeCleanup(version string) error { + files, err := os.ReadDir(paths.Downloads()) + if err != nil { + return fmt.Errorf("unable to read directory %q: %w", paths.Downloads(), err) + } + var rErr error + for _, file := range files { + if file.IsDir() { + continue + } + if !strings.Contains(file.Name(), version) { + if err := os.Remove(filepath.Join(paths.Downloads(), file.Name())); err != nil { + rErr = multierror.Append(rErr, fmt.Errorf("unable to remove file %q: %w", filepath.Join(paths.Downloads(), file.Name()), err)) + } + } + } + return rErr +} diff --git a/internal/pkg/agent/application/upgrade/cleanup_test.go b/internal/pkg/agent/application/upgrade/cleanup_test.go new file mode 100644 index 00000000000..736a9c42b3d --- /dev/null +++ b/internal/pkg/agent/application/upgrade/cleanup_test.go @@ -0,0 +1,44 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package upgrade + +import ( + "os" + "path/filepath" + "testing" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + + "github.com/stretchr/testify/require" +) + +func setupDir(t *testing.T) { + t.Helper() + dir := t.TempDir() + paths.SetDownloads(dir) + + err := os.WriteFile(filepath.Join(dir, "test-8.3.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-8.4.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-8.5.0-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) + err = os.WriteFile(filepath.Join(dir, "test-hash-file"), []byte("hello, world!"), 0600) + require.NoError(t, err) +} + +func TestPreUpgradeCleanup(t *testing.T) { + setupDir(t) + err := preUpgradeCleanup("8.4.0") + require.NoError(t, err) + + files, err := os.ReadDir(paths.Downloads()) + require.NoError(t, err) + require.Len(t, files, 1) + require.Equal(t, "test-8.4.0-file", files[0].Name()) + p, err := os.ReadFile(filepath.Join(paths.Downloads(), files[0].Name())) + require.NoError(t, err) + require.Equal(t, []byte("hello, world!"), p) +} diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index 51b0adbb184..7757ff6a9a1 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -38,6 +38,58 @@ type UpdateMarker struct { Action *fleetapi.ActionUpgrade `json:"action" yaml:"action"` } +// MarkerActionUpgrade adapter struct compatible with pre 8.3 version of the marker file format +type MarkerActionUpgrade struct { + ActionID string `yaml:"id"` + ActionType string `yaml:"type"` + Version string `yaml:"version"` + SourceURI string `yaml:"source_uri,omitempty"` +} + +func convertToMarkerAction(a *fleetapi.ActionUpgrade) *MarkerActionUpgrade { + if a == nil { + return nil + } + return &MarkerActionUpgrade{ + ActionID: a.ActionID, + ActionType: a.ActionType, + Version: a.Version, + SourceURI: a.SourceURI, + } +} + +func convertToActionUpgrade(a *MarkerActionUpgrade) 
*fleetapi.ActionUpgrade {
+	if a == nil {
+		return nil
+	}
+	return &fleetapi.ActionUpgrade{
+		ActionID:   a.ActionID,
+		ActionType: a.ActionType,
+		Version:    a.Version,
+		SourceURI:  a.SourceURI,
+	}
+}
+
+type updateMarkerSerializer struct {
+	Hash        string               `yaml:"hash"`
+	UpdatedOn   time.Time            `yaml:"updated_on"`
+	PrevVersion string               `yaml:"prev_version"`
+	PrevHash    string               `yaml:"prev_hash"`
+	Acked       bool                 `yaml:"acked"`
+	Action      *MarkerActionUpgrade `yaml:"action"`
+}
+
+func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer {
+	return &updateMarkerSerializer{
+		Hash:        m.Hash,
+		UpdatedOn:   m.UpdatedOn,
+		PrevVersion: m.PrevVersion,
+		PrevHash:    m.PrevHash,
+		Acked:       m.Acked,
+		Action:      convertToMarkerAction(m.Action),
+	}
+}
+
 // markUpgrade marks update happened so we can handle grace period
 func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi.ActionUpgrade) error {
 	prevVersion := release.Version()
@@ -46,7 +98,7 @@
 		prevHash = prevHash[:hashLen]
 	}
 
-	marker := UpdateMarker{
+	marker := &UpdateMarker{
 		Hash:        hash,
 		UpdatedOn:   time.Now(),
 		PrevVersion: prevVersion,
@@ -54,7 +106,7 @@
 		Action:      action,
 	}
 
-	markerBytes, err := yaml.Marshal(marker)
+	markerBytes, err := yaml.Marshal(newMarkerSerializer(marker))
 	if err != nil {
 		return errors.New(err, errors.TypeConfig, "failed to parse marker file")
 	}
@@ -103,16 +155,31 @@ func LoadMarker() (*UpdateMarker, error) {
 		return nil, err
 	}
 
-	marker := &UpdateMarker{}
+	marker := &updateMarkerSerializer{}
 	if err := yaml.Unmarshal(markerBytes, &marker); err != nil {
 		return nil, err
 	}
 
-	return marker, nil
+	return &UpdateMarker{
+		Hash:        marker.Hash,
+		UpdatedOn:   marker.UpdatedOn,
+		PrevVersion: marker.PrevVersion,
+		PrevHash:    marker.PrevHash,
+		Acked:       marker.Acked,
+		Action:      convertToActionUpgrade(marker.Action),
+	}, nil
 }
 
 func saveMarker(marker *UpdateMarker) error {
-	markerBytes, err := yaml.Marshal(marker)
+	markerSerializer := &updateMarkerSerializer{
+		Hash:        marker.Hash,
+		UpdatedOn:   marker.UpdatedOn,
+		PrevVersion: marker.PrevVersion,
+		PrevHash:    marker.PrevHash,
+		Acked:       marker.Acked,
+		Action:      convertToMarkerAction(marker.Action),
+	}
+	markerBytes, err := yaml.Marshal(markerSerializer)
 	if err != nil {
 		return err
 	}
diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go
index 444927a6052..edc70c3f5c0 100644
--- a/internal/pkg/agent/application/upgrade/upgrade.go
+++ b/internal/pkg/agent/application/upgrade/upgrade.go
@@ -5,25 +5,23 @@
 package upgrade
 
 import (
-	"bytes"
 	"context"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
-	"runtime"
 	"strings"
 
+	"github.com/elastic/elastic-agent/internal/pkg/config"
+
 	"github.com/otiai10/copy"
 	"go.elastic.co/apm"
 
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/application/secret"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
-	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/internal/pkg/release"
@@
-34,12 +32,11 @@ const ( agentName = "elastic-agent" hashLen = 6 agentCommitFile = ".elastic-agent.active.commit" - darwin = "darwin" ) var ( agentArtifact = artifact.Artifact{ - Name: "elastic-agent", + Name: "Elastic Agent", Cmd: agentName, Artifact: "beats/" + agentName, } @@ -54,6 +51,7 @@ var ( type Upgrader struct { log *logger.Logger settings *artifact.Config + agentInfo *info.AgentInfo upgradeable bool } @@ -65,28 +63,72 @@ func IsUpgradeable() bool { } // NewUpgrader creates an upgrader which is capable of performing upgrade operation -func NewUpgrader(log *logger.Logger, settings *artifact.Config) *Upgrader { +func NewUpgrader(log *logger.Logger, settings *artifact.Config, agentInfo *info.AgentInfo) *Upgrader { return &Upgrader{ log: log, settings: settings, + agentInfo: agentInfo, upgradeable: IsUpgradeable(), } } +// Reload reloads the artifact configuration for the upgrader. +func (u *Upgrader) Reload(rawConfig *config.Config) error { + type reloadConfig struct { + // SourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ + SourceURI string `json:"agent.download.sourceURI" config:"agent.download.sourceURI"` + + // FleetSourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ coming from fleet which uses + // different naming. + FleetSourceURI string `json:"agent.download.source_uri" config:"agent.download.source_uri"` + } + cfg := &reloadConfig{} + if err := rawConfig.Unpack(&cfg); err != nil { + return errors.New(err, "failed to unpack config during reload") + } + + var newSourceURI string + if cfg.FleetSourceURI != "" { + // fleet configuration takes precedence + newSourceURI = cfg.FleetSourceURI + } else if cfg.SourceURI != "" { + newSourceURI = cfg.SourceURI + } + + if newSourceURI != "" { + u.log.Infof("Source URI changed from %q to %q", u.settings.SourceURI, newSourceURI) + u.settings.SourceURI = newSourceURI + } else { + // source uri unset, reset to default + u.log.Infof("Source URI reset from %q to %q", u.settings.SourceURI, artifact.DefaultSourceURI) + u.settings.SourceURI = artifact.DefaultSourceURI + } + return nil +} + // Upgradeable returns true if the Elastic Agent can be upgraded. func (u *Upgrader) Upgradeable() bool { return u.upgradeable } -// Upgrade upgrades running agent, function returns shutdown callback if some needs to be executed for cases when -// reexec is called by caller. +// Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec. func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) { span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() + err = preUpgradeCleanup(u.agentInfo.Version()) + if err != nil { + u.log.Errorf("Unable to clean downloads dir %q before update: %v", paths.Downloads(), err) + } + sourceURI = u.sourceURI(sourceURI) archivePath, err := u.downloadArtifact(ctx, version, sourceURI) if err != nil { + // Run the same preUpgradeCleanup task to get rid of any newly downloaded files + // This may have an issue if users are upgrading to the same version number. 
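+		// (the cleanup keeps every file whose name contains the currently
+		// running version, so a failed download of that same version is left
+		// behind rather than removed)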
+ if dErr := preUpgradeCleanup(u.agentInfo.Version()); dErr != nil { + u.log.Errorf("Unable to remove file after verification failure: %v", dErr) + } return nil, err } @@ -103,19 +145,10 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string return nil, ErrSameVersion } - // Copy vault directory for linux/windows only - if err := copyVault(newHash); err != nil { - return nil, errors.New(err, "failed to copy vault") - } - if err := copyActionStore(newHash); err != nil { return nil, errors.New(err, "failed to copy action store") } - if err := encryptConfigIfNeeded(u.log, newHash); err != nil { - return nil, errors.New(err, "failed to encrypt the configuration") - } - if err := ChangeSymlink(ctx, newHash); err != nil { rollbackInstall(ctx, newHash) return nil, err @@ -132,6 +165,13 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string } cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) + + // Clean everything from the downloads dir + err = os.RemoveAll(paths.Downloads()) + if err != nil { + u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + } + return cb, nil } @@ -158,6 +198,8 @@ func (u *Upgrader) Ack(ctx context.Context, acker acker.Acker) error { return err } + marker.Acked = true + return saveMarker(marker) } @@ -199,103 +241,6 @@ func copyActionStore(newHash string) error { return nil } -func getVaultPath(newHash string) string { - vaultPath := paths.AgentVaultPath() - if runtime.GOOS == darwin { - return vaultPath - } - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) - return filepath.Join(newHome, filepath.Base(vaultPath)) -} - -// Copies the vault files for windows and linux -func copyVault(newHash string) error { - // No vault files to copy on darwin - if runtime.GOOS == darwin { - return nil - } - - vaultPath := paths.AgentVaultPath() - newVaultPath := getVaultPath(newHash) - - err := copyDir(vaultPath, newVaultPath) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - return nil -} - -// Create the key if it doesn't exist and encrypt the fleet.yml and state.yml -func encryptConfigIfNeeded(log *logger.Logger, newHash string) (err error) { - vaultPath := getVaultPath(newHash) - - err = secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) - if err != nil { - return err - } - - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) - ymlStateStorePath := filepath.Join(newHome, filepath.Base(paths.AgentStateStoreYmlFile())) - stateStorePath := filepath.Join(newHome, filepath.Base(paths.AgentStateStoreFile())) - - files := []struct { - Src string - Dst string - }{ - { - Src: ymlStateStorePath, - Dst: stateStorePath, - }, - { - Src: paths.AgentConfigYmlFile(), - Dst: paths.AgentConfigFile(), - }, - } - for _, f := range files { - var b []byte - b, err = ioutil.ReadFile(f.Src) - if err != nil { - if os.IsNotExist(err) { - continue - } - return err - } - - // Encrypt yml file - store := storage.NewEncryptedDiskStore(f.Dst, storage.WithVaultPath(vaultPath)) - err = store.Save(bytes.NewReader(b)) - if err != nil { - return err - } - - // Remove yml file if no errors - defer func(fp string) { - if err != nil { - return - } - if rerr := os.Remove(fp); rerr != nil { - log.Warnf("failed to remove file: %s, err: %v", fp, rerr) - } - }(f.Src) - } - - // Do not remove AgentConfigYmlFile lock file if any error happened. 
- if err != nil { - return err - } - - lockFp := paths.AgentConfigYmlFile() + ".lock" - if rerr := os.Remove(lockFp); rerr != nil { - log.Warnf("failed to remove file: %s, err: %v", lockFp, rerr) - } - - return err -} - // shutdownCallback returns a callback function to be executing during shutdown once all processes are closed. // this goes through runtime directory of agent and copies all the state files created by processes to new versioned // home directory with updated process name to match new version. diff --git a/internal/pkg/agent/cleaner/cleaner.go b/internal/pkg/agent/cleaner/cleaner.go new file mode 100644 index 00000000000..856ae020b89 --- /dev/null +++ b/internal/pkg/agent/cleaner/cleaner.go @@ -0,0 +1,111 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cleaner + +import ( + "context" + "os" + "sync" + "time" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/fileutil" +) + +// Wait interval. +// If the watchFile was not modified after this interval, then remove all the files in the removeFiles array +const defaultCleanWait = 15 * time.Minute + +type Cleaner struct { + log *logp.Logger + watchFile string + removeFiles []string + cleanWait time.Duration + + mx sync.Mutex +} + +type OptionFunc func(c *Cleaner) + +func New(log *logp.Logger, watchFile string, removeFiles []string, opts ...OptionFunc) *Cleaner { + c := &Cleaner{ + log: log, + watchFile: watchFile, + removeFiles: removeFiles, + cleanWait: defaultCleanWait, + } + + for _, opt := range opts { + opt(c) + } + return c +} + +func WithCleanWait(cleanWait time.Duration) OptionFunc { + return func(c *Cleaner) { + c.cleanWait = cleanWait + } +} + +func (c *Cleaner) Run(ctx context.Context) error { + wait, done, err := c.process() + if err != nil { + return err + } + + if done { + return nil + } + + t := time.NewTimer(wait) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return nil + case <-t.C: + c.log.Debug("cleaner: timer triggered") + wait, done, err = c.process() + if err != nil { + return err + } + + if done { + return nil + } + t.Reset(wait) + } + } +} + +func (c *Cleaner) process() (wait time.Duration, done bool, err error) { + modTime, err := fileutil.GetModTime(c.watchFile) + if err != nil { + return + } + + c.log.Debugf("cleaner: check file %s mod time: %v", c.watchFile, modTime) + curDur := time.Since(modTime) + if curDur > c.cleanWait { + c.log.Debugf("cleaner: file %s modification expired", c.watchFile) + c.deleteFiles() + return wait, true, nil + } + wait = c.cleanWait - curDur + return wait, false, nil +} + +func (c *Cleaner) deleteFiles() { + c.log.Debugf("cleaner: delete files: %v", c.removeFiles) + c.mx.Lock() + defer c.mx.Unlock() + for _, fp := range c.removeFiles { + c.log.Debugf("cleaner: delete file: %v", fp) + err := os.Remove(fp) + if err != nil { + c.log.Warnf("cleaner: delete file %v failed: %v", fp, err) + } + } +} diff --git a/internal/pkg/agent/cleaner/cleaner_test.go b/internal/pkg/agent/cleaner/cleaner_test.go new file mode 100644 index 00000000000..cf189b784d3 --- /dev/null +++ b/internal/pkg/agent/cleaner/cleaner_test.go @@ -0,0 +1,68 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cleaner + +import ( + "context" + "io/ioutil" + "os" + "path/filepath" + "testing" + "time" + + "github.com/elastic/elastic-agent-libs/logp" +) + +func TestCleaner(t *testing.T) { + // Setup + const watchFileName = "fleet.enc" + removeFiles := []string{"fleet.yml", "fleet.yml.lock"} + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + dir := t.TempDir() + watchFilePath := filepath.Join(dir, watchFileName) + + removeFilePaths := make([]string, len(removeFiles)) + + checkDir(t, dir, 0) + + // Create files + err := ioutil.WriteFile(watchFilePath, []byte{}, 0600) + if err != nil { + t.Fatal(err) + } + + for i, fn := range removeFiles { + removeFilePaths[i] = filepath.Join(dir, fn) + err := ioutil.WriteFile(removeFilePaths[i], []byte{}, 0600) + if err != nil { + t.Fatal(err) + } + } + + checkDir(t, dir, len(removeFiles)+1) + + log := logp.NewLogger("dynamic") + cleaner := New(log, watchFilePath, removeFilePaths, WithCleanWait(500*time.Millisecond)) + err = cleaner.Run(ctx) + if err != nil { + t.Fatal(err) + } + checkDir(t, dir, 1) +} + +func checkDir(t *testing.T, dir string, expectedCount int) { + t.Helper() + entries, err := os.ReadDir(dir) + if err != nil { + t.Fatal(err) + } + + if len(entries) != expectedCount { + t.Fatalf("Dir %s expected %d entries, found %d", dir, expectedCount, len(entries)) + } +} diff --git a/internal/pkg/agent/cmd/diagnostics_test.go b/internal/pkg/agent/cmd/diagnostics_test.go index cec6a6f3450..99d98ef78de 100644 --- a/internal/pkg/agent/cmd/diagnostics_test.go +++ b/internal/pkg/agent/cmd/diagnostics_test.go @@ -17,10 +17,12 @@ import ( "testing" "time" + "github.com/elastic/elastic-agent-libs/transport/tlscommon" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" + "github.com/elastic/elastic-agent/internal/pkg/agent/program" ) var testDiagnostics = DiagnosticsInfo{ @@ -31,7 +33,7 @@ var testDiagnostics = DiagnosticsInfo{ BuildTime: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), Snapshot: false, }, - ProcMeta: []client.ProcMeta{client.ProcMeta{ + ProcMeta: []client.ProcMeta{{ Process: "filebeat", Name: "filebeat", Hostname: "test-host", @@ -46,7 +48,7 @@ var testDiagnostics = DiagnosticsInfo{ BinaryArchitecture: "test-architecture", RouteKey: "test", ElasticLicensed: true, - }, client.ProcMeta{ + }, { Process: "filebeat", Name: "filebeat_monitoring", Hostname: "test-host", @@ -61,7 +63,7 @@ var testDiagnostics = DiagnosticsInfo{ BinaryArchitecture: "test-architecture", RouteKey: "test", ElasticLicensed: true, - }, client.ProcMeta{ + }, { Name: "metricbeat", RouteKey: "test", Error: "failed to get metricbeat data", @@ -138,4 +140,77 @@ func Test_collectEndpointSecurityLogs_noEndpointSecurity(t *testing.T) { err := collectEndpointSecurityLogs(zw, specs) assert.NoError(t, err, "collectEndpointSecurityLogs should not return an error") } + +func Test_redact(t *testing.T) { + tests := []struct { + name string + arg interface{} + wantRedacted []string + wantErr assert.ErrorAssertionFunc + }{ + { + name: "tlscommon.Config", + arg: tlscommon.Config{ + Enabled: nil, + VerificationMode: 0, + Versions: nil, + CipherSuites: nil, + CAs: []string{"ca1", "ca2"}, + Certificate: tlscommon.CertificateConfig{ + Certificate: "Certificate", + Key: "Key", + Passphrase: "Passphrase", + }, + CurveTypes: nil, + Renegotiation: 0, + CASha256: nil, + 
CATrustedFingerprint: "",
+				},
+			},
+			wantRedacted: []string{
+				"certificate", "key", "key_passphrase", "certificate_authorities"},
+		},
+		{
+			name: "some map",
+			arg: map[string]interface{}{
+				"s":          "sss",
+				"some_key":   "hey, a key!",
+				"a_password": "changeme",
+				"my_token":   "a_token",
+				"nested": map[string]string{
+					"4242":            "4242",
+					"4242key":         "4242key",
+					"4242password":    "4242password",
+					"4242certificate": "4242certificate",
+				},
+			},
+			wantRedacted: []string{
+				"some_key", "a_password", "my_token", "4242key", "4242password", "4242certificate"},
+		},
+	}
+
+	for _, tt := range tests {
+		t.Run(tt.name, func(t *testing.T) {
+			got, err := redact(tt.arg)
+			require.NoError(t, err)
+
+			for k, v := range got {
+				if contains(tt.wantRedacted, k) {
+					assert.Equal(t, v, REDACTED)
+				} else {
+					assert.NotEqual(t, v, REDACTED)
+				}
+			}
+		})
+	}
+}
+
+func contains(list []string, val string) bool {
+	for _, k := range list {
+		if val == k {
+			return true
+		}
+	}
+
+	return false
+}
diff --git a/internal/pkg/agent/install/svc.go b/internal/pkg/agent/install/svc.go
index 7148f4acca0..1a3bf50c896 100644
--- a/internal/pkg/agent/install/svc.go
+++ b/internal/pkg/agent/install/svc.go
@@ -6,6 +6,7 @@ package install
 
 import (
 	"path/filepath"
+	"runtime"
 
 	"github.com/kardianos/service"
 
@@ -18,6 +19,12 @@ const (
 
 	// ServiceDescription is the description for the service.
 	ServiceDescription = "Elastic Agent is a unified agent to observe, monitor and protect your system."
+
+	// Set the launch daemon ExitTimeOut to 60 seconds in order to allow the agent to shut down gracefully.
+	// At the moment versions 8.3 & 8.4 of the agent take about 11 secs to shut down,
+	// and launchd sends SIGKILL after 5 secs, which causes the beats processes to be left running orphaned
+	// depending on the shutdown timing.
+	darwinServiceExitTimeout = 60
 )
 
 // ExecutablePath returns the path for the installed Agents executable.
@@ -30,7 +37,7 @@ func ExecutablePath() string {
 }
 
 func newService() (service.Service, error) {
-	return service.New(nil, &service.Config{
+	cfg := &service.Config{
 		Name:        paths.ServiceName,
 		DisplayName: ServiceDisplayName,
 		Description: ServiceDescription,
@@ -45,5 +52,57 @@ func newService() (service.Service, error) {
 			"OnFailureDelayDuration": "1s",
 			"OnFailureResetPeriod":   10,
 		},
-	})
+	}
+
+	if runtime.GOOS == "darwin" {
+		// The github.com/kardianos/service library doesn't support ExitTimeOut in its prebuilt template.
+		// This option allows passing our own template for the launch daemon plist, which is a copy
+		// of the prebuilt template with the ExitTimeOut option added
+		cfg.Option["LaunchdConfig"] = darwinLaunchdConfig
+		cfg.Option["ExitTimeOut"] = darwinServiceExitTimeout
+	}
+
+	return service.New(nil, cfg)
 }
+
+// A copy of the launchd plist template from github.com/kardianos/service
+// with the .Config.Option.ExitTimeOut option added
+const darwinLaunchdConfig = `<?xml version='1.0' encoding='UTF-8'?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN"
+"http://www.apple.com/DTDs/PropertyList-1.0.dtd" >
+<plist version='1.0'>
+  <dict>
+    <key>Label</key>
+    <string>{{html .Name}}</string>
+    <key>ProgramArguments</key>
+    <array>
+      <string>{{html .Path}}</string>
+      {{range .Config.Arguments}}
+      <string>{{html .}}</string>
+      {{end}}
+    </array>
+    {{if .UserName}}<key>UserName</key>
+    <string>{{html .UserName}}</string>{{end}}
+    {{if .ChRoot}}<key>RootDirectory</key>
+    <string>{{html .ChRoot}}</string>{{end}}
+    {{if .Config.Option.ExitTimeOut}}<key>ExitTimeOut</key>
+    <integer>{{html .Config.Option.ExitTimeOut}}</integer>{{end}}
+    {{if .WorkingDirectory}}<key>WorkingDirectory</key>
+    <string>{{html .WorkingDirectory}}</string>{{end}}
+    <key>SessionCreate</key>
+    <{{bool .SessionCreate}}/>
+    <key>KeepAlive</key>
+    <{{bool .KeepAlive}}/>
+    <key>RunAtLoad</key>
+    <{{bool .RunAtLoad}}/>
+    <key>Disabled</key>
+    <false/>
+
+    <key>StandardOutPath</key>
+    <string>/usr/local/var/log/{{html .Name}}.out.log</string>
+    <key>StandardErrorPath</key>
+    <string>/usr/local/var/log/{{html .Name}}.err.log</string>
+
+  </dict>
+</plist>
+`
diff --git a/internal/pkg/agent/migration/migrate_secret.go b/internal/pkg/agent/migration/migrate_secret.go
new file mode 100644
index 00000000000..08cfc3e5eb1
--- /dev/null
+++ b/internal/pkg/agent/migration/migrate_secret.go
@@ -0,0 +1,163 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package migration
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"io/fs"
+	"io/ioutil"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/elastic/elastic-agent-libs/logp"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/secret"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/storage"
+	"github.com/elastic/elastic-agent/internal/pkg/fileutil"
+)
+
+const (
+	darwin = "darwin"
+)
+
+// MigrateAgentSecret migrates the agent secret when it does not exist yet, covering agent upgrades from 8.3.0 - 8.3.2 to 8.x and above on Linux and Windows platforms.
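+// The secret is looked up in this order: the current vault, the copy made in
+// the agent home directory by the 8.3.x upgrade handler, and finally the
+// latest secret found under the elastic-agent-* data directories that can
+// decrypt the agent configuration.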
+func MigrateAgentSecret(log *logp.Logger) error {
+	// Nothing to migrate for darwin
+	if runtime.GOOS == darwin {
+		return nil
+	}
+
+	// Check if the secret already exists
+	log.Debug("migrate agent secret, check if secret already exists")
+	_, err := secret.GetAgentSecret()
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// The secret doesn't exist, perform the migration below
+			log.Debug("agent secret doesn't exist, perform migration")
+		} else {
+			err = fmt.Errorf("failed to read the agent secret: %w", err)
+			log.Error(err)
+			return err
+		}
+	} else {
+		// The secret already exists, nothing to migrate
+		log.Debug("secret already exists, nothing to migrate")
+		return nil
+	}
+
+	// Check if the secret was copied by the fleet upgrade handler to the legacy location
+	log.Debug("check if secret was copied over by 8.3.0-8.3.2 version of the agent")
+	sec, err := getAgentSecretFromHomePath(paths.Home())
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// The secret is not found in this instance of the vault, continue with migration
+			log.Debug("agent secret copied from 8.3.0-8.3.2 doesn't exist, continue with migration")
+		} else {
+			err = fmt.Errorf("failed agent 8.3.0-8.3.2 secret check: %w", err)
+			log.Error(err)
+			return err
+		}
+	} else {
+		// The secret is found, save it in the new agent vault
+		log.Debug("agent secret from 8.3.0-8.3.2 is found, migrate to the new vault")
+		return secret.SetAgentSecret(sec)
+	}
+
+	// Scan other agent data directories, find the latest agent secret
+	log.Debug("search for possible latest agent 8.3.0-8.3.2 secret")
+	dataDir := paths.Data()
+
+	sec, err = findPreviousAgentSecret(dataDir)
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			// The secret is not found
+			log.Debug("no previous agent 8.3.0-8.3.2 secrets found, nothing to migrate")
+			return nil
+		}
+		err = fmt.Errorf("search for possible latest agent 8.3.0-8.3.2 secret failed: %w", err)
+		log.Error(err)
+		return err
+	}
+	log.Debug("found previous agent 8.3.0-8.3.2 secret, migrate to the new vault")
+	return secret.SetAgentSecret(sec)
+}
+
+func findPreviousAgentSecret(dataDir string) (secret.Secret, error) {
+	found := false
+	var sec secret.Secret
+	fileSystem := os.DirFS(dataDir)
+	_ = fs.WalkDir(fileSystem, ".", func(path string, d fs.DirEntry, err error) error {
+		if err != nil {
+			return err
+		}
+		if d.IsDir() {
+			if strings.HasPrefix(d.Name(), "elastic-agent-") {
+				vaultPath := getLegacyVaultPathFromPath(filepath.Join(dataDir, path))
+				s, err := secret.GetAgentSecret(secret.WithVaultPath(vaultPath))
+				if err != nil {
+					// Ignore fs.ErrNotExist errors, keep scanning
+					if errors.Is(err, fs.ErrNotExist) {
+						return nil
+					}
+					return err
+				}
+
+				// Check that the configuration can be decrypted with the found agent secret
+				exists, _ := fileutil.FileExists(paths.AgentConfigFile())
+				if exists {
+					store := storage.NewEncryptedDiskStore(paths.AgentConfigFile(), storage.WithVaultPath(vaultPath))
+					r, err := store.Load()
+					if err != nil {
+						//nolint:nilerr // ignore the error, keep scanning
+						return nil
+					}
+
+					defer r.Close()
+					_, err = ioutil.ReadAll(r)
+					if err != nil {
+						//nolint:nilerr // ignore the error, keep scanning
+						return nil
+					}
+
+					sec = s
+					found = true
+					return io.EOF
+				}
+			} else if d.Name() != "."
{ + return fs.SkipDir + } + } + return nil + }) + if !found { + return sec, fs.ErrNotExist + } + return sec, nil +} + +func getAgentSecretFromHomePath(homePath string) (sec secret.Secret, err error) { + vaultPath := getLegacyVaultPathFromPath(homePath) + fi, err := os.Stat(vaultPath) + if err != nil { + return + } + + if !fi.IsDir() { + return sec, fs.ErrNotExist + } + return secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) +} + +func getLegacyVaultPath() string { + return getLegacyVaultPathFromPath(paths.Home()) +} + +func getLegacyVaultPathFromPath(path string) string { + return filepath.Join(path, "vault") +} diff --git a/internal/pkg/agent/migration/migrate_secret_test.go b/internal/pkg/agent/migration/migrate_secret_test.go new file mode 100644 index 00000000000..c6dfeb1781c --- /dev/null +++ b/internal/pkg/agent/migration/migrate_secret_test.go @@ -0,0 +1,387 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build linux || windows +// +build linux windows + +package migration + +import ( + "errors" + "io/fs" + "io/ioutil" + "os" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/google/go-cmp/cmp" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" + "github.com/elastic/elastic-agent/internal/pkg/agent/storage" + "github.com/elastic/elastic-agent/internal/pkg/agent/vault" +) + +func TestFindAgentSecretFromHomePath(t *testing.T) { + + tests := []struct { + name string + setupFn func(homePath string) error + wantErr error + }{ + { + name: "no data dir", + wantErr: fs.ErrNotExist, + }, + { + name: "no vault dir", + setupFn: func(homePath string) error { + return os.MkdirAll(homePath, 0750) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault file instead of directory", + setupFn: func(homePath string) error { + err := os.MkdirAll(homePath, 0750) + if err != nil { + return err + } + return ioutil.WriteFile(getLegacyVaultPathFromPath(homePath), []byte{}, 0600) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "empty vault directory", + setupFn: func(homePath string) error { + return os.MkdirAll(getLegacyVaultPathFromPath(homePath), 0750) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "empty vault", + setupFn: func(homePath string) error { + v, err := vault.New(getLegacyVaultPathFromPath(homePath)) + if err != nil { + return err + } + defer v.Close() + return nil + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault dir with no seed", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + v, err := vault.New(vaultPath) + if err != nil { + return err + } + defer v.Close() + return os.Remove(filepath.Join(vaultPath, ".seed")) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault with secret and misplaced seed vault", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + err := secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + return err + } + return os.Remove(filepath.Join(vaultPath, ".seed")) + }, + wantErr: fs.ErrNotExist, + }, + { + name: "vault with valid secret", + setupFn: func(homePath string) error { + vaultPath := getLegacyVaultPathFromPath(homePath) + err := 
secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + return err + } + return generateTestConfig(vaultPath) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + homePath := paths.Home() + + if tc.setupFn != nil { + if err := tc.setupFn(homePath); err != nil { + t.Fatal(err) + } + } + + sec, err := getAgentSecretFromHomePath(homePath) + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + + foundSec, err := findPreviousAgentSecret(filepath.Dir(homePath)) + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + diff := cmp.Diff(sec, foundSec) + if diff != "" { + t.Fatal(diff) + } + + }) + } +} + +type configType int + +const ( + NoConfig configType = iota + MatchingConfig + NonMatchingConfig +) + +func TestFindNewestAgentSecret(t *testing.T) { + + tests := []struct { + name string + cfgType configType + wantErr error + }{ + { + name: "missing config", + cfgType: NoConfig, + wantErr: fs.ErrNotExist, + }, + { + name: "matching config", + cfgType: MatchingConfig, + }, + { + name: "non-matching config", + cfgType: NonMatchingConfig, + wantErr: fs.ErrNotExist, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + dataDir := paths.Data() + + wantSecret, err := generateTestSecrets(dataDir, 3, tc.cfgType) + if err != nil { + t.Fatal(err) + } + sec, err := findPreviousAgentSecret(dataDir) + + if !errors.Is(err, tc.wantErr) { + t.Fatalf("want err: %v, got err: %v", tc.wantErr, err) + } + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } + }) + } +} + +func TestMigrateAgentSecret(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + dataDir := paths.Data() + + // No vault home path + homePath := generateTestHomePath(dataDir) + if err := os.MkdirAll(homePath, 0750); err != nil { + t.Fatal(err) + } + + // Empty vault home path + homePath = generateTestHomePath(dataDir) + vaultPath := getLegacyVaultPathFromPath(homePath) + if err := os.MkdirAll(vaultPath, 0750); err != nil { + t.Fatal(err) + } + + // Vault with missing seed + homePath = generateTestHomePath(dataDir) + vaultPath = getLegacyVaultPathFromPath(homePath) + v, err := vault.New(vaultPath) + if err != nil { + t.Fatal(err) + } + defer v.Close() + + if err = os.Remove(filepath.Join(vaultPath, ".seed")); err != nil { + t.Fatal(err) + } + + // Generate few valid secrets to scan for + wantSecret, err := generateTestSecrets(dataDir, 5, MatchingConfig) + if err != nil { + t.Fatal(err) + } + + // Expect no agent secret found + _, err = secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if !errors.Is(err, fs.ErrNotExist) { + t.Fatalf("expected err: %v", fs.ErrNotExist) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + // Expect the agent secret is migrated now + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare the migrated secret with the expected newest one + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func TestMigrateAgentSecretAlreadyExists(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + err := secret.CreateAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + 
} + + // Expect agent secret created + wantSecret, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare, should be the same secret + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func TestMigrateAgentSecretFromLegacyLocation(t *testing.T) { + top := t.TempDir() + paths.SetTop(top) + paths.SetConfig(top) + vaultPath := getLegacyVaultPath() + err := secret.CreateAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + t.Fatal(err) + } + + // Expect agent secret created + wantSecret, err := secret.GetAgentSecret(secret.WithVaultPath(vaultPath)) + if err != nil { + t.Fatal(err) + } + + // Perform migration + log := logp.NewLogger("test_agent_secret") + err = MigrateAgentSecret(log) + if err != nil { + t.Fatal(err) + } + + sec, err := secret.GetAgentSecret(secret.WithVaultPath(paths.AgentVaultPath())) + if err != nil { + t.Fatal(err) + } + + // Compare, should be the same secret + diff := cmp.Diff(sec, wantSecret) + if diff != "" { + t.Fatal(diff) + } +} + +func generateTestHomePath(dataDir string) string { + suffix := uuid.Must(uuid.NewV4()).String()[:6] + return filepath.Join(dataDir, "elastic-agent-"+suffix) +} + +func generateTestConfig(vaultPath string) error { + fleetEncConfigFile := paths.AgentConfigFile() + store := storage.NewEncryptedDiskStore(fleetEncConfigFile, storage.WithVaultPath(vaultPath)) + return store.Save(strings.NewReader("foo")) +} + +func generateTestSecrets(dataDir string, count int, cfgType configType) (wantSecret secret.Secret, err error) { + now := time.Now() + + // Generate multiple home paths + for i := 0; i < count; i++ { + homePath := generateTestHomePath(dataDir) + k, err := vault.NewKey(vault.AES256) + if err != nil { + return wantSecret, err + } + + sec := secret.Secret{ + Value: k, + CreatedOn: now.Add(-time.Duration(i+1) * time.Minute), + } + + vaultPath := getLegacyVaultPathFromPath(homePath) + err = secret.SetAgentSecret(sec, secret.WithVaultPath(vaultPath)) + if err != nil { + return wantSecret, err + } + + switch cfgType { + case NoConfig: + case MatchingConfig, NonMatchingConfig: + if i == 0 { + wantSecret = sec + // Create matching encrypted config file, the content of the file doesn't matter for this test + err = generateTestConfig(vaultPath) + if err != nil { + return wantSecret, err + } + } + } + // Delete the newest vault for the non-matching case, so the config can't be decrypted by any remaining secret + if cfgType == NonMatchingConfig && i == 0 { + _ = os.RemoveAll(vaultPath) + wantSecret = secret.Secret{} + } + } + + return wantSecret, nil +} diff --git a/internal/pkg/agent/storage/encrypted_disk_store.go b/internal/pkg/agent/storage/encrypted_disk_store.go index 48027b3178f..be78e4235df 100644 --- a/internal/pkg/agent/storage/encrypted_disk_store.go +++ b/internal/pkg/agent/storage/encrypted_disk_store.go @@ -15,6 +15,7 @@ import ( "github.com/hectane/go-acl" "github.com/elastic/elastic-agent-libs/file" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" @@ -78,7 +79,7 @@ func (d *EncryptedDiskStore) ensureKey() error { if d.key == nil { key, err := 
secret.GetAgentSecret(secret.WithVaultPath(d.vaultPath)) if err != nil { - return err + return fmt.Errorf("could not get agent key: %w", err) } d.key = key.Value } diff --git a/internal/pkg/agent/transpiler/rules.go b/internal/pkg/agent/transpiler/rules.go index e4e466ddcd9..ca97cedd707 100644 --- a/internal/pkg/agent/transpiler/rules.go +++ b/internal/pkg/agent/transpiler/rules.go @@ -669,6 +669,42 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { namespace := datastreamNamespaceFromInputNode(inputNode) datastreamType := datastreamTypeFromInputNode(inputNode, r.Type) + var inputID *StrVal + inputIDNode, found := inputNode.Find("id") + if found { + inputID, _ = inputIDNode.Value().(*StrVal) + } + + if inputID != nil { + // get input-level processors node + processorsNode, found := inputNode.Find("processors") + if !found { + processorsNode = &Key{ + name: "processors", + value: &List{value: make([]Node, 0)}, + } + + inputMap, ok := inputNode.(*Dict) + if ok { + inputMap.value = append(inputMap.value, processorsNode) + } + } + + processorsList, ok := processorsNode.Value().(*List) + if !ok { + return errors.New("InjectStreamProcessorRule: input processors is not a list") + } + + // inject `input_id` on the input level + processorMap := &Dict{value: make([]Node, 0)} + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) + processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ + &Key{name: "input_id", value: inputID}, + }}}) + addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} + processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + } + streamsNode, ok := inputNode.Find("streams") if !ok { continue @@ -680,6 +716,12 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { } for _, streamNode := range streamsList.value { + var streamID *StrVal + streamIDNode, ok := streamNode.Find("id") + if ok { + streamID, _ = streamIDNode.Value().(*StrVal) + } + streamMap, ok := streamNode.(*Dict) if !ok { continue @@ -722,6 +764,17 @@ func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { }}}) addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + + if streamID != nil { + // source stream + processorMap = &Dict{value: make([]Node, 0)} + processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) + processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ + &Key{name: "stream_id", value: streamID.Clone()}, + }}}) + addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} + processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) + } } } diff --git a/internal/pkg/agent/transpiler/rules_test.go b/internal/pkg/agent/transpiler/rules_test.go index ab2df9c1bce..840e1442fde 100644 --- a/internal/pkg/agent/transpiler/rules_test.go +++ b/internal/pkg/agent/transpiler/rules_test.go @@ -165,6 +165,114 @@ inputs: }, }, + "inject stream": { + givenYAML: ` +inputs: + - name: No streams, no IDs + type: file + - name: With streams and IDs + id: input-id + type: file + data_stream.namespace: nsns + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + - name: With processors + id: input-id + type: 
file + data_stream.namespace: nsns + processors: + - add_fields: + target: some + fields: + dataset: value + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: another + fields: + dataset: value +`, + expectedYAML: ` +inputs: + - name: No streams, no IDs + type: file + - name: With streams and IDs + id: input-id + type: file + data_stream.namespace: nsns + processors: + - add_fields: + target: "@metadata" + fields: + input_id: input-id + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: data_stream + fields: + type: stream-type + namespace: nsns + dataset: dsds + - add_fields: + target: event + fields: + dataset: dsds + - add_fields: + target: "@metadata" + fields: + stream_id: stream-id + - name: With processors + id: input-id + type: file + data_stream.namespace: nsns + processors: + - add_fields: + target: some + fields: + dataset: value + - add_fields: + target: "@metadata" + fields: + input_id: input-id + streams: + - paths: /var/log/mysql/error.log + id: stream-id + data_stream.dataset: dsds + processors: + - add_fields: + target: another + fields: + dataset: value + - add_fields: + target: data_stream + fields: + type: stream-type + namespace: nsns + dataset: dsds + - add_fields: + target: event + fields: + dataset: dsds + - add_fields: + target: "@metadata" + fields: + stream_id: stream-id +`, + rule: &RuleList{ + Rules: []Rule{ + InjectStreamProcessor("insert_after", "stream-type"), + }, + }, + }, + "inject agent info": { givenYAML: ` inputs: diff --git a/internal/pkg/agent/transpiler/vars.go b/internal/pkg/agent/transpiler/vars.go index 8daacf606fe..e8f06a6928b 100644 --- a/internal/pkg/agent/transpiler/vars.go +++ b/internal/pkg/agent/transpiler/vars.go @@ -14,7 +14,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/core/composable" ) -var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'"]*)}`) +var varsRegex = regexp.MustCompile(`\${([\p{L}\d\s\\\-_|.'":\/]*)}`) // ErrNoMatch is return when the replace didn't fail, just that no vars match to perform the replace. 
var ErrNoMatch = fmt.Errorf("no matching vars") diff --git a/internal/pkg/agent/transpiler/vars_test.go b/internal/pkg/agent/transpiler/vars_test.go index 5dd6d41ec72..56e27694a33 100644 --- a/internal/pkg/agent/transpiler/vars_test.go +++ b/internal/pkg/agent/transpiler/vars_test.go @@ -17,12 +17,14 @@ import ( func TestVars_Replace(t *testing.T) { vars := mustMakeVars(map[string]interface{}{ "un-der_score": map[string]interface{}{ - "key1": "data1", - "key2": "data2", + "key1": "data1", + "key2": "data2", + "with-dash": "dash-value", "list": []string{ "array1", "array2", }, + "with/slash": "some/path", "dict": map[string]interface{}{ "key1": "value1", "key2": "value2", @@ -44,6 +46,12 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + "${un-der_score.with-dash}", + NewStrVal("dash-value"), + false, + false, + }, { "${un-der_score.missing}", NewStrVal(""), @@ -74,12 +82,24 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + `${"with:colon"}`, + NewStrVal("with:colon"), + false, + false, + }, { `${"direct"}`, NewStrVal("direct"), false, false, }, + { + `${un-der_score.missing|'with:colon'}`, + NewStrVal("with:colon"), + false, + false, + }, { `${un-der_score.}`, NewStrVal(""), @@ -149,6 +169,12 @@ func TestVars_Replace(t *testing.T) { false, false, }, + { + `${un-der_score.with/slash}`, + NewStrVal(`some/path`), + false, + false, + }, { `list inside string ${un-der_score.list} causes no match`, NewList([]Node{ diff --git a/internal/pkg/agent/vault/seed.go b/internal/pkg/agent/vault/seed.go index 698bd0f0135..773c42e7465 100644 --- a/internal/pkg/agent/vault/seed.go +++ b/internal/pkg/agent/vault/seed.go @@ -9,6 +9,8 @@ package vault import ( "errors" + "fmt" + "io/fs" "io/ioutil" "os" "path/filepath" @@ -29,6 +31,24 @@ func getSeed(path string) ([]byte, error) { mxSeed.Lock() defer mxSeed.Unlock() + b, err := ioutil.ReadFile(fp) + if err != nil { + return nil, fmt.Errorf("could not read seed file: %w", err) + } + + // return fs.ErrNotExist if an invalid number of bytes is returned + if len(b) != int(AES256) { + return nil, fmt.Errorf("invalid seed length, expected: %v, got: %v: %w", int(AES256), len(b), fs.ErrNotExist) + } + return b, nil +} + +func createSeedIfNotExists(path string) ([]byte, error) { + fp := filepath.Join(path, seedFile) + + mxSeed.Lock() + defer mxSeed.Unlock() + b, err := ioutil.ReadFile(fp) if err != nil { if !errors.Is(err, os.ErrNotExist) { @@ -52,3 +72,10 @@ func getSeed(path string) ([]byte, error) { return seed, nil } + +func getOrCreateSeed(path string, readonly bool) ([]byte, error) { + if readonly { + return getSeed(path) + } + return createSeedIfNotExists(path) +} diff --git a/internal/pkg/agent/vault/seed_test.go b/internal/pkg/agent/vault/seed_test.go index bb9197ea614..d10be29634f 100644 --- a/internal/pkg/agent/vault/seed_test.go +++ b/internal/pkg/agent/vault/seed_test.go @@ -10,12 +10,14 @@ package vault import ( "context" "encoding/hex" + "io/fs" "path/filepath" "sync" "testing" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" ) @@ -24,12 +26,45 @@ func TestGetSeed(t *testing.T) { fp := filepath.Join(dir, seedFile) + require.NoFileExists(t, fp) + + // seed is not yet created + _, err := getSeed(dir) + + // should be not found + require.ErrorIs(t, err, fs.ErrNotExist) + + b, err := createSeedIfNotExists(dir) + assert.NoError(t, err) + + require.FileExists(t, fp) + + diff := cmp.Diff(int(AES256), len(b)) + if diff != "" { + t.Error(diff) + } + + // 
try to get the seed + gotSeed, err := getSeed(dir) + assert.NoError(t, err) + + diff = cmp.Diff(b, gotSeed) + if diff != "" { + t.Error(diff) + } +} + +func TestCreateSeedIfNotExists(t *testing.T) { + dir := t.TempDir() + + fp := filepath.Join(dir, seedFile) + assert.NoFileExists(t, fp) - b, err := getSeed(dir) + b, err := createSeedIfNotExists(dir) assert.NoError(t, err) - assert.FileExists(t, fp) + require.FileExists(t, fp) diff := cmp.Diff(int(AES256), len(b)) if diff != "" { @@ -37,7 +72,7 @@ func TestGetSeed(t *testing.T) { } } -func TestGetSeedRace(t *testing.T) { +func TestCreateSeedIfNotExistsRace(t *testing.T) { var err error dir := t.TempDir() @@ -51,7 +86,7 @@ func TestGetSeedRace(t *testing.T) { for i := 0; i < count; i++ { g.Go(func(idx int) func() error { return func() error { - seed, err := getSeed(dir) + seed, err := createSeedIfNotExists(dir) mx.Lock() res[idx] = seed mx.Unlock() diff --git a/internal/pkg/agent/vault/vault_darwin.go b/internal/pkg/agent/vault/vault_darwin.go index 4119b27a586..5f63a496179 100644 --- a/internal/pkg/agent/vault/vault_darwin.go +++ b/internal/pkg/agent/vault/vault_darwin.go @@ -37,13 +37,15 @@ type Vault struct { } // New initializes the vault store -// Call Close when done to release the resouces -func New(name string) (*Vault, error) { +// Call Close when done to release the resources +func New(name string, opts ...OptionFunc) (*Vault, error) { var keychain C.SecKeychainRef + err := statusToError(C.OpenKeychain(keychain)) if err != nil { - return nil, err + return nil, fmt.Errorf("could not open keychain: %w", err) } + return &Vault{ name: name, keychain: keychain, diff --git a/internal/pkg/agent/vault/vault_linux.go b/internal/pkg/agent/vault/vault_linux.go index a3737d5c625..51f6a3fa651 100644 --- a/internal/pkg/agent/vault/vault_linux.go +++ b/internal/pkg/agent/vault/vault_linux.go @@ -11,6 +11,7 @@ import ( "crypto/rand" "crypto/sha256" "errors" + "fmt" "io/fs" "io/ioutil" "os" @@ -29,28 +30,39 @@ type Vault struct { mx sync.Mutex } -// Open initializes the vault store -func New(path string) (*Vault, error) { +// New creates the vault store +func New(path string, opts ...OptionFunc) (v *Vault, err error) { + options := applyOptions(opts...) dir := filepath.Dir(path) // If there is no specific path then get the executable directory if dir == "." { exefp, err := os.Executable() if err != nil { - return nil, err + return nil, fmt.Errorf("could not get executable path: %w", err) } dir = filepath.Dir(exefp) path = filepath.Join(dir, path) } - err := os.MkdirAll(path, 0750) - if err != nil { - return nil, err + if options.readonly { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fs.ErrNotExist + } + } else { + err := os.MkdirAll(path, 0750) + if err != nil { + return nil, fmt.Errorf("failed to create vault path: %v, err: %w", path, err) + } } - key, err := getSeed(path) + key, err := getOrCreateSeed(path, options.readonly) if err != nil { - return nil, err + return nil, fmt.Errorf("could not get seed to create new vault: %w", err) } return &Vault{ diff --git a/internal/pkg/agent/vault/vault_options.go b/internal/pkg/agent/vault/vault_options.go new file mode 100644 index 00000000000..2673ae6aa53 --- /dev/null +++ b/internal/pkg/agent/vault/vault_options.go @@ -0,0 +1,28 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package vault + +type Options struct { + readonly bool +} + +type OptionFunc func(o *Options) + +func WithReadonly(readonly bool) OptionFunc { + return func(o *Options) { + o.readonly = readonly + } +} + +//nolint:unused // not used on darwin +func applyOptions(opts ...OptionFunc) Options { + var options Options + + for _, opt := range opts { + opt(&options) + } + + return options +} diff --git a/internal/pkg/agent/vault/vault_windows.go b/internal/pkg/agent/vault/vault_windows.go index 7468fe16814..c39769cc8da 100644 --- a/internal/pkg/agent/vault/vault_windows.go +++ b/internal/pkg/agent/vault/vault_windows.go @@ -27,7 +27,8 @@ type Vault struct { } // Open initializes the vault store -func New(path string) (*Vault, error) { +func New(path string, opts ...OptionFunc) (v *Vault, err error) { + options := applyOptions(opts...) dir := filepath.Dir(path) // If there is no specific path then get the executable directory @@ -40,16 +41,26 @@ func New(path string) (*Vault, error) { path = filepath.Join(dir, path) } - err := os.MkdirAll(path, 0750) - if err != nil { - return nil, err - } - err = systemAdministratorsOnly(path, false) - if err != nil { - return nil, err + if options.readonly { + fi, err := os.Stat(path) + if err != nil { + return nil, err + } + if !fi.IsDir() { + return nil, fs.ErrNotExist + } + } else { + err := os.MkdirAll(path, 0750) + if err != nil { + return nil, err + } + err = systemAdministratorsOnly(path, false) + if err != nil { + return nil, err + } } - entropy, err := getSeed(path) + entropy, err := getOrCreateSeed(path, options.readonly) if err != nil { return nil, err } diff --git a/internal/pkg/composable/providers/kubernetes/node_test.go b/internal/pkg/composable/providers/kubernetes/node_test.go index 7d8abfcea4e..ab19e7d2ce2 100644 --- a/internal/pkg/composable/providers/kubernetes/node_test.go +++ b/internal/pkg/composable/providers/kubernetes/node_test.go @@ -26,7 +26,9 @@ func TestGenerateNodeData(t *testing.T) { Name: "testnode", UID: types.UID(uid), Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "baz": "ban", @@ -54,7 +56,9 @@ func TestGenerateNodeData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -64,7 +68,11 @@ func TestGenerateNodeData(t *testing.T) { "name": "devcluster", "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ - "labels": mapstr.M{"foo": "bar"}, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, "annotations": mapstr.M{"baz": "ban"}, "node": mapstr.M{ "ip": "node1", @@ -123,7 +131,9 @@ func (n *nodeMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOp "ip": "node1", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", diff --git a/internal/pkg/composable/providers/kubernetes/pod_test.go b/internal/pkg/composable/providers/kubernetes/pod_test.go index 45fd78ac76c..95361fd2ce0 100644 --- a/internal/pkg/composable/providers/kubernetes/pod_test.go +++ b/internal/pkg/composable/providers/kubernetes/pod_test.go @@ -27,7 +27,9 @@ func TestGeneratePodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": 
"bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -59,7 +61,9 @@ func TestGeneratePodData(t *testing.T) { "nsa": "nsb", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "app": "production", @@ -74,7 +78,9 @@ func TestGeneratePodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ @@ -119,7 +125,9 @@ func TestGenerateContainerPodData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -175,7 +183,9 @@ func TestGenerateContainerPodData(t *testing.T) { "app": "production", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -191,7 +201,11 @@ func TestGenerateContainerPodData(t *testing.T) { }, "kubernetes": mapstr.M{ "namespace": "testns", "annotations": mapstr.M{"app": "production"}, - "labels": mapstr.M{"foo": "bar"}, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, "pod": mapstr.M{ "ip": "127.0.0.5", "name": "testpod", @@ -232,7 +246,9 @@ func TestEphemeralContainers(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "app": "production", @@ -274,7 +290,9 @@ func TestEphemeralContainers(t *testing.T) { "ip": pod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "container": mapstr.M{ "id": "asdfghdeadbeef", @@ -300,8 +318,12 @@ func TestEphemeralContainers(t *testing.T) { "name": "devcluster", "url": "8.8.8.8:9090"}, }, "kubernetes": mapstr.M{ - "namespace": "testns", - "labels": mapstr.M{"foo": "bar"}, + "namespace": "testns", + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, "annotations": mapstr.M{"app": "production"}, "pod": mapstr.M{ "ip": "127.0.0.5", @@ -383,7 +405,9 @@ func (p *podMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": k8sPod.Status.PodIP, }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "app": "production", diff --git a/internal/pkg/composable/providers/kubernetes/service_test.go b/internal/pkg/composable/providers/kubernetes/service_test.go index 47d420fb233..69e945ee1cd 100644 --- a/internal/pkg/composable/providers/kubernetes/service_test.go +++ b/internal/pkg/composable/providers/kubernetes/service_test.go @@ -25,7 +25,9 @@ func TestGenerateServiceData(t *testing.T) { UID: types.UID(uid), Namespace: "testns", Labels: map[string]string{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, Annotations: map[string]string{ "baz": "ban", @@ -64,7 +66,9 @@ func TestGenerateServiceData(t *testing.T) { "baz": "ban", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, } @@ -80,7 +84,9 @@ func TestGenerateServiceData(t 
*testing.T) { "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", @@ -139,7 +145,9 @@ func (s *svcMeta) GenerateK8s(obj kubernetes.Resource, opts ...metadata.FieldOpt "ip": "1.2.3.4", }, "labels": mapstr.M{ - "foo": "bar", + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", }, "annotations": mapstr.M{ "baz": "ban", diff --git a/internal/pkg/eql/Eql.g4 b/internal/pkg/eql/Eql.g4 index d46e2571812..bb7b5a88770 100644 --- a/internal/pkg/eql/Eql.g4 +++ b/internal/pkg/eql/Eql.g4 @@ -22,7 +22,7 @@ NUMBER: [\-]? [0-9]+; WHITESPACE: [ \r\n\t]+ -> skip; NOT: 'NOT' | 'not'; NAME: [a-zA-Z_] [a-zA-Z0-9_]*; -VNAME: [a-zA-Z0-9_.]+('.'[a-zA-Z0-9_]+)*; +VNAME: [a-zA-Z0-9_.\-/]+('.'[a-zA-Z0-9_\-/]+)*; STEXT: '\'' ~[\r\n']* '\''; DTEXT: '"' ~[\r\n"]* '"'; LPAR: '('; diff --git a/internal/pkg/eql/eql_test.go b/internal/pkg/eql/eql_test.go index eab34f69026..54f7741f88d 100644 --- a/internal/pkg/eql/eql_test.go +++ b/internal/pkg/eql/eql_test.go @@ -42,6 +42,9 @@ func TestEql(t *testing.T) { {expression: "${env.MISSING|host.MISSING|true} == true", result: true}, {expression: "${env.MISSING|host.MISSING|false} == false", result: true}, {expression: "${'constant'} == 'constant'", result: true}, + {expression: "${data.with-dash} == 'dash-value'", result: true}, + {expression: "${'dash-value'} == 'dash-value'", result: true}, + {expression: "${data.with/slash} == 'some/path'", result: true}, // boolean {expression: "true", result: true}, @@ -306,9 +309,11 @@ func TestEql(t *testing.T) { store := &testVarStore{ vars: map[string]interface{}{ - "env.HOSTNAME": "my-hostname", - "host.name": "host-name", - "data.array": []interface{}{"array1", "array2", "array3"}, + "env.HOSTNAME": "my-hostname", + "host.name": "host-name", + "data.array": []interface{}{"array1", "array2", "array3"}, + "data.with-dash": "dash-value", + "data.with/slash": "some/path", "data.dict": map[string]interface{}{ "key1": "dict1", "key2": "dict2", @@ -327,7 +332,7 @@ func TestEql(t *testing.T) { } t.Run(title, func(t *testing.T) { if showDebug == "1" { - debug(test.expression) + debug(t, test.expression) } r, err := Eval(test.expression, store) @@ -343,17 +348,17 @@ func TestEql(t *testing.T) { } } -func debug(expression string) { +func debug(t *testing.T, expression string) { raw := antlr.NewInputStream(expression) lexer := parser.NewEqlLexer(raw) for { - t := lexer.NextToken() - if t.GetTokenType() == antlr.TokenEOF { + token := lexer.NextToken() + if token.GetTokenType() == antlr.TokenEOF { break } - fmt.Printf("%s (%q)\n", - lexer.SymbolicNames[t.GetTokenType()], t.GetText()) + t.Logf("%s (%q)\n", + lexer.SymbolicNames[token.GetTokenType()], token.GetText()) } } diff --git a/internal/pkg/eql/parser/EqlLexer.interp b/internal/pkg/eql/parser/EqlLexer.interp index 2131aba8177..66413a00c42 100644 --- a/internal/pkg/eql/parser/EqlLexer.interp +++ b/internal/pkg/eql/parser/EqlLexer.interp @@ -113,4 +113,4 @@ mode names: DEFAULT_MODE atn: -[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 
31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 10, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 48, 48, 50, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 
89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 6, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 8, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 9, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 
152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file +[3, 24715, 42794, 33075, 47597, 16764, 15335, 30598, 22884, 2, 35, 230, 8, 1, 4, 2, 9, 2, 4, 3, 9, 3, 4, 4, 9, 4, 4, 5, 9, 5, 4, 6, 9, 6, 4, 7, 9, 7, 4, 8, 9, 8, 4, 9, 9, 9, 4, 10, 9, 10, 4, 11, 9, 11, 4, 12, 9, 12, 4, 13, 9, 13, 4, 14, 9, 14, 4, 15, 9, 15, 4, 16, 9, 16, 4, 17, 9, 17, 4, 18, 9, 18, 4, 19, 9, 19, 4, 20, 9, 20, 4, 21, 9, 21, 4, 22, 9, 22, 4, 23, 9, 23, 4, 24, 9, 24, 4, 25, 9, 25, 4, 26, 9, 26, 4, 27, 9, 27, 4, 28, 9, 28, 4, 29, 9, 29, 4, 30, 9, 30, 4, 31, 9, 31, 4, 32, 9, 32, 4, 33, 9, 33, 4, 34, 9, 34, 3, 2, 3, 2, 3, 3, 3, 3, 3, 4, 3, 4, 3, 5, 3, 5, 3, 5, 3, 6, 3, 6, 3, 6, 3, 7, 3, 7, 3, 8, 3, 8, 3, 9, 3, 9, 3, 9, 3, 10, 3, 10, 3, 10, 3, 11, 3, 11, 3, 12, 3, 12, 3, 13, 3, 13, 3, 14, 3, 14, 3, 15, 3, 15, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 3, 16, 5, 16, 108, 10, 16, 3, 17, 3, 17, 3, 17, 3, 17, 5, 17, 114, 10, 17, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 3, 18, 5, 18, 124, 10, 18, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 3, 19, 5, 19, 136, 10, 19, 3, 20, 5, 20, 139, 10, 20, 3, 20, 6, 20, 142, 10, 20, 13, 20, 14, 20, 143, 3, 20, 3, 20, 6, 20, 148, 10, 20, 13, 20, 14, 20, 149, 3, 21, 5, 21, 153, 10, 21, 3, 21, 6, 21, 156, 10, 21, 13, 21, 14, 21, 157, 3, 22, 6, 22, 161, 10, 22, 13, 22, 14, 22, 162, 3, 22, 3, 22, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 3, 23, 5, 23, 173, 10, 23, 3, 24, 3, 24, 7, 24, 177, 10, 24, 12, 24, 14, 24, 180, 11, 24, 3, 25, 6, 25, 183, 10, 25, 13, 25, 14, 25, 184, 3, 25, 3, 25, 6, 25, 189, 10, 25, 13, 25, 14, 25, 190, 7, 25, 193, 10, 25, 12, 25, 14, 25, 196, 11, 25, 3, 26, 3, 26, 7, 26, 200, 10, 26, 12, 26, 14, 26, 203, 11, 26, 3, 26, 3, 26, 3, 27, 3, 27, 7, 27, 209, 10, 27, 12, 27, 14, 27, 212, 11, 27, 3, 27, 3, 27, 3, 28, 3, 28, 3, 29, 3, 29, 3, 30, 3, 30, 3, 31, 3, 31, 3, 32, 3, 32, 3, 33, 3, 33, 3, 34, 3, 34, 3, 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 47, 59, 67, 92, 97, 97, 99, 124, 7, 2, 47, 47, 49, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 
2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 
2, 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2] \ No newline at end of file diff --git a/internal/pkg/eql/parser/eql_lexer.go b/internal/pkg/eql/parser/eql_lexer.go index da1bf4d112e..b8eb1eeed6d 100644 --- a/internal/pkg/eql/parser/eql_lexer.go +++ b/internal/pkg/eql/parser/eql_lexer.go @@ -46,84 +46,85 @@ var serializedLexerAtn = []uint16{ 34, 2, 2, 35, 3, 3, 5, 4, 7, 5, 9, 6, 11, 7, 13, 8, 15, 9, 17, 10, 19, 11, 21, 12, 23, 13, 25, 14, 27, 15, 29, 16, 31, 17, 33, 18, 35, 19, 37, 20, 39, 21, 41, 22, 43, 23, 45, 24, 47, 25, 49, 26, 51, 27, 53, 28, 55, - 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 10, 3, 2, 47, + 29, 57, 30, 59, 31, 61, 32, 63, 33, 65, 34, 67, 35, 3, 2, 11, 3, 2, 47, 47, 3, 2, 50, 59, 5, 2, 11, 12, 15, 15, 34, 34, 5, 2, 67, 92, 97, 97, 99, - 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 7, 2, 48, 48, 50, 59, 67, 92, - 97, 97, 99, 124, 5, 2, 12, 12, 15, 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, - 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, - 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, - 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, - 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, - 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, - 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, - 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, - 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, - 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, - 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, - 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, - 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, - 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, - 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, - 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, 2, 45, 172, 3, 2, 2, 2, 47, 174, - 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, 3, 2, 2, 2, 53, 206, 3, 2, 2, - 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, 2, 59, 219, 3, 2, 2, 2, 61, 221, - 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, 3, 2, 2, 2, 67, 227, 3, 2, 2, - 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, 2, 71, 72, 7, 46, 2, 2, 72, 6, - 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, - 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, - 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, - 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, - 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, - 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, - 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, - 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, 2, 99, 100, 7, 39, 2, 2, 
100, 30, - 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, 103, 7, 112, 2, 2, 103, 108, 7, - 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, 106, 7, 80, 2, 2, 106, 108, 7, 70, - 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, - 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, 2, 2, 111, 112, 7, 81, 2, 2, - 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, 2, 113, 111, 3, 2, 2, 2, 114, - 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, 117, 7, 116, 2, 2, 117, 118, - 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, 120, 7, 86, 2, 2, 120, 121, - 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, 7, 71, 2, 2, 123, 115, 3, - 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, 2, 2, 125, 126, 7, 104, 2, - 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, 2, 2, 128, 129, 7, 117, 2, - 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, 2, 2, 131, 132, 7, 67, 2, 2, - 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, 2, 134, 136, 7, 71, 2, 2, 135, - 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, 38, 3, 2, 2, 2, 137, 139, 9, - 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, 2, 2, 2, 139, 141, 3, 2, 2, - 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, 2, 142, 143, 3, 2, 2, 2, 143, - 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, 145, 3, 2, 2, 2, 145, 147, - 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, 3, 2, 2, 2, 148, 149, 3, 2, - 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, 2, 2, 150, 40, 3, 2, 2, 2, - 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, 152, 153, 3, 2, 2, 2, 153, - 155, 3, 2, 2, 2, 154, 156, 9, 3, 2, 2, 155, 154, 3, 2, 2, 2, 156, 157, - 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, 3, 2, 2, 2, 158, 42, 3, 2, - 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, 2, 2, 161, 162, 3, 2, 2, 2, - 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, 163, 164, 3, 2, 2, 2, 164, - 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, 167, 7, 80, 2, 2, 167, 168, - 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, 7, 112, 2, 2, 170, 171, 7, - 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, 3, 2, 2, 2, 172, 169, 3, 2, - 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, 2, 2, 175, 177, 9, 6, 2, 2, - 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, 178, 176, 3, 2, 2, 2, 178, - 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, 3, 2, 2, 2, 181, 183, 9, - 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, 2, 2, 184, 182, 3, 2, 2, - 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, 186, 188, 7, 48, 2, 2, 187, - 189, 9, 6, 2, 2, 188, 187, 3, 2, 2, 2, 189, 190, 3, 2, 2, 2, 190, 188, - 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, 3, 2, 2, 2, 192, 186, 3, 2, - 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, 2, 2, 194, 195, 3, 2, 2, 2, - 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, 197, 201, 7, 41, 2, 2, 198, - 200, 10, 8, 2, 2, 199, 198, 3, 2, 2, 2, 200, 203, 3, 2, 2, 2, 201, 199, - 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, 3, 2, 2, 2, 203, 201, 3, 2, - 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, 2, 2, 206, 210, 7, 36, 2, 2, - 207, 209, 10, 9, 2, 2, 208, 207, 3, 2, 2, 2, 209, 212, 3, 2, 2, 2, 210, - 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, 213, 3, 2, 2, 2, 212, 210, - 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, 3, 2, 2, 2, 215, 216, 7, 42, - 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, 2, 2, 218, 58, 3, 2, 2, 2, - 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, 221, 222, 7, 95, 2, 2, 222, - 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, 64, 3, 2, 2, 2, 225, 226, - 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, 7, 38, 2, 2, 228, 229, 7, - 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, 123, 135, 138, 143, 149, - 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, 3, 8, 2, 2, + 124, 6, 2, 50, 59, 67, 92, 97, 97, 99, 124, 6, 2, 47, 59, 67, 92, 97, 
97, + 99, 124, 7, 2, 47, 47, 49, 59, 67, 92, 97, 97, 99, 124, 5, 2, 12, 12, 15, + 15, 41, 41, 5, 2, 12, 12, 15, 15, 36, 36, 2, 246, 2, 3, 3, 2, 2, 2, 2, + 5, 3, 2, 2, 2, 2, 7, 3, 2, 2, 2, 2, 9, 3, 2, 2, 2, 2, 11, 3, 2, 2, 2, 2, + 13, 3, 2, 2, 2, 2, 15, 3, 2, 2, 2, 2, 17, 3, 2, 2, 2, 2, 19, 3, 2, 2, 2, + 2, 21, 3, 2, 2, 2, 2, 23, 3, 2, 2, 2, 2, 25, 3, 2, 2, 2, 2, 27, 3, 2, 2, + 2, 2, 29, 3, 2, 2, 2, 2, 31, 3, 2, 2, 2, 2, 33, 3, 2, 2, 2, 2, 35, 3, 2, + 2, 2, 2, 37, 3, 2, 2, 2, 2, 39, 3, 2, 2, 2, 2, 41, 3, 2, 2, 2, 2, 43, 3, + 2, 2, 2, 2, 45, 3, 2, 2, 2, 2, 47, 3, 2, 2, 2, 2, 49, 3, 2, 2, 2, 2, 51, + 3, 2, 2, 2, 2, 53, 3, 2, 2, 2, 2, 55, 3, 2, 2, 2, 2, 57, 3, 2, 2, 2, 2, + 59, 3, 2, 2, 2, 2, 61, 3, 2, 2, 2, 2, 63, 3, 2, 2, 2, 2, 65, 3, 2, 2, 2, + 2, 67, 3, 2, 2, 2, 3, 69, 3, 2, 2, 2, 5, 71, 3, 2, 2, 2, 7, 73, 3, 2, 2, + 2, 9, 75, 3, 2, 2, 2, 11, 78, 3, 2, 2, 2, 13, 81, 3, 2, 2, 2, 15, 83, 3, + 2, 2, 2, 17, 85, 3, 2, 2, 2, 19, 88, 3, 2, 2, 2, 21, 91, 3, 2, 2, 2, 23, + 93, 3, 2, 2, 2, 25, 95, 3, 2, 2, 2, 27, 97, 3, 2, 2, 2, 29, 99, 3, 2, 2, + 2, 31, 107, 3, 2, 2, 2, 33, 113, 3, 2, 2, 2, 35, 123, 3, 2, 2, 2, 37, 135, + 3, 2, 2, 2, 39, 138, 3, 2, 2, 2, 41, 152, 3, 2, 2, 2, 43, 160, 3, 2, 2, + 2, 45, 172, 3, 2, 2, 2, 47, 174, 3, 2, 2, 2, 49, 182, 3, 2, 2, 2, 51, 197, + 3, 2, 2, 2, 53, 206, 3, 2, 2, 2, 55, 215, 3, 2, 2, 2, 57, 217, 3, 2, 2, + 2, 59, 219, 3, 2, 2, 2, 61, 221, 3, 2, 2, 2, 63, 223, 3, 2, 2, 2, 65, 225, + 3, 2, 2, 2, 67, 227, 3, 2, 2, 2, 69, 70, 7, 126, 2, 2, 70, 4, 3, 2, 2, + 2, 71, 72, 7, 46, 2, 2, 72, 6, 3, 2, 2, 2, 73, 74, 7, 60, 2, 2, 74, 8, + 3, 2, 2, 2, 75, 76, 7, 63, 2, 2, 76, 77, 7, 63, 2, 2, 77, 10, 3, 2, 2, + 2, 78, 79, 7, 35, 2, 2, 79, 80, 7, 63, 2, 2, 80, 12, 3, 2, 2, 2, 81, 82, + 7, 64, 2, 2, 82, 14, 3, 2, 2, 2, 83, 84, 7, 62, 2, 2, 84, 16, 3, 2, 2, + 2, 85, 86, 7, 64, 2, 2, 86, 87, 7, 63, 2, 2, 87, 18, 3, 2, 2, 2, 88, 89, + 7, 62, 2, 2, 89, 90, 7, 63, 2, 2, 90, 20, 3, 2, 2, 2, 91, 92, 7, 45, 2, + 2, 92, 22, 3, 2, 2, 2, 93, 94, 7, 47, 2, 2, 94, 24, 3, 2, 2, 2, 95, 96, + 7, 44, 2, 2, 96, 26, 3, 2, 2, 2, 97, 98, 7, 49, 2, 2, 98, 28, 3, 2, 2, + 2, 99, 100, 7, 39, 2, 2, 100, 30, 3, 2, 2, 2, 101, 102, 7, 99, 2, 2, 102, + 103, 7, 112, 2, 2, 103, 108, 7, 102, 2, 2, 104, 105, 7, 67, 2, 2, 105, + 106, 7, 80, 2, 2, 106, 108, 7, 70, 2, 2, 107, 101, 3, 2, 2, 2, 107, 104, + 3, 2, 2, 2, 108, 32, 3, 2, 2, 2, 109, 110, 7, 113, 2, 2, 110, 114, 7, 116, + 2, 2, 111, 112, 7, 81, 2, 2, 112, 114, 7, 84, 2, 2, 113, 109, 3, 2, 2, + 2, 113, 111, 3, 2, 2, 2, 114, 34, 3, 2, 2, 2, 115, 116, 7, 118, 2, 2, 116, + 117, 7, 116, 2, 2, 117, 118, 7, 119, 2, 2, 118, 124, 7, 103, 2, 2, 119, + 120, 7, 86, 2, 2, 120, 121, 7, 84, 2, 2, 121, 122, 7, 87, 2, 2, 122, 124, + 7, 71, 2, 2, 123, 115, 3, 2, 2, 2, 123, 119, 3, 2, 2, 2, 124, 36, 3, 2, + 2, 2, 125, 126, 7, 104, 2, 2, 126, 127, 7, 99, 2, 2, 127, 128, 7, 110, + 2, 2, 128, 129, 7, 117, 2, 2, 129, 136, 7, 103, 2, 2, 130, 131, 7, 72, + 2, 2, 131, 132, 7, 67, 2, 2, 132, 133, 7, 78, 2, 2, 133, 134, 7, 85, 2, + 2, 134, 136, 7, 71, 2, 2, 135, 125, 3, 2, 2, 2, 135, 130, 3, 2, 2, 2, 136, + 38, 3, 2, 2, 2, 137, 139, 9, 2, 2, 2, 138, 137, 3, 2, 2, 2, 138, 139, 3, + 2, 2, 2, 139, 141, 3, 2, 2, 2, 140, 142, 9, 3, 2, 2, 141, 140, 3, 2, 2, + 2, 142, 143, 3, 2, 2, 2, 143, 141, 3, 2, 2, 2, 143, 144, 3, 2, 2, 2, 144, + 145, 3, 2, 2, 2, 145, 147, 7, 48, 2, 2, 146, 148, 9, 3, 2, 2, 147, 146, + 3, 2, 2, 2, 148, 149, 3, 2, 2, 2, 149, 147, 3, 2, 2, 2, 149, 150, 3, 2, + 2, 2, 150, 40, 3, 2, 2, 2, 151, 153, 9, 2, 2, 2, 152, 151, 3, 2, 2, 2, + 152, 153, 3, 2, 2, 2, 153, 155, 3, 2, 2, 
2, 154, 156, 9, 3, 2, 2, 155, + 154, 3, 2, 2, 2, 156, 157, 3, 2, 2, 2, 157, 155, 3, 2, 2, 2, 157, 158, + 3, 2, 2, 2, 158, 42, 3, 2, 2, 2, 159, 161, 9, 4, 2, 2, 160, 159, 3, 2, + 2, 2, 161, 162, 3, 2, 2, 2, 162, 160, 3, 2, 2, 2, 162, 163, 3, 2, 2, 2, + 163, 164, 3, 2, 2, 2, 164, 165, 8, 22, 2, 2, 165, 44, 3, 2, 2, 2, 166, + 167, 7, 80, 2, 2, 167, 168, 7, 81, 2, 2, 168, 173, 7, 86, 2, 2, 169, 170, + 7, 112, 2, 2, 170, 171, 7, 113, 2, 2, 171, 173, 7, 118, 2, 2, 172, 166, + 3, 2, 2, 2, 172, 169, 3, 2, 2, 2, 173, 46, 3, 2, 2, 2, 174, 178, 9, 5, + 2, 2, 175, 177, 9, 6, 2, 2, 176, 175, 3, 2, 2, 2, 177, 180, 3, 2, 2, 2, + 178, 176, 3, 2, 2, 2, 178, 179, 3, 2, 2, 2, 179, 48, 3, 2, 2, 2, 180, 178, + 3, 2, 2, 2, 181, 183, 9, 7, 2, 2, 182, 181, 3, 2, 2, 2, 183, 184, 3, 2, + 2, 2, 184, 182, 3, 2, 2, 2, 184, 185, 3, 2, 2, 2, 185, 194, 3, 2, 2, 2, + 186, 188, 7, 48, 2, 2, 187, 189, 9, 8, 2, 2, 188, 187, 3, 2, 2, 2, 189, + 190, 3, 2, 2, 2, 190, 188, 3, 2, 2, 2, 190, 191, 3, 2, 2, 2, 191, 193, + 3, 2, 2, 2, 192, 186, 3, 2, 2, 2, 193, 196, 3, 2, 2, 2, 194, 192, 3, 2, + 2, 2, 194, 195, 3, 2, 2, 2, 195, 50, 3, 2, 2, 2, 196, 194, 3, 2, 2, 2, + 197, 201, 7, 41, 2, 2, 198, 200, 10, 9, 2, 2, 199, 198, 3, 2, 2, 2, 200, + 203, 3, 2, 2, 2, 201, 199, 3, 2, 2, 2, 201, 202, 3, 2, 2, 2, 202, 204, + 3, 2, 2, 2, 203, 201, 3, 2, 2, 2, 204, 205, 7, 41, 2, 2, 205, 52, 3, 2, + 2, 2, 206, 210, 7, 36, 2, 2, 207, 209, 10, 10, 2, 2, 208, 207, 3, 2, 2, + 2, 209, 212, 3, 2, 2, 2, 210, 208, 3, 2, 2, 2, 210, 211, 3, 2, 2, 2, 211, + 213, 3, 2, 2, 2, 212, 210, 3, 2, 2, 2, 213, 214, 7, 36, 2, 2, 214, 54, + 3, 2, 2, 2, 215, 216, 7, 42, 2, 2, 216, 56, 3, 2, 2, 2, 217, 218, 7, 43, + 2, 2, 218, 58, 3, 2, 2, 2, 219, 220, 7, 93, 2, 2, 220, 60, 3, 2, 2, 2, + 221, 222, 7, 95, 2, 2, 222, 62, 3, 2, 2, 2, 223, 224, 7, 125, 2, 2, 224, + 64, 3, 2, 2, 2, 225, 226, 7, 127, 2, 2, 226, 66, 3, 2, 2, 2, 227, 228, + 7, 38, 2, 2, 228, 229, 7, 125, 2, 2, 229, 68, 3, 2, 2, 2, 20, 2, 107, 113, + 123, 135, 138, 143, 149, 152, 157, 162, 172, 178, 184, 190, 194, 201, 210, + 3, 8, 2, 2, } var lexerDeserializer = antlr.NewATNDeserializer(nil) diff --git a/internal/pkg/fileutil/fileutil.go b/internal/pkg/fileutil/fileutil.go new file mode 100644 index 00000000000..86d1db249aa --- /dev/null +++ b/internal/pkg/fileutil/fileutil.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package fileutil
+
+import (
+	"errors"
+	"io/fs"
+	"os"
+	"time"
+)
+
+// FileExists returns true if file/dir exists
+func FileExists(fp string) (bool, error) {
+	_, err := os.Stat(fp)
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			return false, nil
+		}
+		return false, err
+	}
+	return true, nil
+}
+
+// GetModTime returns file modification time
+func GetModTime(fp string) (time.Time, error) {
+	fi, err := os.Stat(fp)
+	if err != nil {
+		return time.Time{}, err
+	}
+	return fi.ModTime(), nil
+}
+
+// GetModTimeExists returns file modification time and existence status
+// Returns no error if the file doesn't exist
+func GetModTimeExists(fp string) (time.Time, bool, error) {
+	modTime, err := GetModTime(fp)
+	if err != nil {
+		if errors.Is(err, fs.ErrNotExist) {
+			return modTime, false, nil
+		}
+		return modTime, false, err
+	}
+	return modTime, true, nil
+}
diff --git a/magefile.go b/magefile.go
index 9ddfe7bb7fb..ead48b5ee42 100644
--- a/magefile.go
+++ b/magefile.go
@@ -822,6 +822,7 @@ func packageAgent(requiredPackages []string, packagingFn func()) {
 	mg.Deps(CrossBuild, CrossBuildGoDaemon)
 	mg.SerialDeps(devtools.Package, TestPackages)
 }
+
 func copyComponentSpecs(componentName, versionedDropPath string) (string, error) {
 	sourceSpecFile := filepath.Join("specs", componentName+specSuffix)
 	targetPath := filepath.Join(versionedDropPath, componentName+specSuffix)
@@ -1011,3 +1012,111 @@ type checksumFile struct {
 	Name     string `yaml:"name"`
 	Checksum string `yaml:"sha512"`
 }
+
+// Ironbank packages elastic-agent for the IronBank distribution, relying on the
+// binaries having already been built.
+//
+// Use SNAPSHOT=true to build snapshots.
+func Ironbank() error {
+	if runtime.GOARCH != "amd64" {
+		fmt.Printf(">> IronBank images are only supported for amd64 arch (%s is not supported)\n", runtime.GOARCH)
+		return nil
+	}
+	if err := prepareIronbankBuild(); err != nil {
+		return errors.Wrap(err, "failed to prepare the IronBank context")
+	}
+	if err := saveIronbank(); err != nil {
+		return errors.Wrap(err, "failed to save artifacts for IronBank")
+	}
+	return nil
+}
+
+func saveIronbank() error {
+	fmt.Println(">> saveIronbank: save the IronBank container context.")
+
+	ironbank := getIronbankContextName()
+	buildDir := filepath.Join("build", ironbank)
+	if _, err := os.Stat(buildDir); os.IsNotExist(err) {
+		return fmt.Errorf("cannot find the folder with the ironbank context: %+v", err)
+	}
+
+	distributionsDir := "build/distributions"
+	if _, err := os.Stat(distributionsDir); os.IsNotExist(err) {
+		err := os.MkdirAll(distributionsDir, 0750)
+		if err != nil {
+			return fmt.Errorf("cannot create folder for docker artifacts: %+v", err)
+		}
+	}
+
+	// change dir to the buildDir location where the ironbank folder exists
+	// this will generate a tar.gz without some nested folders.
+	wd, _ := os.Getwd()
+	os.Chdir(buildDir)
+	defer os.Chdir(wd)
+
+	// point the archive two directories up, since buildDir is nested two
+	// levels deep under build/.
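+	// (illustrative: with version 8.4.0-SNAPSHOT the archive would land at
+	// build/distributions/elastic-agent-ironbank-8.4.0-SNAPSHOT-docker-build-context.tar.gz)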
+ tarGzFile := filepath.Join("..", "..", distributionsDir, ironbank+".tar.gz") + + // Save the build context as tar.gz artifact + err := devtools.Tar("./", tarGzFile) + if err != nil { + return fmt.Errorf("cannot compress the tar.gz file: %+v", err) + } + + return errors.Wrap(devtools.CreateSHA512File(tarGzFile), "failed to create .sha512 file") +} + +func getIronbankContextName() string { + version, _ := devtools.BeatQualifiedVersion() + defaultBinaryName := "{{.Name}}-ironbank-{{.Version}}{{if .Snapshot}}-SNAPSHOT{{end}}" + outputDir, _ := devtools.Expand(defaultBinaryName+"-docker-build-context", map[string]interface{}{ + "Name": "elastic-agent", + "Version": version, + }) + return outputDir +} + +func prepareIronbankBuild() error { + fmt.Println(">> prepareIronbankBuild: prepare the IronBank container context.") + buildDir := filepath.Join("build", getIronbankContextName()) + templatesDir := filepath.Join("dev-tools", "packaging", "templates", "ironbank") + + data := map[string]interface{}{ + "MajorMinor": majorMinor(), + } + + err := filepath.Walk(templatesDir, func(path string, info os.FileInfo, _ error) error { + if !info.IsDir() { + target := strings.TrimSuffix( + filepath.Join(buildDir, filepath.Base(path)), + ".tmpl", + ) + + err := devtools.ExpandFile(path, target, data) + if err != nil { + return errors.Wrapf(err, "expanding template '%s' to '%s'", path, target) + } + } + return nil + }) + + if err != nil { + return fmt.Errorf("cannot create templates for the IronBank: %+v", err) + } + + // copy files + sourcePath := filepath.Join("dev-tools", "packaging", "files", "ironbank") + if err := devtools.Copy(sourcePath, buildDir); err != nil { + return fmt.Errorf("cannot create files for the IronBank: %+v", err) + } + return nil +} + +func majorMinor() string { + if v, _ := devtools.BeatQualifiedVersion(); v != "" { + parts := strings.SplitN(v, ".", 3) + return parts[0] + "." + parts[1] + } + return "" +} diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml index 9a64bdc5d95..b8b6792b912 100644 --- a/testing/environments/snapshot.yml +++ b/testing/environments/snapshot.yml @@ -3,7 +3,7 @@ version: '2.3' services: elasticsearch: - image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-7e67f5d9-SNAPSHOT + image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d058e92f-SNAPSHOT # When extend is used it merges healthcheck.tests, see: # https://github.com/docker/compose/issues/8962 # healthcheck: @@ -42,7 +42,7 @@ services: - ./docker/logstash/pki:/etc/pki:ro kibana: - image: docker.elastic.co/kibana/kibana:8.4.0-7e67f5d9-SNAPSHOT + image: docker.elastic.co/kibana/kibana:8.4.0-d058e92f-SNAPSHOT environment: - "ELASTICSEARCH_USERNAME=kibana_system_user" - "ELASTICSEARCH_PASSWORD=testing" From 2cc23382db4c89e605faf057b651a11cfe567638 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 2 Aug 2022 12:20:27 -0400 Subject: [PATCH 11/49] [v2] Fix inspect command (#805) * Write the inspect command for v2. * Fix lint. * Fix code review. Load inputs from inputs.d for inspect. * Fix lint. * Refactor to use errgroup. * Remove unused struct. 
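For reviewers, a minimal sketch of how the config.Discoverer and config.Loader introduced below fit
together; the paths are illustrative assumptions, not values taken from this patch:

    // Build a discovery function from one or more path patterns (empty patterns are skipped).
    discover := config.Discoverer(
        "/etc/elastic-agent/elastic-agent.yml", // primary configuration file (illustrative)
        "/etc/elastic-agent/inputs.d/*.yml",    // external inputs glob (illustrative)
    )
    files, err := discover() // expands the patterns into concrete file paths
    if err != nil {
        // config.ErrNoConfiguration is returned when no non-empty patterns were given
        return err
    }
    // Merge every discovered file into a single configuration.
    loader := config.NewLoader(log, "/etc/elastic-agent/inputs.d/*.yml")
    cfg, err := loader.Load(files)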
--- internal/pkg/agent/application/application.go | 36 +- internal/pkg/agent/application/once.go | 6 +- .../pkg/agent/application/paths/common.go | 10 +- internal/pkg/agent/application/periodic.go | 6 +- internal/pkg/agent/cmd/diagnostics.go | 12 +- internal/pkg/agent/cmd/inspect.go | 538 ++++++++++-------- internal/pkg/agent/cmd/inspect_test.go | 53 -- internal/pkg/agent/configuration/settings.go | 5 - internal/pkg/agent/install/uninstall.go | 2 +- internal/pkg/composable/controller.go | 15 + internal/pkg/config/discover.go | 39 ++ internal/pkg/config/operations/inspector.go | 18 +- pkg/component/component.go | 17 +- pkg/component/error.go | 32 ++ pkg/component/load.go | 14 +- 15 files changed, 437 insertions(+), 366 deletions(-) delete mode 100644 internal/pkg/agent/cmd/inspect_test.go create mode 100644 internal/pkg/config/discover.go create mode 100644 pkg/component/error.go diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index bf0d0fd6444..6b3c4b73d42 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -6,7 +6,6 @@ package application import ( "fmt" - "path/filepath" "go.elastic.co/apm" @@ -20,17 +19,11 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" - "github.com/elastic/elastic-agent/internal/pkg/dir" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" ) -type discoverFunc func() ([]string, error) - -// ErrNoConfiguration is returned when no configuration are found. -var ErrNoConfiguration = errors.New("no configuration found", errors.TypeConfig) - // New creates a new Agent and bootstrap the required subsystem. func New( log *logger.Logger, @@ -83,8 +76,8 @@ func New( if configuration.IsStandalone(cfg.Fleet) { log.Info("Parsed configuration and determined agent is managed locally") - loader := config.NewLoader(log, externalConfigsGlob()) - discover := discoverer(pathConfigFile, cfg.Settings.Path, externalConfigsGlob()) + loader := config.NewLoader(log, paths.ExternalInputs()) + discover := config.Discoverer(pathConfigFile, cfg.Settings.Path, paths.ExternalInputs()) if !cfg.Settings.Reload.Enabled { log.Debug("Reloading of configuration is off") configMgr = newOnce(log, discover, loader) @@ -173,28 +166,3 @@ func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.C return store, cfg, nil } - -func externalConfigsGlob() string { - return filepath.Join(paths.Config(), configuration.ExternalInputsPattern) -} - -func discoverer(patterns ...string) discoverFunc { - p := make([]string, 0, len(patterns)) - for _, newP := range patterns { - if len(newP) == 0 { - continue - } - - p = append(p, newP) - } - - if len(p) == 0 { - return func() ([]string, error) { - return []string{}, ErrNoConfiguration - } - } - - return func() ([]string, error) { - return dir.DiscoverFiles(p...) 
-	}
-}
diff --git a/internal/pkg/agent/application/once.go b/internal/pkg/agent/application/once.go
index 7326612950b..fca0ed3e741 100644
--- a/internal/pkg/agent/application/once.go
+++ b/internal/pkg/agent/application/once.go
@@ -17,13 +17,13 @@ import (
 
 type once struct {
 	log      *logger.Logger
-	discover discoverFunc
+	discover config.DiscoverFunc
 	loader   *config.Loader
 	ch       chan coordinator.ConfigChange
 	errCh    chan error
 }
 
-func newOnce(log *logger.Logger, discover discoverFunc, loader *config.Loader) *once {
+func newOnce(log *logger.Logger, discover config.DiscoverFunc, loader *config.Loader) *once {
 	return &once{log: log, discover: discover, loader: loader, ch: make(chan coordinator.ConfigChange), errCh: make(chan error)}
 }
 
@@ -34,7 +34,7 @@ func (o *once) Run(ctx context.Context) error {
 	}
 
 	if len(files) == 0 {
-		return ErrNoConfiguration
+		return config.ErrNoConfiguration
 	}
 
 	cfg, err := readfiles(files, o.loader)
diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go
index 79b114144cc..8b6cc06743e 100644
--- a/internal/pkg/agent/application/paths/common.go
+++ b/internal/pkg/agent/application/paths/common.go
@@ -23,6 +23,9 @@ const (
 	tempSubdir = "tmp"
 )
 
+// ExternalInputsPattern is a glob that matches the paths of external configuration files.
+var ExternalInputsPattern = filepath.Join("inputs.d", "*.yml")
+
 var (
 	topPath    string
 	configPath string
@@ -69,7 +72,7 @@ func TempDir() string {
 	tmpDir := filepath.Join(Data(), tempSubdir)
 	tmpCreator.Do(func() {
 		// create the tempdir as it probably doesn't exist
-		os.MkdirAll(tmpDir, 0750)
+		_ = os.MkdirAll(tmpDir, 0750)
 	})
 	return tmpDir
 }
@@ -119,6 +122,11 @@ func ConfigFile() string {
 	return filepath.Join(Config(), configFilePath)
 }
 
+// ExternalInputs returns the path to load external inputs from.
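+// The result is a glob (the config directory joined with inputs.d/*.yml), not a single concrete file.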
+func ExternalInputs() string { + return filepath.Join(Config(), ExternalInputsPattern) +} + // Data returns the data directory for Agent func Data() string { if unversionedHome { diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go index bb9f717a7af..e32234a4ca3 100644 --- a/internal/pkg/agent/application/periodic.go +++ b/internal/pkg/agent/application/periodic.go @@ -22,7 +22,7 @@ type periodic struct { period time.Duration watcher *filewatcher.Watch loader *config.Loader - discover discoverFunc + discover config.DiscoverFunc ch chan coordinator.ConfigChange errCh chan error } @@ -62,7 +62,7 @@ func (p *periodic) work() error { } if len(files) == 0 { - return ErrNoConfiguration + return config.ErrNoConfiguration } // Reset the state of the watched files @@ -115,7 +115,7 @@ func (p *periodic) work() error { func newPeriodic( log *logger.Logger, period time.Duration, - discover discoverFunc, + discover config.DiscoverFunc, loader *config.Loader, ) *periodic { w, err := filewatcher.New(log, filewatcher.DefaultComparer) diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index 811b88465d2..f267c2df162 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -398,6 +398,11 @@ func outputDiagnostics(w io.Writer, d DiagnosticsInfo) error { } func gatherConfig() (AgentConfig, error) { + log, err := newErrorLogger() + if err != nil { + return AgentConfig{}, err + } + cfg := AgentConfig{} localCFG, err := loadConfig(nil) if err != nil { @@ -405,7 +410,7 @@ func gatherConfig() (AgentConfig, error) { } cfg.ConfigLocal = localCFG - renderedCFG, err := operations.LoadFullAgentConfig(paths.ConfigFile(), true) + renderedCFG, err := operations.LoadFullAgentConfig(log, paths.ConfigFile(), true) if err != nil { return cfg, err } @@ -434,11 +439,6 @@ func gatherConfig() (AgentConfig, error) { return AgentConfig{}, err } - log, err := newErrorLogger() - if err != nil { - return AgentConfig{}, err - } - // Get process config - uses same approach as inspect output command. // Does not contact server process to request configs. 
 	pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone)
diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go
index 0c51bb40460..32455a179c4 100644
--- a/internal/pkg/agent/cmd/inspect.go
+++ b/internal/pkg/agent/cmd/inspect.go
@@ -5,383 +5,437 @@
 package cmd
 
 import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+	"time"
+
 	"github.com/spf13/cobra"
+	"golang.org/x/sync/errgroup"
+	"gopkg.in/yaml.v2"
+
+	"github.com/elastic/elastic-agent-libs/logp"
+	"github.com/elastic/elastic-agent-libs/service"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
+	"github.com/elastic/elastic-agent/internal/pkg/capabilities"
 	"github.com/elastic/elastic-agent/internal/pkg/cli"
+	"github.com/elastic/elastic-agent/internal/pkg/composable"
 	"github.com/elastic/elastic-agent/internal/pkg/config"
+	"github.com/elastic/elastic-agent/internal/pkg/config/operations"
+	"github.com/elastic/elastic-agent/pkg/component"
+	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
 func newInspectCommandWithArgs(s []string, streams *cli.IOStreams) *cobra.Command {
 	cmd := &cobra.Command{
 		Use:   "inspect",
 		Short: "Shows configuration of the agent",
-		Long:  "Shows current configuration of the agent",
-		Args:  cobra.ExactArgs(0),
+		Long: `Shows current configuration of the agent.
+
+By default variable substitution is not performed. Use the --variables flag to enable variable substitution. The
+first set of computed variables is used when only the --variables flag is defined. This can prevent some of the
+dynamic providers (kubernetes, docker, etc.) from providing all the possible variables they could have discovered if
+given more time. The --variables-wait flag allows an amount of time to be provided for variable discovery; when set,
+the command waits that amount of time before using the variables for the configuration.
+`,
+		Args: cobra.ExactArgs(0),
 		Run: func(c *cobra.Command, args []string) {
-			// TODO(blakerouse): Fix inspect command for Elastic Agent v2
-			/*
-				if err := inspectConfig(paths.ConfigFile()); err != nil {
-					fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage())
-					os.Exit(1)
-				}
-			*/
+			var opts inspectConfigOpts
+			opts.variables, _ = c.Flags().GetBool("variables")
+			opts.variablesWait, _ = c.Flags().GetDuration("variables-wait")
+
+			ctx, cancel := context.WithCancel(context.Background())
+			service.HandleSignals(func() {}, cancel)
+			if err := inspectConfig(ctx, paths.ConfigFile(), opts, streams); err != nil {
+				fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage())
+				os.Exit(1)
+			}
 		},
 	}
 
-	cmd.AddCommand(newInspectOutputCommandWithArgs(s))
+	cmd.Flags().Bool("variables", false, "render configuration with variables substituted")
+	cmd.Flags().Duration("variables-wait", time.Duration(0), "wait this amount of time for variables before performing substitution")
+
+	cmd.AddCommand(newInspectComponentsCommandWithArgs(s, streams))
 
 	return cmd
 }
 
-func newInspectOutputCommandWithArgs(_ []string) *cobra.Command {
+func newInspectComponentsCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command {
 	cmd := &cobra.Command{
-		Use:   "output",
-		Short: "Displays configuration generated for output",
-		Long:  "Displays configuration generated for output.\nIf no output is specified list of output is displayed",
-		Args:  cobra.MaximumNArgs(2),
-		RunE: func(c *cobra.Command, args []string) error {
-			// TODO(blakerouse): Fix inspect command for Elastic Agent v2
-			/*
-				outName, _ := c.Flags().GetString("output")
-				program, _ := c.Flags().GetString("program")
-				cfgPath := paths.ConfigFile()
-				agentInfo, err := info.NewAgentInfo(false)
-				if err != nil {
-					return err
-				}
+		Use:   "components [id]",
+		Short: "Displays the components model for the configuration",
+		Long: `Displays the generated components model for the current configuration.
+
+By default the configuration for each unit inside of a component is not returned. Use --show-config to display the
+configuration in all the units.
+
+A specific component can be selected by its ID and only that component and all its units will be returned. Because it's
+possible for a component to have many units, the configuration for each unit is still not provided by default. Use
+--show-config to display the configuration in all the units.
+
+A specific unit inside of a component can be selected by using <component-id>/<unit-id> and only that unit will be
+returned. In this mode the configuration is provided by default; using --show-config is a no-op.
 
-			if outName == "" {
-				return inspectOutputs(cfgPath, agentInfo)
-			}
+The selected input or output runtime specification for a component is never provided unless enabled with --show-spec.
 
-			return inspectOutput(cfgPath, outName, program, agentInfo)
-			*/
-			return nil
+Variable substitution is always performed when computing the components, and it cannot be disabled. By default only the
+first set of computed variables is used. This can prevent some of the dynamic providers (kubernetes, docker, etc.) from
+providing all the possible variables they could have discovered if given more time. The --variables-wait flag allows an
+amount of time to be provided for variable discovery; when set, the command waits that amount of time before using the
+variables for the configuration.
+`, + Args: cobra.MaximumNArgs(1), + Run: func(c *cobra.Command, args []string) { + var opts inspectComponentsOpts + if len(args) > 0 { + opts.id = args[0] + } + opts.showConfig, _ = c.Flags().GetBool("show-config") + opts.showSpec, _ = c.Flags().GetBool("show-spec") + opts.variablesWait, _ = c.Flags().GetDuration("variables-wait") + + ctx, cancel := context.WithCancel(context.Background()) + service.HandleSignals(func() {}, cancel) + if err := inspectComponents(ctx, paths.ConfigFile(), opts, streams); err != nil { + fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) + os.Exit(1) + } }, } - cmd.Flags().StringP("output", "o", "", "name of the output to be inspected") - cmd.Flags().StringP("program", "p", "", "type of program to inspect, needs to be combined with output. e.g filebeat") + cmd.Flags().Bool("show-config", false, "show the configuration for all units") + cmd.Flags().Bool("show-spec", false, "show the runtime specification for a component") + cmd.Flags().Duration("variables-wait", time.Duration(0), "wait this amount of time for variables before performing substitution") return cmd } -/* -func inspectConfig(cfgPath string) error { - err := tryContainerLoadPaths() - if err != nil { - return err - } +type inspectConfigOpts struct { + variables bool + variablesWait time.Duration +} - fullCfg, err := operations.LoadFullAgentConfig(cfgPath, true) +func inspectConfig(ctx context.Context, cfgPath string, opts inspectConfigOpts, streams *cli.IOStreams) error { + err := tryContainerLoadPaths() if err != nil { return err } - return printConfig(fullCfg) -} - -func printMapStringConfig(mapStr map[string]interface{}) error { l, err := newErrorLogger() if err != nil { return err } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) + + fullCfg, err := operations.LoadFullAgentConfig(l, cfgPath, true) if err != nil { return err } - newCfg, err := caps.Apply(mapStr) - if err != nil { - return errors.New(err, "failed to apply capabilities") + if !opts.variables { + return printConfig(fullCfg, l, streams) } - newMap, ok := newCfg.(map[string]interface{}) - if !ok { - return errors.New("config returned from capabilities has invalid type") + cfg, err := getConfigWithVariables(ctx, l, cfgPath, opts.variablesWait) + if err != nil { + return err } + return printMapStringConfig(cfg, streams) +} - data, err := yaml.Marshal(newMap) +func printMapStringConfig(mapStr map[string]interface{}, streams *cli.IOStreams) error { + data, err := yaml.Marshal(mapStr) if err != nil { return errors.New(err, "could not marshal to YAML") } - _, err = os.Stdout.WriteString(string(data)) + _, err = streams.Out.Write(data) return err } -func printConfig(cfg *config.Config) error { - mapStr, err := cfg.ToMapStr() +func printConfig(cfg *config.Config, l *logger.Logger, streams *cli.IOStreams) error { + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) if err != nil { return err } - return printMapStringConfig(mapStr) -} - -func newErrorLogger() (*logger.Logger, error) { - return logger.NewWithLogpLevel("", logp.ErrorLevel, false) -} - -func inspectOutputs(cfgPath string, agentInfo *info.AgentInfo) error { - l, err := newErrorLogger() + mapStr, err := cfg.ToMapStr() if err != nil { return err } - - fullCfg, err := operations.LoadFullAgentConfig(cfgPath, true) + newCfg, err := caps.Apply(mapStr) if err != nil { - return err + return errors.New(err, "failed to apply capabilities") } - - fleetConfig, err := fullCfg.ToMapStr() - if err != nil { - return err + newMap, ok := 
newCfg.(map[string]interface{})
+	if !ok {
+		return errors.New("config returned from capabilities has invalid type")
 	}
 
-	isStandalone, err := isStandalone(fullCfg)
-	if err != nil {
-		return err
-	}
+	return printMapStringConfig(newMap, streams)
+}
 
-	return listOutputsFromMap(l, agentInfo, fleetConfig, isStandalone)
+type inspectComponentsOpts struct {
+	id            string
+	showConfig    bool
+	showSpec      bool
+	variablesWait time.Duration
 }
 
-func listOutputsFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, cfg *config.Config, isStandalone bool) error {
-	programsGroup, err := getProgramsFromConfig(log, agentInfo, cfg, isStandalone)
+func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponentsOpts, streams *cli.IOStreams) error {
+	l, err := newErrorLogger()
 	if err != nil {
 		return err
-
-	}
-
-	for k := range programsGroup {
-		_, _ = os.Stdout.WriteString(k)
 	}
-	return nil
-}
 
-func listOutputsFromMap(log *logger.Logger, agentInfo *info.AgentInfo, cfg map[string]interface{}, isStandalone bool) error {
-	c, err := config.NewConfigFrom(cfg)
+	// Ensure the correct paths are used when running inside a container.
+	err = tryContainerLoadPaths()
 	if err != nil {
 		return err
 	}
 
-	return listOutputsFromConfig(log, agentInfo, c, isStandalone)
-}
-
-func inspectOutput(cfgPath, output, program string, agentInfo *info.AgentInfo) error {
-	l, err := newErrorLogger()
+	// Load the requirements before trying to load the configuration. These should always load
+	// even if the configuration is wrong.
+	platform, err := component.LoadPlatformDetail()
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to gather system information: %w", err)
 	}
-
-	fullCfg, err := operations.LoadFullAgentConfig(cfgPath, true)
+	specs, err := component.LoadRuntimeSpecs(paths.Components(), platform)
 	if err != nil {
-		return err
+		return fmt.Errorf("failed to detect inputs and outputs: %w", err)
 	}
 
-	fleetConfig, err := fullCfg.ToMapStr()
+	m, err := getConfigWithVariables(ctx, l, cfgPath, opts.variablesWait)
 	if err != nil {
 		return err
 	}
 
-	return printOutputFromMap(l, agentInfo, output, program, fleetConfig, true)
-}
-
-func printOutputFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, output, programName string, cfg *config.Config, isStandalone bool) error {
-	programsGroup, err := getProgramsFromConfig(log, agentInfo, cfg, isStandalone)
+	// Compute the components from the rendered configuration.
+	comps, err := specs.ToComponents(m)
 	if err != nil {
-		return err
-
+		return fmt.Errorf("failed to render components: %w", err)
 	}
 
-	for k, programs := range programsGroup {
-		if k != output {
-			continue
+	// ID provided.
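+	// The selector has the form <component-id>[/<unit-id>]: a bare component ID prints the whole
+	// component, while a component/unit pair prints just that unit with its config always included.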
+ if opts.id != "" { + splitID := strings.SplitN(opts.id, "/", 2) + compID := splitID[0] + unitID := "" + if len(splitID) > 1 { + unitID = splitID[1] } - - var programFound bool - for _, p := range programs { - if programName != "" && programName != p.Spec.CommandName() { - continue + comp, ok := findComponent(comps, compID) + if ok { + if unitID != "" { + unit, ok := findUnit(comp, unitID) + if ok { + return printUnit(unit, streams) + } + return fmt.Errorf("unable to find unit with ID: %s/%s", compID, unitID) } - - programFound = true - _, _ = os.Stdout.WriteString(fmt.Sprintf("[%s] %s:\n", k, p.Spec.CommandName())) - err = printMapStringConfig(p.Configuration()) - if err != nil { - return fmt.Errorf("cannot print configuration of program '%s': %w", programName, err) + if !opts.showSpec { + comp.Spec = component.InputRuntimeSpec{} } - _, _ = os.Stdout.WriteString("---") + if !opts.showConfig { + for key, unit := range comp.Units { + unit.Config = nil + comp.Units[key] = unit + } + } + return printComponent(comp, streams) } + return fmt.Errorf("unable to find component with ID: %s", compID) + } - if !programFound { - return fmt.Errorf("program '%s' is not recognized within output '%s', try running `elastic-agent inspect output` to find available outputs", - programName, - output) + // Hide configuration unless toggled on. + if !opts.showConfig { + for i, comp := range comps { + for key, unit := range comp.Units { + unit.Config = nil + comp.Units[key] = unit + } + comps[i] = comp } - - return nil } - return fmt.Errorf("output '%s' is not recognized, try running `elastic-agent inspect output` to find available outputs", output) - -} - -func printOutputFromMap(log *logger.Logger, agentInfo *info.AgentInfo, output, programName string, cfg map[string]interface{}, isStandalone bool) error { - c, err := config.NewConfigFrom(cfg) - if err != nil { - return err + // Hide runtime specification unless toggled on. 
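+	// Zeroing the struct lets the `yaml:"spec,omitempty"` tag drop it from the marshalled output.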
+ if !opts.showSpec { + for i, comp := range comps { + comp.Spec = component.InputRuntimeSpec{} + comps[i] = comp + } } - return printOutputFromConfig(log, agentInfo, output, programName, c, isStandalone) + return printComponents(comps, streams) } -func getProgramsFromConfig(log *logger.Logger, agentInfo *info.AgentInfo, cfg *config.Config, isStandalone bool) (map[string][]program.Program, error) { - monitor := noop.NewMonitor() - router := &inmemRouter{} - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - composableCtrl, err := composable.New(log, cfg) +func getConfigWithVariables(ctx context.Context, l *logger.Logger, cfgPath string, timeout time.Duration) (map[string]interface{}, error) { + caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) if err != nil { - return nil, err - } - - composableWaiter := newWaitForCompose(composableCtrl) - configModifiers := &pipeline.ConfigModifiers{ - Decorators: []pipeline.DecoratorFunc{modifiers.InjectMonitoring}, - Filters: []pipeline.FilterFunc{filters.StreamChecker}, - } - - if !isStandalone { - sysInfo, err := sysinfo.Host() - if err != nil { - return nil, errors.New(err, - "fail to get system information", - errors.TypeUnexpected) - } - configModifiers.Filters = append(configModifiers.Filters, modifiers.InjectFleet(cfg, sysInfo.Info(), agentInfo)) + return nil, fmt.Errorf("failed to determine capabilities: %w", err) } - caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), log) + cfg, err := operations.LoadFullAgentConfig(l, cfgPath, true) if err != nil { return nil, err } - - emit, err := emitter.New( - ctx, - log, - agentInfo, - composableWaiter, - router, - configModifiers, - caps, - monitor, - ) + m, err := cfg.ToMapStr() if err != nil { return nil, err } - - if err := emit(ctx, cfg); err != nil { - return nil, err + ast, err := transpiler.NewAST(m) + if err != nil { + return nil, fmt.Errorf("could not create the AST from the configuration: %w", err) } - composableWaiter.Wait() - // add the fleet-server input to default programs list - // this does not correspond to the actual config that fleet-server uses as it's in fleet.yml and not part of the assembled config (cfg) - fleetCFG, err := cfg.ToMapStr() + var ok bool + updatedAst, err := caps.Apply(ast) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to apply capabilities: %w", err) } - if fleetInput := getFleetInput(fleetCFG); fleetInput != nil { - ast, err := transpiler.NewAST(fleetInput) - if err != nil { - return nil, err - } - router.programs["default"] = append(router.programs["default"], program.Program{ - Spec: program.Spec{ - Name: "fleet-server", - Cmd: "fleet-server", - }, - Config: ast, - }) + ast, ok = updatedAst.(*transpiler.AST) + if !ok { + return nil, fmt.Errorf("failed to transform object returned from capabilities to AST: %w", err) } - return router.programs, nil -} - -func getFleetInput(o map[string]interface{}) map[string]interface{} { - arr, ok := o["inputs"].([]interface{}) - if !ok { - return nil + // Wait for the variables based on the timeout. + vars, err := waitForVariables(ctx, l, cfg, timeout) + if err != nil { + return nil, fmt.Errorf("failed to gather variables: %w", err) } - for _, iface := range arr { - input, ok := iface.(map[string]interface{}) - if !ok { - continue - } - t, ok := input["type"] - if !ok { - continue + + // Render the inputs using the discovered inputs. 
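+	// Only the top-level "inputs" key is re-rendered with those variables; the rest of the
+	// configuration map passes through unchanged.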
+ inputs, ok := transpiler.Lookup(ast, "inputs") + if ok { + renderedInputs, err := transpiler.RenderInputs(inputs, vars) + if err != nil { + return nil, fmt.Errorf("rendering inputs failed: %w", err) } - if t.(string) == "fleet-server" { - return input + err = transpiler.Insert(ast, renderedInputs, "inputs") + if err != nil { + return nil, fmt.Errorf("inserting rendered inputs failed: %w", err) } } - return nil + m, err = ast.Map() + if err != nil { + return nil, fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) + } + return m, nil } -type inmemRouter struct { - programs map[string][]program.Program -} +func waitForVariables(ctx context.Context, l *logger.Logger, cfg *config.Config, wait time.Duration) ([]*transpiler.Vars, error) { + var cancel context.CancelFunc + var vars []*transpiler.Vars -func (r *inmemRouter) Routes() *sorted.Set { - return nil -} + composable, err := composable.New(l, cfg) + if err != nil { + return nil, fmt.Errorf("failed to create composable controller: %w", err) + } -func (r *inmemRouter) Route(_ context.Context, _ string, grpProg map[pipeline.RoutingKey][]program.Program) error { - r.programs = grpProg - return nil -} + hasTimeout := false + if wait > time.Duration(0) { + hasTimeout = true + ctx, cancel = context.WithTimeout(ctx, wait) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() -func (r *inmemRouter) Shutdown() {} + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + var err error + for { + select { + case <-ctx.Done(): + if err == nil { + err = ctx.Err() + } + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + err = nil + } + return err + case cErr := <-composable.Errors(): + err = cErr + if err != nil { + cancel() + } + case cVars := <-composable.Watch(): + vars = cVars + if !hasTimeout { + cancel() + } + } + } + }) -type waitForCompose struct { - controller composable.Controller - done chan bool -} + g.Go(func() error { + err := composable.Run(ctx) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + err = nil + } + return err + }) -func newWaitForCompose(wrapped composable.Controller) *waitForCompose { - return &waitForCompose{ - controller: wrapped, - done: make(chan bool), + err = g.Wait() + if err != nil { + return nil, err } + return vars, nil } -func (w *waitForCompose) Run(ctx context.Context) error { - err := w.controller.Run(ctx) +func printComponents(components []component.Component, streams *cli.IOStreams) error { + topLevel := struct { + Components []component.Component `yaml:"components"` + }{ + Components: components, + } + data, err := yaml.Marshal(topLevel) + if err != nil { + return errors.New(err, "could not marshal to YAML") + } + _, err = streams.Out.Write(data) return err } -func (w *waitForCompose) Errors() <-chan error { - return nil +func printComponent(comp component.Component, streams *cli.IOStreams) error { + data, err := yaml.Marshal(comp) + if err != nil { + return errors.New(err, "could not marshal to YAML") + } + _, err = streams.Out.Write(data) + return err } -func (w *waitForCompose) Watch() <-chan []*transpiler.Vars { - return nil +func printUnit(unit component.Unit, streams *cli.IOStreams) error { + data, err := yaml.Marshal(unit) + if err != nil { + return errors.New(err, "could not marshal to YAML") + } + _, err = streams.Out.Write(data) + return err } -func (w *waitForCompose) Wait() { - <-w.done +func findUnit(comp component.Component, id string) (component.Unit, bool) { + for _, unit := range 
comp.Units {
+		if unit.ID == id {
+			return unit, true
+		}
+	}
+	return component.Unit{}, false
 }
-*/
 
-func isStandalone(cfg *config.Config) (bool, error) {
-	c, err := configuration.NewFromConfig(cfg)
-	if err != nil {
-		return false, err
+
+func findComponent(components []component.Component, id string) (component.Component, bool) {
+	for _, comp := range components {
+		if comp.ID == id {
+			return comp, true
+		}
 	}
+	return component.Component{}, false
+}
 
-	return configuration.IsStandalone(c.Fleet), nil
+func newErrorLogger() (*logger.Logger, error) {
+	return logger.NewWithLogpLevel("", logp.ErrorLevel, false)
 }
diff --git a/internal/pkg/agent/cmd/inspect_test.go b/internal/pkg/agent/cmd/inspect_test.go
deleted file mode 100644
index 3a5ffb35380..00000000000
--- a/internal/pkg/agent/cmd/inspect_test.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package cmd
-
-/*
-import (
-	"testing"
-)
-
-func TestGetFleetInput(t *testing.T) {
-	tests := []struct {
-		name   string
-		input  map[string]interface{}
-		expect map[string]interface{}
-	}{{
-		name: "fleet-server input found",
-		input: map[string]interface{}{
-			"inputs": []map[string]interface{}{
-				map[string]interface{}{
-					"type": "fleet-server",
-				}},
-		},
-		expect: map[string]interface{}{
-			"type": "fleet-server",
-		},
-	}, {
-		name: "no fleet-server input",
-		input: map[string]interface{}{
-			"inputs": []map[string]interface{}{
-				map[string]interface{}{
-					"type": "test-server",
-				}},
-		},
-		expect: nil,
-	}, {
-		name: "wrong input formant",
-		input: map[string]interface{}{
-			"inputs": "example",
-		},
-		expect: nil,
-	}}
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			r := getFleetInput(tt.input)
-			if tt.expect == nil && r != nil {
-				t.Error("expected nil")
-			}
-		})
-	}
-}
-*/
diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go
index eab16a8177d..0a211101c4d 100644
--- a/internal/pkg/agent/configuration/settings.go
+++ b/internal/pkg/agent/configuration/settings.go
@@ -5,8 +5,6 @@
 package configuration
 
 import (
-	"path/filepath"
-
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact"
 	monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config"
 
@@ -14,9 +12,6 @@ import (
 	"github.com/elastic/elastic-agent/pkg/core/process"
 )
 
-// ExternalInputsPattern is a glob that matches the paths of external configuration files.
-var ExternalInputsPattern = filepath.Join("inputs.d", "*.yml")
-
 // SettingsConfig is a collection of agent settings configuration.
 type SettingsConfig struct {
 	DownloadConfig *artifact.Config `yaml:"download" config:"download" json:"download"`
diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go
index d1ee5e371ff..61588f5de97 100644
--- a/internal/pkg/agent/install/uninstall.go
+++ b/internal/pkg/agent/install/uninstall.go
@@ -127,7 +127,7 @@ func uninstallComponents(ctx context.Context, cfgFile string) error {
 		return fmt.Errorf("failed to detect inputs and outputs: %w", err)
 	}
 
-	cfg, err := operations.LoadFullAgentConfig(cfgFile, false)
+	cfg, err := operations.LoadFullAgentConfig(log, cfgFile, false)
 	if err != nil {
 		return err
 	}
diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go
index 116424ae8e4..babd1230586 100644
--- a/internal/pkg/composable/controller.go
+++ b/internal/pkg/composable/controller.go
@@ -158,6 +158,21 @@ func (c *controller) Run(ctx context.Context) error {
 			c.logger.Debugf("Stopping controller for composable inputs")
 			t.Stop()
 			cancel()
+
+			// wait for all providers to stop (but it's possible they still send notifications over the
+			// notify channel, and we cannot block them from sending)
+			emptyChan, emptyCancel := context.WithCancel(context.Background())
+			defer emptyCancel()
+			go func() {
+				for {
+					select {
+					case <-emptyChan.Done():
+						return
+					case <-notify:
+					}
+				}
+			}()
+
 			wg.Wait()
 			return ctx.Err()
 		case <-notify:
diff --git a/internal/pkg/config/discover.go b/internal/pkg/config/discover.go
new file mode 100644
index 00000000000..2408626fdaf
--- /dev/null
+++ b/internal/pkg/config/discover.go
@@ -0,0 +1,39 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package config
+
+import (
+	"errors"
+
+	"github.com/elastic/elastic-agent/internal/pkg/dir"
+)
+
+// ErrNoConfiguration is returned when no configuration is found.
+var ErrNoConfiguration = errors.New("no configuration found")
+
+// DiscoverFunc is a function that discovers a list of files to load.
+type DiscoverFunc func() ([]string, error)
+
+// Discoverer returns a DiscoverFunc that discovers all files that match the given patterns.
+func Discoverer(patterns ...string) DiscoverFunc {
+	p := make([]string, 0, len(patterns))
+	for _, newP := range patterns {
+		if len(newP) == 0 {
+			continue
+		}
+
+		p = append(p, newP)
+	}
+
+	if len(p) == 0 {
+		return func() ([]string, error) {
+			return []string{}, ErrNoConfiguration
+		}
+	}
+
+	return func() ([]string, error) {
+		return dir.DiscoverFiles(p...)
+	}
+}
diff --git a/internal/pkg/config/operations/inspector.go b/internal/pkg/config/operations/inspector.go
index 05ab040d92b..7feaa4e5ef6 100644
--- a/internal/pkg/config/operations/inspector.go
+++ b/internal/pkg/config/operations/inspector.go
@@ -26,7 +26,7 @@ var (
 
 // LoadFullAgentConfig loads agent config based on provided paths and defined capabilities.
 // In case fleet is used, config from policy action is returned.
-func LoadFullAgentConfig(cfgPath string, failOnFleetMissing bool) (*config.Config, error) { +func LoadFullAgentConfig(logger *logger.Logger, cfgPath string, failOnFleetMissing bool) (*config.Config, error) { rawConfig, err := loadConfig(cfgPath) if err != nil { return nil, err @@ -38,7 +38,21 @@ func LoadFullAgentConfig(cfgPath string, failOnFleetMissing bool) (*config.Confi } if configuration.IsStandalone(cfg.Fleet) { - return rawConfig, nil + // When in standalone we load the configuration again with inputs that are defined in the paths.ExternalInputs. + loader := config.NewLoader(logger, paths.ExternalInputs()) + discover := config.Discoverer(cfgPath, cfg.Settings.Path, paths.ExternalInputs()) + files, err := discover() + if err != nil { + return nil, fmt.Errorf("could not discover configuration files: %w", err) + } + if len(files) == 0 { + return nil, config.ErrNoConfiguration + } + c, err := loader.Load(files) + if err != nil { + return nil, fmt.Errorf("failed to load or merge configuration: %w", err) + } + return c, nil } fleetConfig, err := loadFleetConfig() diff --git a/pkg/component/component.go b/pkg/component/component.go index db38bb38471..94c19c8535b 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -5,7 +5,6 @@ package component import ( - "errors" "fmt" "github.com/elastic/elastic-agent-client/v7/pkg/client" @@ -16,7 +15,7 @@ import ( var ( // ErrOutputNotSupported is returned when an input does not support an output type - ErrOutputNotSupported = errors.New("input doesn't support output type") + ErrOutputNotSupported = newError("input doesn't support output type") ) // ErrInputRuntimeCheckFail error is used when an input specification runtime prevention check occurs. @@ -37,25 +36,25 @@ func (e *ErrInputRuntimeCheckFail) Error() string { // Unit is a single input or output that a component must run. type Unit struct { - ID string - Type client.UnitType - Config map[string]interface{} + ID string `yaml:"id"` + Type client.UnitType `yaml:"type"` + Config map[string]interface{} `yaml:"config,omitempty"` } // Component is a set of units that needs to run. type Component struct { // ID is the unique ID of the component. - ID string + ID string `yaml:"id"` // Err used when there is an error with running this input. Used by the runtime to alert // the reason that all of these units are failed. - Err error + Err error `yaml:"error,omitempty"` // Spec on how the input should run. - Spec InputRuntimeSpec + Spec InputRuntimeSpec `yaml:"spec,omitempty"` // Units that should be running inside this component. - Units []Unit + Units []Unit `yaml:"units"` } // ToComponents returns the components that should be running based on the policy and the current runtime specification. diff --git a/pkg/component/error.go b/pkg/component/error.go new file mode 100644 index 00000000000..25d86acaf90 --- /dev/null +++ b/pkg/component/error.go @@ -0,0 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +// errorReason is an error that can be marshalled/unmarshalled to and from YAML. 
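+// (a plain error created with errors.New would marshal to an empty YAML mapping and lose the message)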
+type errorReason struct {
+	reason string
+}
+
+func newError(reason string) error {
+	return &errorReason{reason: reason}
+}
+
+func (e *errorReason) Error() string {
+	return e.reason
+}
+
+func (e *errorReason) MarshalYAML() (interface{}, error) {
+	return e.reason, nil
+}
+
+func (e *errorReason) UnmarshalYAML(unmarshal func(interface{}) error) error {
+	var s string
+	err := unmarshal(&s)
+	if err != nil {
+		return err
+	}
+	e.reason = s
+	return nil
+}
diff --git a/pkg/component/load.go b/pkg/component/load.go
index 62a983f1f9d..2b96c5ad64c 100644
--- a/pkg/component/load.go
+++ b/pkg/component/load.go
@@ -5,12 +5,12 @@
 package component
 
 import (
+	"errors"
 	"fmt"
 	"io/ioutil"
 	"os"
 	"path/filepath"
 
-	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 	"github.com/elastic/go-ucfg/yaml"
 )
 
@@ -21,17 +21,17 @@ const (
 
 var (
 	// ErrInputNotSupported is returned when the input is not supported on any platform
-	ErrInputNotSupported = errors.New("input not supported")
+	ErrInputNotSupported = newError("input not supported")
 	// ErrInputNotSupportedOnPlatform is returned when the input is supported but not on this platform
-	ErrInputNotSupportedOnPlatform = errors.New("input not supported on this platform")
+	ErrInputNotSupportedOnPlatform = newError("input not supported on this platform")
 )
 
 // InputRuntimeSpec returns the specification for running this input on the current platform.
 type InputRuntimeSpec struct {
-	InputType  string
-	BinaryName string
-	BinaryPath string
-	Spec       InputSpec
+	InputType  string    `yaml:"input_type"`
+	BinaryName string    `yaml:"binary_name"`
+	BinaryPath string    `yaml:"binary_path"`
+	Spec       InputSpec `yaml:"spec"`
 }
 
 // RuntimeSpecs returns all the specifications for inputs that are supported on the current platform.
From 2705093d732a54b4409cfa51bdbec3e91e6d71bb Mon Sep 17 00:00:00 2001
From: Aleksandr Maus
Date: Tue, 16 Aug 2022 08:47:46 -0400
Subject: [PATCH 12/49] Expand check-in payload for V2 (#916)

* Expand check-in payload for V2

* Make linter happy
---
 .../gateway/fleet/fleet_gateway.go   | 85 ++++++++++++++++++-
 internal/pkg/fleetapi/checkin_cmd.go | 26 +++++-
 2 files changed, 103 insertions(+), 8 deletions(-)

diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go
index d8c21a580d3..fe5028b0fce 100644
--- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go
+++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go
@@ -10,6 +10,7 @@ import (
 	"fmt"
 	"time"
 
+	eaclient "github.com/elastic/elastic-agent-client/v7/pkg/client"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway"
@@ -21,12 +22,16 @@ import (
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi/client"
 	"github.com/elastic/elastic-agent/internal/pkg/scheduler"
+	"github.com/elastic/elastic-agent/pkg/component/runtime"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
 // Max number of times an invalid API Key is checked
 const maxUnauthCounter int = 6
 
+// Const for the degraded state; without it the linter complains about repeated string literals
+const degraded = "degraded"
+
 // Default Configuration for the Fleet Gateway.
var defaultGatewaySettings = &fleetGatewaySettings{ Duration: 1 * time.Second, // time between successful calls @@ -298,6 +303,73 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee return nil, ctx.Err() } +func (f *fleetGateway) convertToCheckinComponents(components []runtime.ComponentComponentState) []fleetapi.CheckinComponent { + if components == nil { + return nil + } + stateString := func(s eaclient.UnitState) string { + switch s { + case eaclient.UnitStateStarting: + return "starting" + case eaclient.UnitStateConfiguring: + return "configuring" + case eaclient.UnitStateHealthy: + return "healthy" + case eaclient.UnitStateDegraded: + return degraded + case eaclient.UnitStateFailed: + return "failed" + case eaclient.UnitStateStopping: + return "stopping" + case eaclient.UnitStateStopped: + return "stopped" + } + return "" + } + + unitTypeString := func(t eaclient.UnitType) string { + switch t { + case eaclient.UnitTypeInput: + return "input" + case eaclient.UnitTypeOutput: + return "output" + } + return "" + } + + checkinComponents := make([]fleetapi.CheckinComponent, 0, len(components)) + + for _, item := range components { + component := item.Component + state := item.State + + checkinComponent := fleetapi.CheckinComponent{ + ID: component.ID, + Type: component.Spec.InputType, + Status: stateString(state.State), + Message: state.Message, + } + + if state.Units != nil { + units := make([]fleetapi.CheckinUnit, 0, len(state.Units)) + + for unitKey, unitState := range state.Units { + units = append(units, fleetapi.CheckinUnit{ + ID: unitKey.UnitID, + Type: unitTypeString(unitKey.UnitType), + Status: stateString(unitState.State), + Message: unitState.Message, + Payload: unitState.Payload, + }) + } + checkinComponent.Units = units + } + checkinComponents = append(checkinComponents, checkinComponent) + } + + return checkinComponents +} + func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) { ecsMeta, err := info.Metadata() if err != nil { @@ -313,12 +385,17 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, // get current state state := f.stateFetcher.State() + // convert components into checkin components structure + components := f.convertToCheckinComponents(state.Components) + // checkin cmd := fleetapi.NewCheckinCmd(f.agentInfo, f.client) req := &fleetapi.CheckinRequest{ - AckToken: ackToken, - Metadata: ecsMeta, - Status: agentStateToString(state.State), + AckToken: ackToken, + Metadata: ecsMeta, + Status: agentStateToString(state.State), + Message: state.Message, + Components: components, } resp, err := cmd.Execute(ctx, req) @@ -372,5 +449,5 @@ func agentStateToString(state agentclient.State) string { case agentclient.Failed: return "error" } - return "degraded" + return degraded } diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go index 47a76ea47e7..9c2cd1513e1 100644 --- a/internal/pkg/fleetapi/checkin_cmd.go +++ b/internal/pkg/fleetapi/checkin_cmd.go @@ -20,12 +20,30 @@ import ( const checkingPath = "/api/fleet/agents/%s/checkin" +type CheckinUnit struct { + ID string `json:"id"` + Type string `json:"type"` + Status string `json:"status"` + Message string `json:"message"` + Payload map[string]interface{} `json:"payload,omitempty"` +} + +type CheckinComponent struct { + ID string `json:"id"` + Type string `json:"type"` + Status string `json:"status"` + Message string `json:"message"` + Units []CheckinUnit `json:"units,omitempty"` +} + // 
CheckinRequest consists of multiple events reported to fleet ui. type CheckinRequest struct { - Status string `json:"status"` - AckToken string `json:"ack_token,omitempty"` - Events []SerializableEvent `json:"events"` - Metadata *info.ECSMeta `json:"local_metadata,omitempty"` + Status string `json:"status"` + AckToken string `json:"ack_token,omitempty"` + Events []SerializableEvent `json:"events"` + Metadata *info.ECSMeta `json:"local_metadata,omitempty"` + Message string `json:"message"` // V2 Agent message + Components []CheckinComponent `json:"components"` // V2 Agent components } // SerializableEvent is a representation of the event to be send to the Fleet Server API via the checkin From d56e3f5778388c75ed5459678e55d4e34093312b Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 18 Aug 2022 11:48:23 -0400 Subject: [PATCH 13/49] [v2] Update protocol to use new UnitExpectedConfig. (#850) * Update v2 protocol to use new UnitExpectedConfig. * Cleanup. * Update NOTICE.txt. Lint dupl. * Fix code review. Ensure type is set to real type and not alias. --- NOTICE.txt | 43 +- go.mod | 7 +- go.sum | 13 +- .../handlers/handler_action_application.go | 7 +- pkg/component/component.go | 94 ++- pkg/component/component_test.go | 317 ++++++---- pkg/component/config.go | 171 ++++++ pkg/component/config_test.go | 204 +++++++ pkg/component/fake/main.go | 141 +++-- pkg/component/runtime/command.go | 120 +--- pkg/component/runtime/manager_test.go | 566 ++++++++++++++++-- pkg/component/runtime/runtime.go | 167 ------ pkg/component/runtime/state.go | 427 +++++++++++++ 13 files changed, 1780 insertions(+), 497 deletions(-) create mode 100644 pkg/component/config.go create mode 100644 pkg/component/config_test.go create mode 100644 pkg/component/runtime/state.go diff --git a/NOTICE.txt b/NOTICE.txt index 1451a7531c1..6aef478cffe 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -202,11 +202,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/coreos/go-systemd/v22 -Version: v22.3.2 +Version: v22.3.3-0.20220203105225-a9a7ef127534 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/coreos/go-systemd/v22@v22.3.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/coreos/go-systemd/v22@v22.3.3-0.20220203105225-a9a7ef127534/LICENSE: Apache License Version 2.0, January 2004 @@ -828,11 +828,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-a -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.0.0-20220524131921-43bacbeec516 +Version: v7.0.0-20220804181728-b0328d2fe484 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20220524131921-43bacbeec516/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20220804181728-b0328d2fe484/LICENSE.txt: ELASTIC LICENSE AGREEMENT @@ -3111,11 +3111,11 @@ THE SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/mitchellh/mapstructure -Version: v1.4.3 +Version: v1.5.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.4.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.5.0/LICENSE: The MIT License (MIT) @@ -3431,6 +3431,37 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/rs/zerolog +Version: v1.27.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/rs/zerolog@v1.27.0/LICENSE: + +MIT License + +Copyright (c) 2017 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/shirou/gopsutil/v3 Version: v3.21.12 diff --git a/go.mod b/go.mod index 267e46602f2..45a5e482760 100644 --- a/go.mod +++ b/go.mod @@ -8,11 +8,11 @@ require ( github.com/billgraziano/dpapi v0.4.0 github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e - github.com/coreos/go-systemd/v22 v22.3.2 + github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 github.com/docker/go-units v0.4.0 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab - github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 + github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 github.com/elastic/elastic-agent-libs v0.2.6 github.com/elastic/go-licenser v0.4.0 github.com/elastic/go-sysinfo v1.7.1 @@ -28,11 +28,12 @@ require ( github.com/magefile/mage v1.13.0 github.com/mitchellh/gox v1.0.1 github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51 - github.com/mitchellh/mapstructure v1.4.3 + github.com/mitchellh/mapstructure v1.5.0 github.com/oklog/ulid v1.3.1 github.com/otiai10/copy v1.2.0 github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0 github.com/pkg/errors v0.9.1 + github.com/rs/zerolog v1.27.0 github.com/shirou/gopsutil/v3 v3.21.12 github.com/spf13/cobra v1.3.0 github.com/stretchr/testify v1.7.0 diff --git a/go.sum b/go.sum index 67d8b0f1cc1..ae8ec309edf 100644 --- a/go.sum +++ b/go.sum @@ -317,8 +317,9 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9 github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= -github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 h1:rtAn27wIbmOGUs7RIbVgPEjb31ehTVniDwPGXyMxm5U= +github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= @@ -383,8 +384,8 @@ github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 h1:uYT+Krd8 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4/go.mod h1:UcNuf4pX/qDVNQr0zybm1NL2YoWik+jKBaINZqQCA40= github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab h1:Jk6Mfk5BF8gtfE7X0bNCiDGBtwJVxRI79b4wLCAsP+A= github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab/go.mod h1:Gg1fsQI+rVms9FJ2DefBSojfPIzgkV8xlyG8fPG0DE8= -github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516 h1:8sGoTlgXRCesR1+FjBv8YY5CyVhNSDjXlo4uq5q1RGM= -github.com/elastic/elastic-agent-client/v7 v7.0.0-20220524131921-43bacbeec516/go.mod 
h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= @@ -879,8 +880,9 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs= github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= +github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -1043,6 +1045,9 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= +github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.27.0 h1:1T7qCieN22GVc8S4Q2yuexzBb1EqjbgjSH9RohbMjKs= +github.com/rs/zerolog v1.27.0/go.mod h1:7frBqO0oezxmnO7GF86FY++uy8I0Tk/If5ni1G9Qc0U= github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc/go.mod h1:HFLT6i9iR4QBOF5rdCyjddC9t59ArqWJV2xx+jwcCMo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go index d36f8f1d33a..a5de2384bd1 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -153,11 +153,8 @@ func readMapString(m map[string]interface{}, key string, def string) string { func findUnitFromInputType(state coordinator.State, inputType string) (component.Unit, bool) { for _, comp := range state.Components { for _, unit := range comp.Component.Units { - if unit.Type == client.UnitTypeInput { - it, ok := unit.Config["type"] - if ok && it == inputType { - return unit, true - } + if unit.Type == client.UnitTypeInput && unit.Config != nil && unit.Config.Type == inputType { + return unit, true } } } diff --git 
a/pkg/component/component.go b/pkg/component/component.go
index 94c19c8535b..75728b3e847 100644
--- a/pkg/component/component.go
+++ b/pkg/component/component.go
@@ -6,13 +6,20 @@ package component
 
 import (
 	"fmt"
+	"strings"
 
 	"github.com/elastic/elastic-agent-client/v7/pkg/client"
+	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
 
 	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
 	"github.com/elastic/elastic-agent/internal/pkg/eql"
 )
 
+const (
+	// defaultUnitLogLevel is the default log level that a unit will get if one is not defined.
+	defaultUnitLogLevel = client.UnitLogLevelInfo
+)
+
 var (
 	// ErrOutputNotSupported is returned when an input does not support an output type
 	ErrOutputNotSupported = newError("input doesn't support output type")
@@ -36,9 +43,21 @@ func (e *ErrInputRuntimeCheckFail) Error() string {
 
 // Unit is a single input or output that a component must run.
 type Unit struct {
-	ID     string                 `yaml:"id"`
-	Type   client.UnitType        `yaml:"type"`
-	Config map[string]interface{} `yaml:"config,omitempty"`
+	// ID is the unique ID of the unit.
+	ID string `yaml:"id"`
+
+	// Type is the unit type (either input or output).
+	Type client.UnitType `yaml:"type"`
+
+	// LogLevel is the unit's log level.
+	LogLevel client.UnitLogLevel `yaml:"log_level"`
+
+	// Config is the unit's expected configuration.
+	Config *proto.UnitExpectedConfig `yaml:"config,omitempty"`
+
+	// Err is set when the Config cannot be marshalled from its value into a configuration that
+	// can actually be sent to a unit. Units with Err set should not be sent to the component.
+	Err error `yaml:"error,omitempty"`
 }
 
 // Component is a set of units that needs to run.
@@ -121,18 +140,27 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}) ([]Component,
 				// skip; not enabled
 				continue
 			}
+			cfg, cfgErr := ExpectedConfig(input.input)
+			if cfg != nil {
+				cfg.Type = inputType // ensure alias is replaced in the ExpectedConfig to be non-alias type
+			}
 			units = append(units, Unit{
-				ID:     fmt.Sprintf("%s-%s-%s", inputType, outputName, input.id),
-				Type:   client.UnitTypeInput,
-				Config: input.input,
+				ID:       fmt.Sprintf("%s-%s-%s", inputType, outputName, input.id),
+				Type:     client.UnitTypeInput,
+				LogLevel: input.logLevel,
+				Config:   cfg,
+				Err:      cfgErr,
 			})
 		}
 		if len(units) > 0 {
 			componentID := fmt.Sprintf("%s-%s", inputType, outputName)
+			cfg, cfgErr := ExpectedConfig(output.output)
 			units = append(units, Unit{
-				ID:     componentID,
-				Type:   client.UnitTypeOutput,
-				Config: output.output,
+				ID:       componentID,
+				Type:     client.UnitTypeOutput,
+				LogLevel: output.logLevel,
+				Config:   cfg,
+				Err:      cfgErr,
 			})
 			components = append(components, Component{
 				ID: componentID,
@@ -193,9 +221,14 @@ func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) {
 			enabled = enabledVal
 			delete(output, enabledKey)
 		}
+		logLevel, err := getLogLevel(output)
+		if err != nil {
+			return nil, fmt.Errorf("invalid 'outputs.%s.log_level', %w", name, err)
+		}
 		outputsMap[name] = outputI{
 			name:       name,
 			enabled:    enabled,
+			logLevel:   logLevel,
 			outputType: t,
 			output:     output,
 			inputs:     make(map[string][]inputI),
@@ -255,10 +288,15 @@ func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) {
 			enabled = enabledVal
 			delete(input, enabledKey)
 		}
+		logLevel, err := getLogLevel(input)
+		if err != nil {
+			return nil, fmt.Errorf("invalid 'inputs.%d.log_level', %w", idx, err)
+		}
 		output.inputs[t] = append(output.inputs[t], inputI{
 			idx:       idx,
 			id:        id,
 			enabled:   enabled,
+			logLevel:  logLevel,
 			inputType: t,
 			input:     input,
 		})
@@ -273,6 +311,7 @@ type inputI struct {
 	idx       int
 	id        string
 	enabled   bool
+	logLevel  client.UnitLogLevel
 	inputType string
 	input     map[string]interface{}
 }
@@ -280,6 +319,7 @@ type inputI struct {
 type outputI struct {
 	name       string
 	enabled    bool
+	logLevel   client.UnitLogLevel
 	outputType string
 	output     map[string]interface{}
 	inputs     map[string][]inputI
@@ -305,3 +345,39 @@ func validateRuntimeChecks(spec *InputSpec, store eql.VarStore) error {
 	}
 	return nil
 }
+
+func getLogLevel(val map[string]interface{}) (client.UnitLogLevel, error) {
+	const logLevelKey = "log_level"
+
+	logLevel := defaultUnitLogLevel
+	if logLevelRaw, ok := val[logLevelKey]; ok {
+		logLevelStr, ok := logLevelRaw.(string)
+		if !ok {
+			return defaultUnitLogLevel, fmt.Errorf("expected a string not a %T", logLevelRaw)
+		}
+		var err error
+		logLevel, err = stringToLogLevel(logLevelStr)
+		if err != nil {
+			return defaultUnitLogLevel, err
+		}
+		delete(val, logLevelKey)
+	}
+	return logLevel, nil
+}
+
+func stringToLogLevel(val string) (client.UnitLogLevel, error) {
+	val = strings.ToLower(strings.TrimSpace(val))
+	switch val {
+	case "error":
+		return client.UnitLogLevelError, nil
+	case "warn", "warning":
+		return client.UnitLogLevelWarn, nil
+	case "info":
+		return client.UnitLogLevelInfo, nil
+	case "debug":
+		return client.UnitLogLevelDebug, nil
+	case "trace":
+		return client.UnitLogLevelTrace, nil
+	}
+	return client.UnitLogLevelError, fmt.Errorf("unknown log level type: %s", val)
+}
diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go
index a929fbde7b3..03d18172593 100644
--- a/pkg/component/component_test.go
+++ b/pkg/component/component_test.go
@@ -2,9 +2,13 @@
 // or more contributor license agreements. Licensed under the Elastic License;
 // you may not use this file except in compliance with the Elastic License.
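
For reference, the log-level helpers added to component.go above default to info and strip the log_level key from the map once parsed, so the raw config forwarded to a unit no longer carries it. A hedged sketch of the expected behaviour, with return values as implied by the code above:

    // "Debug" parses case-insensitively and the key is removed from the map.
    lvl, err := getLogLevel(map[string]interface{}{"log_level": "Debug"})
    // lvl == client.UnitLogLevelDebug, err == nil

    // A missing key falls back to defaultUnitLogLevel (info).
    lvl, err = getLogLevel(map[string]interface{}{})
    // lvl == client.UnitLogLevelInfo, err == nil

    // A non-string value is rejected: "expected a string not a int".
    _, err = getLogLevel(map[string]interface{}{"log_level": 42})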
+//nolint:dupl // duplicate code is in test cases + package component import ( + "errors" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" "path/filepath" "sort" "testing" @@ -289,19 +293,21 @@ func TestToComponents(t *testing.T) { Err: ErrInputNotSupported, Units: []Unit{ { - ID: "unknown-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "unknown-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "unknown-default-unknown-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "unknown-default-unknown-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "unknown", "id": "unknown-0", - }, + }), }, }, }, @@ -339,19 +345,21 @@ func TestToComponents(t *testing.T) { Err: ErrInputNotSupportedOnPlatform, Units: []Unit{ { - ID: "endpoint-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "endpoint-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "endpoint-default-endpoint-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "endpoint-default-endpoint-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "endpoint", "id": "endpoint-0", - }, + }), }, }, }, @@ -380,19 +388,21 @@ func TestToComponents(t *testing.T) { Err: ErrOutputNotSupported, Units: []Unit{ { - ID: "endpoint-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "endpoint-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "logstash", - }, + }), }, { - ID: "endpoint-default-endpoint-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "endpoint-default-endpoint-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "endpoint", "id": "endpoint-0", - }, + }), }, }, }, @@ -433,23 +443,91 @@ func TestToComponents(t *testing.T) { Err: NewErrInputRuntimeCheckFail("No support for RHEL7 on arm64"), Units: []Unit{ { - ID: "endpoint-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "endpoint-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "endpoint-default-endpoint-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "endpoint-default-endpoint-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "endpoint", "id": "endpoint-0", + }), + }, + }, + }, + }, + }, + { + Name: "Invalid: single input failed to decode into config", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "use_output": "default", + "enabled": true, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + "use_output": "default", + "enabled": true, + "meta": 
[]interface{}{ + map[string]interface{}{ + "bad": "should not have been array of dicts", }, }, }, }, }, + Result: []Component{ + { + ID: "filestream-default", + Spec: InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + }), + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }), + }, + { + ID: "filestream-default-filestream-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Err: errors.New("1 decoding error(s): 'meta' expected a map, got 'slice'"), + }, + }, + }, + }, }, { Name: "Output disabled", @@ -521,19 +599,21 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "filestream-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "filestream-default-filestream-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "filestream", "id": "filestream-0", - }, + }), }, }, }, @@ -621,27 +701,30 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "filestream-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "filestream-default-filestream-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "filestream", "id": "filestream-0", - }, + }), }, { - ID: "filestream-default-filestream-1", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "filestream-default-filestream-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "filestream", "id": "filestream-1", - }, + }), }, }, }, @@ -653,27 +736,30 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "filestream-other", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "filestream-other", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "filestream-other-filestream-3", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "filestream-other-filestream-3", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "filestream", "id": "filestream-3", - }, + }), }, { - ID: "filestream-other-filestream-4", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "filestream-other-filestream-4", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: 
MustExpectedConfig(map[string]interface{}{ "type": "filestream", "id": "filestream-4", - }, + }), }, }, }, @@ -685,27 +771,30 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "log-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "log-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "log-default-logfile-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "log-default-logfile-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ "type": "logfile", "id": "logfile-0", - }, + }, "log"), }, { - ID: "log-default-logfile-1", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "log-default-logfile-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "log", "id": "logfile-1", - }, + }), }, }, }, @@ -717,19 +806,21 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "log-other", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "log-other", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "log-other-logfile-2", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "log-other-logfile-2", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ "type": "logfile", "id": "logfile-2", - }, + }, "log"), }, }, }, @@ -741,19 +832,21 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "log-stashit", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "log-stashit", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "logstash", - }, + }), }, { - ID: "log-stashit-logfile-3", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "log-stashit-logfile-3", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ "type": "logfile", "id": "logfile-3", - }, + }, "log"), }, }, }, @@ -765,19 +858,21 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "log-redis", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "log-redis", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "redis", - }, + }), }, { - ID: "log-redis-logfile-4", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "log-redis-logfile-4", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ "type": "logfile", "id": "logfile-4", - }, + }, "log"), }, }, }, @@ -789,19 +884,21 @@ func TestToComponents(t *testing.T) { }, Units: []Unit{ { - ID: "apm-default", - Type: client.UnitTypeOutput, - Config: map[string]interface{}{ + ID: "apm-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ "type": "elasticsearch", - }, + }), }, { - ID: "apm-default-apm-server-0", - Type: client.UnitTypeInput, - Config: map[string]interface{}{ + ID: "apm-default-apm-server-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, 
+ Config: MustExpectedConfig(map[string]interface{}{ "type": "apm", "id": "apm-server-0", - }, + }), }, }, }, @@ -849,3 +946,9 @@ func sortComponents(components []Component) { return components[i].Units[0].ID < components[j].Units[0].ID }) } + +func mustExpectedConfigForceType(cfg map[string]interface{}, forceType string) *proto.UnitExpectedConfig { + res := MustExpectedConfig(cfg) + res.Type = forceType + return res +} diff --git a/pkg/component/config.go b/pkg/component/config.go new file mode 100644 index 00000000000..23e05f49de4 --- /dev/null +++ b/pkg/component/config.go @@ -0,0 +1,171 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package component + +import ( + "errors" + "fmt" + "reflect" + "strings" + + "github.com/mitchellh/mapstructure" + "google.golang.org/protobuf/types/known/structpb" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" +) + +const ( + sourceFieldName = "source" +) + +// MustExpectedConfig returns proto.UnitExpectedConfig. +// +// Panics if the map[string]interface{} cannot be converted to proto.UnitExpectedConfig. This really should +// only be used by tests. +func MustExpectedConfig(cfg map[string]interface{}) *proto.UnitExpectedConfig { + config, err := ExpectedConfig(cfg) + if err != nil { + panic(err) + } + return config +} + +// ExpectedConfig converts a map[string]interface{} to a proto.UnitExpectedConfig. +func ExpectedConfig(cfg map[string]interface{}) (*proto.UnitExpectedConfig, error) { + result := &proto.UnitExpectedConfig{} + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + ZeroFields: true, + WeaklyTypedInput: true, + TagName: "json", + IgnoreUntaggedFields: true, + Result: result, + MatchName: func(mapKey, fieldName string) bool { + if fieldName == sourceFieldName { + // never match for 'source' field that is set manually later + return false + } + return strings.EqualFold(mapKey, fieldName) + }, + }) + if err != nil { + return nil, err + } + err = decoder.Decode(cfg) + if err != nil { + return nil, rewrapErr(err) + } + err = setSource(result, cfg) + if err != nil { + return nil, err + } + return result, nil +} + +func setSource(val interface{}, cfg map[string]interface{}) error { + // find the source field on the val + resVal := reflect.ValueOf(val).Elem() + sourceFieldByTag, ok := getSourceField(resVal.Type()) + if !ok { + return fmt.Errorf("%T does not define a source field", val) + } + sourceField := resVal.FieldByName(sourceFieldByTag.Name) + if !sourceField.CanSet() { + return fmt.Errorf("%T.source cannot be set", val) + } + + // create the source (as the original source is always sent) + source, err := structpb.NewStruct(cfg) + if err != nil { + return err + } + sourceField.Set(reflect.ValueOf(source)) + + // look into every field that could also have a source field + for i := 0; i < resVal.NumField(); i++ { + typeField := resVal.Type().Field(i) + if !typeField.IsExported() { + continue + } + jsonName := getJSONFieldName(typeField) + if jsonName == "" || jsonName == sourceFieldName { + // skip fields without a json name or named 'source' + continue + } + cfgVal, ok := cfg[jsonName] + if !ok { + // doesn't exist in config (so no source) + continue + } + valField := resVal.Field(i) + valType := valField.Type() + switch valType.Kind() { + case reflect.Ptr: + cfgDict, ok := cfgVal.(map[string]interface{}) + if ok && 
hasSourceField(valType.Elem()) { + err := setSource(valField.Interface(), cfgDict) + if err != nil { + return fmt.Errorf("setting source for field %s failed: %w", jsonName, err) + } + } + case reflect.Slice: + cfgSlice, ok := cfgVal.([]interface{}) + if ok { + valElem := reflect.ValueOf(valField.Interface()) + for j := 0; j < valElem.Len(); j++ { + valIdx := valElem.Index(j) + cfgDict, ok := cfgSlice[j].(map[string]interface{}) + if ok && hasSourceField(valIdx.Elem().Type()) { + err := setSource(valIdx.Interface(), cfgDict) + if err != nil { + return fmt.Errorf("setting source for field %s.%d failed: %w", jsonName, j, err) + } + } + } + } + } + } + return nil +} + +func getSourceField(t reflect.Type) (reflect.StructField, bool) { + for i := 0; i < t.NumField(); i++ { + typeField := t.Field(i) + jsonName := getJSONFieldName(typeField) + if typeField.IsExported() && jsonName == sourceFieldName { + return typeField, true + } + } + return reflect.StructField{}, false +} + +func hasSourceField(t reflect.Type) bool { + _, ok := getSourceField(t) + return ok +} + +func getJSONFieldName(field reflect.StructField) string { + tag, ok := field.Tag.Lookup("json") + if !ok { + return "" + } + if tag == "" { + return "" + } + split := strings.Split(tag, ",") + return strings.TrimSpace(split[0]) +} + +func rewrapErr(err error) error { + var me *mapstructure.Error + if !errors.As(err, &me) { + return err + } + errs := me.WrappedErrors() + points := make([]string, 0, len(errs)) + for _, e := range errs { + points = append(points, e.Error()) + } + return fmt.Errorf("%d decoding error(s): %s", len(errs), strings.Join(points, ", ")) +} diff --git a/pkg/component/config_test.go b/pkg/component/config_test.go new file mode 100644 index 00000000000..64dcfe3a697 --- /dev/null +++ b/pkg/component/config_test.go @@ -0,0 +1,204 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
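
For reference, ExpectedConfig above decodes the raw map into the typed proto message, and setSource then stores the untouched map as a structpb Struct on every nesting level that defines a source field, so a component can still read fields the typed config does not model. A hedged sketch of the round trip (field names as in the patch):

    cfg, err := ExpectedConfig(map[string]interface{}{
        "type":   "fake",
        "custom": "value", // no typed counterpart; only reachable via Source
    })
    // err == nil
    // cfg.Type == "fake"
    // cfg.Source.AsMap()["custom"] == "value"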
+ +package component + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "google.golang.org/protobuf/types/known/structpb" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" +) + +func TestExpectedConfig(t *testing.T) { + scenarios := []struct { + Name string + Config map[string]interface{} + Err error + Expected *proto.UnitExpectedConfig + SetSource func(map[string]interface{}, *proto.UnitExpectedConfig) error + }{ + { + Name: "Full", + Config: map[string]interface{}{ + "id": "simple-0", + "type": "simple", + "name": "simple", + "revision": 1, + "meta": map[string]interface{}{ + "package": map[string]interface{}{ + "name": "simple", + "version": "1.0.0", + "extra": map[string]interface{}{ + "field": "package", + }, + }, + "extra": map[string]interface{}{ + "field": "meta", + }, + }, + "data_stream": map[string]interface{}{ + "dataset": "other", + "type": "simple", + "namespace": "default", + "extra": map[string]interface{}{ + "field": "data_stream", + }, + }, + "streams": []interface{}{ + map[string]interface{}{ + "id": "simple-stream-0", + "data_stream": map[string]interface{}{ + "dataset": "other", + "type": "simple", + "namespace": "default-0", + "extra": map[string]interface{}{ + "field": "data_stream", + }, + }, + "extra": map[string]interface{}{ + "field": "stream-0", + }, + }, + map[string]interface{}{ + "id": "simple-stream-1", + "data_stream": map[string]interface{}{ + "dataset": "other", + "type": "simple", + "namespace": "default-1", + "extra": map[string]interface{}{ + "field": "data_stream", + }, + }, + "extra": map[string]interface{}{ + "field": "stream-1", + }, + }, + }, + "extra": map[string]interface{}{ + "field": "config", + }, + }, + Expected: &proto.UnitExpectedConfig{ + Source: nil, + Id: "simple-0", + Type: "simple", + Name: "simple", + Revision: 1, + Meta: &proto.Meta{ + Source: nil, + Package: &proto.Package{ + Source: nil, + Name: "simple", + Version: "1.0.0", + }, + }, + DataStream: &proto.DataStream{ + Source: nil, + Dataset: "other", + Type: "simple", + Namespace: "default", + }, + Streams: []*proto.Stream{ + { + Source: nil, + Id: "simple-stream-0", + DataStream: &proto.DataStream{ + Source: nil, + Dataset: "other", + Type: "simple", + Namespace: "default-0", + }, + }, + { + Source: nil, + Id: "simple-stream-1", + DataStream: &proto.DataStream{ + Source: nil, + Dataset: "other", + Type: "simple", + Namespace: "default-1", + }, + }, + }, + }, + SetSource: func(cfg map[string]interface{}, expected *proto.UnitExpectedConfig) error { + source, err := structpb.NewStruct(cfg) + if err != nil { + return err + } + expected.Source = source + + meta, err := structpb.NewStruct(cfg["meta"].(map[string]interface{})) + if err != nil { + return err + } + expected.Meta.Source = meta + + pack, err := structpb.NewStruct(cfg["meta"].(map[string]interface{})["package"].(map[string]interface{})) + if err != nil { + return err + } + expected.Meta.Package.Source = pack + + ds, err := structpb.NewStruct(cfg["data_stream"].(map[string]interface{})) + if err != nil { + return err + } + expected.DataStream.Source = ds + + for i, stream := range cfg["streams"].([]interface{}) { + ss, err := structpb.NewStruct(stream.(map[string]interface{})) + if err != nil { + return err + } + expected.Streams[i].Source = ss + + sds, err := structpb.NewStruct(stream.(map[string]interface{})["data_stream"].(map[string]interface{})) + if err != nil { + return err + } + expected.Streams[i].DataStream.Source = sds + } + return nil + 
}, + }, + { + Name: "Invalid", + Config: map[string]interface{}{ + "id": "simple-0", + "type": "simple", + "name": "simple", + "revision": 1, + "meta": []interface{}{ + map[string]interface{}{ + "invalid": "meta", + }, + }, + }, + Err: errors.New("1 decoding error(s): 'meta' expected a map, got 'slice'"), + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.Name, func(t *testing.T) { + if scenario.SetSource != nil { + err := scenario.SetSource(scenario.Config, scenario.Expected) + require.NoError(t, err) + } + + observed, err := ExpectedConfig(scenario.Config) + if scenario.Err != nil { + assert.Equal(t, err.Error(), scenario.Err.Error()) + } else { + require.NoError(t, err) + assert.EqualValues(t, scenario.Expected, observed) + } + }) + } +} diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go index 000e2fd36fa..0464d55b8ef 100644 --- a/pkg/component/fake/main.go +++ b/pkg/component/fake/main.go @@ -14,13 +14,17 @@ import ( "syscall" "time" - "gopkg.in/yaml.v2" + "github.com/rs/zerolog" "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" ) const ( fake = "fake" + + stoppingMsg = "Stopping" + stoppedMsg = "Stopped" ) func main() { @@ -32,6 +36,7 @@ func main() { } func run() error { + logger := zerolog.New(os.Stderr).With().Timestamp().Logger() ver := client.VersionInfo{ Name: fake, Version: "1.0", @@ -64,7 +69,7 @@ func run() error { return fmt.Errorf("failed to start GRPC client: %w", err) } - s := newStateManager() + s := newStateManager(logger) for { select { case <-ctx.Done(): @@ -92,11 +97,12 @@ type unitKey struct { } type stateManager struct { - units map[unitKey]runningUnit + logger zerolog.Logger + units map[unitKey]runningUnit } -func newStateManager() *stateManager { - return &stateManager{units: make(map[unitKey]runningUnit)} +func newStateManager(logger zerolog.Logger) *stateManager { + return &stateManager{logger: logger, units: make(map[unitKey]runningUnit)} } func (s *stateManager) added(unit *client.Unit) { @@ -106,7 +112,7 @@ func (s *stateManager) added(unit *client.Unit) { _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate unit", nil) return } - r, err := newRunningUnit(unit) + r, err := newRunningUnit(s.logger, unit) if err != nil { _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) return @@ -141,24 +147,64 @@ type runningUnit interface { } type fakeInput struct { - unit *client.Unit - cfg inputConfig + logger zerolog.Logger + unit *client.Unit + cfg *proto.UnitExpectedConfig state client.UnitState stateMsg string + + canceller context.CancelFunc } -func newFakeInput(unit *client.Unit, cfg inputConfig) *fakeInput { +func newFakeInput(logger zerolog.Logger, logLevel client.UnitLogLevel, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeInput, error) { + logger = logger.Level(toZerologLevel(logLevel)) + state, msg, err := getStateFromConfig(cfg) + if err != nil { + return nil, err + } + i := &fakeInput{ + logger: logger, unit: unit, cfg: cfg, - state: cfg.State, - stateMsg: cfg.Message, + state: state, + stateMsg: msg, } + + logger.Trace().Msg("registering set_state action for unit") unit.RegisterAction(&stateSetterAction{i}) - unit.RegisterAction(&killAction{}) + logger.Trace().Msg("registering kill action for unit") + unit.RegisterAction(&killAction{i}) + logger.Debug().Str("state", i.state.String()).Str("message", i.stateMsg).Msg("updating unit state") _ = unit.UpdateState(i.state, i.stateMsg, nil) - return i + + logTimer := 10 * 
time.Second + if logTimerValue, ok := cfg.Source.Fields["log_timer"]; ok { + logTimeStr := logTimerValue.GetStringValue() + if logTimeStr != "" { + logTimer, err = time.ParseDuration(logTimeStr) + if err != nil { + return nil, err + } + } + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + t := time.NewTicker(logTimer) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + logger.Trace().Dur("log_timer", logTimer).Msg("trace log ticker") + } + } + }() + i.canceller = cancel + + return i, nil } func (f *fakeInput) Unit() *client.Unit { @@ -166,36 +212,34 @@ func (f *fakeInput) Unit() *client.Unit { } func (f *fakeInput) Update(u *client.Unit) error { - expected, config := u.Expected() + expected, _, config := u.Expected() if expected == client.UnitStateStopped { // agent is requesting this input to stop - _ = u.UpdateState(client.UnitStateStopping, "Stopping", nil) + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + f.canceller() go func() { <-time.After(1 * time.Second) - _ = u.UpdateState(client.UnitStateStopped, "Stopped", nil) + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) }() return nil } - var cfg map[string]interface{} - err := yaml.Unmarshal([]byte(config), &cfg) - if err != nil { - return fmt.Errorf("failed to unmarshal YAML: %w", err) - } - unitType, ok := cfg["type"] - if !ok { + if config.Type == "" { return fmt.Errorf("unit missing config type") } - if unitType != fake { - return fmt.Errorf("unit type changed with the same unit ID: %s", unitType) + if config.Type != fake { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) } - state, stateMsg, err := getStateFromMap(cfg) + state, stateMsg, err := getStateFromConfig(config) if err != nil { return fmt.Errorf("unit config parsing error: %w", err) } f.state = state f.stateMsg = stateMsg + f.logger.Debug().Str("state", f.state.String()).Str("message", f.stateMsg).Msg("updating unit state") _ = u.UpdateState(f.state, f.stateMsg, nil) return nil } @@ -209,43 +253,42 @@ func (s *stateSetterAction) Name() string { } func (s *stateSetterAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { + s.input.logger.Trace().Msg("executing set_state action") state, stateMsg, err := getStateFromMap(params) if err != nil { return nil, err } s.input.state = state s.input.stateMsg = stateMsg + s.input.logger.Debug().Str("state", s.input.state.String()).Str("message", s.input.stateMsg).Msg("updating unit state") _ = s.input.unit.UpdateState(s.input.state, s.input.stateMsg, nil) return nil, nil } type killAction struct { + input *fakeInput } func (s *killAction) Name() string { return "kill" } -func (s *killAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { +func (s *killAction) Execute(_ context.Context, _ map[string]interface{}) (map[string]interface{}, error) { + s.input.logger.Trace().Msg("executing kill action") os.Exit(1) return nil, nil } -func newRunningUnit(unit *client.Unit) (runningUnit, error) { - _, config := unit.Expected() - var cfg inputConfig - err := yaml.Unmarshal([]byte(config), &cfg) - if err != nil { - return nil, fmt.Errorf("failed to unmarshal YAML: %w", err) - } - if cfg.Type == "" { +func 
newRunningUnit(logger zerolog.Logger, unit *client.Unit) (runningUnit, error) { + _, logLevel, config := unit.Expected() + if config.Type == "" { return nil, fmt.Errorf("unit config type empty") } - switch cfg.Type { + switch config.Type { case fake: - return newFakeInput(unit, cfg), nil + return newFakeInput(logger, logLevel, unit, config) } - return nil, fmt.Errorf("unknown unit config type: %s", cfg.Type) + return nil, fmt.Errorf("unknown unit config type: %s", config.Type) } func newUnitKey(unit *client.Unit) unitKey { @@ -255,6 +298,10 @@ func newUnitKey(unit *client.Unit) unitKey { } } +func getStateFromConfig(cfg *proto.UnitExpectedConfig) (client.UnitState, string, error) { + return getStateFromMap(cfg.Source.AsMap()) +} + func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, error) { state, ok := cfg["state"] if !ok { @@ -278,8 +325,18 @@ func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, erro return stateType, stateMsgStr, nil } -type inputConfig struct { - Type string `json:"type" yaml:"type"` - State client.UnitState `json:"state" yaml:"state"` - Message string `json:"message" yaml:"message"` +func toZerologLevel(level client.UnitLogLevel) zerolog.Level { + switch level { + case client.UnitLogLevelError: + return zerolog.ErrorLevel + case client.UnitLogLevelWarn: + return zerolog.WarnLevel + case client.UnitLogLevelInfo: + return zerolog.InfoLevel + case client.UnitLogLevelDebug: + return zerolog.DebugLevel + case client.UnitLogLevelTrace: + return zerolog.TraceLevel + } + return zerolog.InfoLevel } diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index dce34e4bcd4..ebbca961228 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -12,12 +12,7 @@ import ( "os/exec" "time" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent-client/v7/pkg/proto" - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/process" ) @@ -46,8 +41,7 @@ type CommandRuntime struct { actionState actionMode proc *process.Info - expected ComponentState - observed ComponentState + state ComponentState lastCheckin time.Time missedCheckins int } @@ -57,8 +51,6 @@ func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { if comp.Spec.Spec.Command == nil { return nil, errors.New("must have command defined in specification") } - expected := newComponentState(&comp, client.UnitStateHealthy, "", 1) - observed := newComponentState(&comp, client.UnitStateStarting, "Starting", 0) return &CommandRuntime{ current: comp, ch: make(chan ComponentState), @@ -66,8 +58,7 @@ func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { procCh: make(chan procState), compCh: make(chan component.Component), actionState: actionStart, - expected: expected, - observed: observed, + state: newComponentState(&comp), }, nil } @@ -111,17 +102,21 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { t.Reset(checkinPeriod) } case newComp := <-c.compCh: - c.expected.syncComponent(&newComp, client.UnitStateHealthy, "Healthy", 1) - if c.mustSendExpected() { - c.sendExpected(comm) + sendExpected := c.state.syncExpected(&newComp) + changed := c.state.syncUnits(&newComp) + if sendExpected || c.state.unsettled() { + comm.CheckinExpected(c.state.toCheckinExpected()) + } + if changed { + c.sendObserved() } case checkin := <-comm.CheckinObserved(): sendExpected := false changed 
:= false - if c.observed.State == client.UnitStateStarting { + if c.state.State == client.UnitStateStarting { // first observation after start set component to healthy - c.observed.State = client.UnitStateHealthy - c.observed.Message = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID) + c.state.State = client.UnitStateHealthy + c.state.Message = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID) changed = true } if c.lastCheckin.IsZero() { @@ -129,19 +124,19 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { sendExpected = true } c.lastCheckin = time.Now().UTC() - if c.observed.syncCheckin(checkin) { + if c.state.syncCheckin(checkin) { changed = true } - if c.mustSendExpected() { + if c.state.unsettled() { sendExpected = true } if sendExpected { - c.sendExpected(comm) + comm.CheckinExpected(c.state.toCheckinExpected()) } if changed { c.sendObserved() } - if c.cleanupStopped() { + if c.state.cleanupStopped() { c.sendObserved() } case <-t.C: @@ -216,18 +211,9 @@ func (c *CommandRuntime) Teardown() error { // forceCompState force updates the state for the entire component, forcing that state on all units. func (c *CommandRuntime) forceCompState(state client.UnitState, msg string) { - c.observed.State = state - c.observed.Message = msg - for k, unit := range c.observed.Units { - unit.State = state - unit.Message = msg - unit.Payload = nil - unit.configStateIdx = 0 - - // unit is a copy and must be set back into the map - c.observed.Units[k] = unit + if c.state.forceState(state, msg) { + c.sendObserved() } - c.sendObserved() } // compState updates just the component state not all the units. @@ -242,15 +228,13 @@ func (c *CommandRuntime) compState(state client.UnitState) { msg = fmt.Sprintf("Degraded: pid '%d' missed %d check-ins", c.proc.PID, c.missedCheckins) } } - if c.observed.State != state || c.observed.Message != msg { - c.observed.State = state - c.observed.Message = msg + if c.state.compState(state, msg) { c.sendObserved() } } func (c *CommandRuntime) sendObserved() { - c.ch <- c.observed.Copy() + c.ch <- c.state.Copy() } func (c *CommandRuntime) start(comm Communicator) error { @@ -259,7 +243,7 @@ func (c *CommandRuntime) start(comm Communicator) error { return nil } cmdSpec := c.current.Spec.Spec.Command - var env []string + env := make([]string, 0, len(cmdSpec.Env)) for _, e := range cmdSpec.Env { env = append(env, fmt.Sprintf("%s=%s", e.Name, e.Value)) } @@ -301,7 +285,7 @@ func (c *CommandRuntime) startWatcher(info *process.Info, comm Communicator) { if err != nil { c.forceCompState(client.UnitStateFailed, fmt.Sprintf("Failed: failed to provide connection information to spawned pid '%d': %s", info.PID, err)) // kill instantly - info.Kill() + _ = info.Kill() } else { _ = info.Stdin.Close() } @@ -330,66 +314,6 @@ func (c *CommandRuntime) handleProc(state *os.ProcessState) bool { return false } -func (c *CommandRuntime) mustSendExpected() bool { - if len(c.expected.Units) != len(c.observed.Units) { - // mismatch on unit count - return true - } - for ek, e := range c.expected.Units { - o, ok := c.observed.Units[ek] - if !ok { - // unit missing - return true - } - if o.configStateIdx != e.configStateIdx || e.State != o.State { - // config or state mismatch - return true - } - } - return false -} - -func (c *CommandRuntime) sendExpected(comm Communicator) error { - units := make([]*proto.UnitExpected, 0, len(c.expected.Units)) - for k, u := range c.expected.Units { - e := &proto.UnitExpected{ - Id: k.UnitID, - Type: 
proto.UnitType(k.UnitType), - State: proto.State(u.State), - ConfigStateIdx: u.configStateIdx, - Config: "", - } - o, ok := c.observed.Units[k] - if !ok || o.configStateIdx != u.configStateIdx { - cfg, err := yaml.Marshal(u.config) - if err != nil { - return fmt.Errorf("failed to marshal YAML for unit %s: %w", k.UnitID, err) - } - e.Config = string(cfg) - } - units = append(units, e) - } - comm.CheckinExpected(&proto.CheckinExpected{Units: units}) - return nil -} - -func (c *CommandRuntime) cleanupStopped() bool { - cleaned := false - for ek, e := range c.expected.Units { - if e.State == client.UnitStateStopped { - // should be stopped; check if observed is also reporting stopped - o, ok := c.observed.Units[ek] - if ok && o.State == client.UnitStateStopped { - // its also stopped; so it can now be removed from both - delete(c.expected.Units, ek) - delete(c.observed.Units, ek) - cleaned = true - } - } - } - return cleaned -} - func attachOutErr(cmd *exec.Cmd) error { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index f496a01100e..a28566c43eb 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -2,6 +2,8 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//nolint:dupl // duplicate code is in test cases + package runtime import ( @@ -65,7 +67,7 @@ func TestManager_SimpleComponentErr(t *testing.T) { { ID: "error-input", Type: client.UnitTypeInput, - Config: map[string]interface{}{}, + Config: nil, }, }, } @@ -169,11 +171,11 @@ func TestManager_FakeInput_StartStop(t *testing.T) { { ID: "fake-input", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy", - }, + }), }, }, } @@ -253,6 +255,323 @@ LOOP: require.NoError(t, err) } +func TestManager_FakeInput_BadUnitToGood(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += exeExt + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Healthy", + }), + }, + { + ID: "bad-input", + Type: client.UnitTypeInput, + Err: errors.New("hard-error for config"), + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + unitBad := true + + sub := m.Subscribe(subCtx, "fake-default") + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if 
state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + // update the bad unit to be good; so it will transition to healthy + updatedComp := comp + updatedComp.Units = make([]component.Unit, len(comp.Units)) + copy(updatedComp.Units, comp.Units) + updatedComp.Units[1] = component.Unit{ + ID: "bad-input", + Type: client.UnitTypeInput, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Healthy 2", + }), + } + + unitBad = false + err := m.Update([]component.Component{updatedComp}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateStopped || unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + unit, ok = state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "bad-input"}] + if ok { + if unitBad { + if unit.State != client.UnitStateFailed { + subErrCh <- errors.New("bad-input unit should be failed") + } + } else { + if unit.State == client.UnitStateFailed { + if unit.Message == "hard-error for config" { + // still hard-error; wait for it to go healthy + } else { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } + } else if unit.State == client.UnitStateHealthy { + // bad unit is now healthy; stop the component + err := m.Update([]component.Component{}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateStopped { + subErrCh <- nil + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } + } else { + subErrCh <- errors.New("unit missing: bad-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + +func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += exeExt + } + comp := 
component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Healthy", + }), + }, + { + ID: "good-input", + Type: client.UnitTypeInput, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Health 2", + }), + }, + }, + } + + subCtx, subCancel := context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + unitGood := true + + sub := m.Subscribe(subCtx, "fake-default") + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "good-input"}] + if ok { + if unitGood { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + // good unit it; now make it bad + updatedComp := comp + updatedComp.Units = make([]component.Unit, len(comp.Units)) + copy(updatedComp.Units, comp.Units) + updatedComp.Units[1] = component.Unit{ + ID: "good-input", + Type: client.UnitTypeInput, + Err: errors.New("hard-error for config"), + } + unitGood = false + err := m.Update([]component.Component{updatedComp}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + if unit.State == client.UnitStateFailed { + // went to failed; stop whole component + err := m.Update([]component.Component{}) + if err != nil { + subErrCh <- err + } + } else if unit.State == client.UnitStateStopped { + // unit was stopped + subErrCh <- nil + } else { + subErrCh <- errors.New("good-input unit should be either failed or stopped") + } + } + } else { + subErrCh <- errors.New("unit missing: good-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + func TestManager_FakeInput_Configure(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -286,11 +605,11 @@ func TestManager_FakeInput_Configure(t *testing.T) { { ID: "fake-input", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": 
"Fake Healthy", - }, + }), }, }, } @@ -315,11 +634,11 @@ func TestManager_FakeInput_Configure(t *testing.T) { subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) } else if unit.State == client.UnitStateHealthy { // update config to change the state to degraded - comp.Units[0].Config = map[string]interface{}{ + comp.Units[0].Config = component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateDegraded, + "state": int(client.UnitStateDegraded), "message": "Fake Degraded", - } + }) err := m.Update([]component.Component{comp}) if err != nil { subErrCh <- err @@ -408,20 +727,20 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { { ID: "fake-input-0", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 0", - }, + }), }, { ID: "fake-input-1", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 1", - }, + }), }, }, } @@ -562,11 +881,11 @@ func TestManager_FakeInput_ActionState(t *testing.T) { { ID: "fake-input", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy", - }, + }), }, }, } @@ -595,7 +914,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { go func() { actionCtx, actionCancel := context.WithTimeout(context.Background(), 15*time.Second) _, err := m.PerformAction(actionCtx, comp.Units[0], "set_state", map[string]interface{}{ - "state": client.UnitStateDegraded, + "state": int(client.UnitStateDegraded), "message": "Action Set Degraded", }) actionCancel() @@ -688,11 +1007,11 @@ func TestManager_FakeInput_Restarts(t *testing.T) { { ID: "fake-input", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy", - }, + }), }, }, } @@ -832,11 +1151,11 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { { ID: "fake-input", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy", - }, + }), }, }, } @@ -940,11 +1259,11 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { { ID: "fake-input", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy", - }, + }), }, }, } @@ -1062,29 +1381,29 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { { ID: "fake-input-0-0", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 0-0", - }, + }), }, { ID: "fake-input-0-1", Type: client.UnitTypeInput, - Config: map[string]interface{}{ 
+ Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 0-1", - }, + }), }, { ID: "fake-input-0-2", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 0-2", - }, + }), }, }, }, @@ -1095,29 +1414,29 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { { ID: "fake-input-1-0", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 1-0", - }, + }), }, { ID: "fake-input-1-1", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 1-1", - }, + }), }, { ID: "fake-input-1-2", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 1-2", - }, + }), }, }, }, @@ -1128,29 +1447,29 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { { ID: "fake-input-2-0", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 2-0", - }, + }), }, { ID: "fake-input-2-1", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 2-1", - }, + }), }, { ID: "fake-input-2-2", Type: client.UnitTypeInput, - Config: map[string]interface{}{ + Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", - "state": client.UnitStateHealthy, + "state": int(client.UnitStateHealthy), "message": "Fake Healthy 2-2", - }, + }), }, }, }, @@ -1235,6 +1554,141 @@ LOOP: require.NoError(t, err) } +func TestManager_FakeInput_LogLevel(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + require.NoError(t, err) + errCh := make(chan error) + go func() { + errCh <- m.Run(ctx) + }() + + waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) + defer waitCancel() + if err := m.WaitForReady(waitCtx); err != nil { + require.NoError(t, err) + } + + binaryPath := filepath.Join("..", "fake", "fake") + if runtime.GOOS == component.Windows { + binaryPath += exeExt + } + comp := component.Component{ + ID: "fake-default", + Spec: component.InputRuntimeSpec{ + InputType: "fake", + BinaryName: "", + BinaryPath: binaryPath, + Spec: fakeInputSpec, + }, + Units: []component.Unit{ + { + ID: "fake-input", + Type: client.UnitTypeInput, + LogLevel: client.UnitLogLevelInfo, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Healthy", + }), + }, + }, + } + + subCtx, subCancel := 
context.WithCancel(context.Background()) + defer subCancel() + subErrCh := make(chan error) + go func() { + sub := m.Subscribe(subCtx, "fake-default") + for { + select { + case <-subCtx.Done(): + return + case state := <-sub.Ch(): + t.Logf("component state changed: %+v", state) + if state.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("component failed: %s", state.Message) + } else { + unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-input"}] + if ok { + if unit.State == client.UnitStateFailed { + subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) + } else if unit.State == client.UnitStateHealthy { + updatedComp := comp + updatedComp.Units = make([]component.Unit, len(comp.Units)) + copy(updatedComp.Units, comp.Units) + updatedComp.Units[0] = component.Unit{ + ID: "fake-input", + Type: client.UnitTypeInput, + LogLevel: client.UnitLogLevelTrace, + Config: component.MustExpectedConfig(map[string]interface{}{ + "type": "fake", + "state": int(client.UnitStateHealthy), + "message": "Fake Healthy", + }), + } + + actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second) + _, err := m.PerformAction(actionCtx, comp.Units[0], "invalid_missing_action", nil) + actionCancel() + if err == nil { + subErrCh <- fmt.Errorf("should have returned an error") + } else if err.Error() != "action undefined" { + subErrCh <- fmt.Errorf("should have returned error: action undefined") + } else { + subErrCh <- nil + } + } else if unit.State == client.UnitStateStarting { + // acceptable + } else { + // unknown state that should not have occurred + subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State) + } + } else { + subErrCh <- errors.New("unit missing: fake-input") + } + } + } + } + }() + + defer drainErrChan(errCh) + defer drainErrChan(subErrCh) + + startTimer := time.NewTimer(100 * time.Millisecond) + defer startTimer.Stop() + select { + case <-startTimer.C: + err = m.Update([]component.Component{comp}) + require.NoError(t, err) + case err := <-errCh: + t.Fatalf("failed early: %s", err) + } + + endTimer := time.NewTimer(30 * time.Second) + defer endTimer.Stop() +LOOP: + for { + select { + case <-endTimer.C: + t.Fatalf("timed out after 30 seconds") + case err := <-errCh: + require.NoError(t, err) + case err := <-subErrCh: + require.NoError(t, err) + break LOOP + } + } + + subCancel() + cancel() + + err = <-errCh + require.NoError(t, err) +} + func newErrorLogger(t *testing.T) *logger.Logger { t.Helper() diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index b84e5b48202..e2c9a2bd013 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -6,7 +6,6 @@ package runtime import ( "context" - "encoding/json" "errors" "sync" @@ -17,172 +16,6 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) -// ComponentUnitState is the state for a unit running in a component. -type ComponentUnitState struct { - State client.UnitState - Message string - Payload map[string]interface{} - - // internal - configStateIdx uint64 - config map[string]interface{} - payloadStr string -} - -// ComponentUnitKey is a composite key to identify a unit by its type and ID. -type ComponentUnitKey struct { - UnitType client.UnitType - UnitID string -} - -// ComponentVersionInfo provides version information reported by the component. -type ComponentVersionInfo struct { - // Name of the binary. - Name string - // Version of the binary. 
- Version string - // Additional metadata about the binary. - Meta map[string]string -} - -// ComponentState is the overall state of the component. -type ComponentState struct { - State client.UnitState - Message string - - Units map[ComponentUnitKey]ComponentUnitState - - VersionInfo ComponentVersionInfo -} - -func newComponentState(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) (s ComponentState) { - s.Units = make(map[ComponentUnitKey]ComponentUnitState) - s.syncComponent(comp, initState, initMessage, initCfgIdx) - return s -} - -// Copy returns a copy of the structure. -func (s *ComponentState) Copy() (c ComponentState) { - c = *s - c.Units = make(map[ComponentUnitKey]ComponentUnitState) - for k, v := range s.Units { - c.Units[k] = v - } - return c -} - -func (s *ComponentState) syncComponent(comp *component.Component, initState client.UnitState, initMessage string, initCfgIdx uint64) { - s.State = initState - s.Message = initMessage - touched := make(map[ComponentUnitKey]bool) - for _, unit := range comp.Units { - key := ComponentUnitKey{ - UnitType: unit.Type, - UnitID: unit.ID, - } - - touched[key] = true - existing, ok := s.Units[key] - existing.State = initState - existing.Message = initMessage - existing.Payload = nil - existing.config = unit.Config - if ok { - existing.configStateIdx++ - } else { - existing.configStateIdx = initCfgIdx - } - s.Units[key] = existing - } - for key, unit := range s.Units { - _, ok := touched[key] - if !ok { - if unit.State != client.UnitStateStopped { - unit.State = client.UnitStateStopped - unit.Message = "Stopped" - - // unit is a copy and must be set back into the map - s.Units[key] = unit - } - } - } -} - -func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { - changed := false - touched := make(map[ComponentUnitKey]bool) - for _, unit := range checkin.Units { - key := ComponentUnitKey{ - UnitType: client.UnitType(unit.Type), - UnitID: unit.Id, - } - - var payloadStr string - var payload map[string]interface{} - if unit.Payload != nil { - payloadStr = string(unit.Payload) - // err is ignored (must be valid JSON for Agent to use it) - _ = json.Unmarshal(unit.Payload, &payload) - } - - touched[key] = true - existing, ok := s.Units[key] - if !ok { - changed = true - existing = ComponentUnitState{ - State: client.UnitState(unit.State), - Message: unit.Message, - Payload: payload, - configStateIdx: unit.ConfigStateIdx, - payloadStr: payloadStr, - } - } else { - existing.configStateIdx = unit.ConfigStateIdx - if existing.State != client.UnitState(unit.State) || existing.Message != unit.Message || existing.payloadStr != payloadStr { - changed = true - existing.State = client.UnitState(unit.State) - existing.Message = unit.Message - existing.Payload = payload - existing.payloadStr = payloadStr - } - } - s.Units[key] = existing - } - for key, unit := range s.Units { - _, ok := touched[key] - if !ok { - unit.configStateIdx = 0 - if unit.State != client.UnitStateStarting { - state := client.UnitStateFailed - msg := "Failed: not reported in check-in" - payloadStr := "" - if unit.State != state || unit.Message != msg || unit.payloadStr != payloadStr { - changed = true - unit.State = state - unit.Message = msg - unit.Payload = nil - unit.payloadStr = payloadStr - - // unit is a copy and must be set back into the map - s.Units[key] = unit - } - } - } - } - if checkin.VersionInfo != nil { - if checkin.VersionInfo.Name != "" { - s.VersionInfo.Name = checkin.VersionInfo.Name - } - if 
checkin.VersionInfo.Version != "" { - s.VersionInfo.Version = checkin.VersionInfo.Version - } - if checkin.VersionInfo.Meta != nil { - s.VersionInfo.Meta = checkin.VersionInfo.Meta - } - } - return changed -} - // ComponentRuntime manages runtime lifecycle operations for a component and stores its state. type ComponentRuntime interface { // Run starts the runtime for the component. diff --git a/pkg/component/runtime/state.go b/pkg/component/runtime/state.go new file mode 100644 index 00000000000..b8278d334c7 --- /dev/null +++ b/pkg/component/runtime/state.go @@ -0,0 +1,427 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "fmt" + "reflect" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent/pkg/component" +) + +const ( + startingMsg = "Starting" + stoppedMsg = "Stopped" + unknownMsg = "Failed: reported unit is unknown" + missingMsg = "Failed: not reported in check-in" +) + +// ComponentUnitState is the state for a unit running in a component. +type ComponentUnitState struct { + State client.UnitState + Message string + Payload map[string]interface{} + + // internal + unitState client.UnitState + unitMessage string + unitPayload map[string]interface{} + configStateIdx uint64 + err error +} + +// ComponentUnitKey is a composite key to identify a unit by its type and ID. +type ComponentUnitKey struct { + UnitType client.UnitType + UnitID string +} + +// ComponentVersionInfo provides version information reported by the component. +type ComponentVersionInfo struct { + // Name of the binary. + Name string + // Version of the binary. + Version string + // Additional metadata about the binary. + Meta map[string]string +} + +// ComponentState is the overall state of the component. +type ComponentState struct { + State client.UnitState + Message string + + Units map[ComponentUnitKey]ComponentUnitState + + VersionInfo ComponentVersionInfo + + // internal + expectedUnits map[ComponentUnitKey]expectedUnitState +} + +// expectedUnitState is the expected state of a unit. +type expectedUnitState struct { + state client.UnitState + configStateIdx uint64 + config *proto.UnitExpectedConfig + err error + logLevel client.UnitLogLevel +} + +func newComponentState(comp *component.Component) (s ComponentState) { + s.State = client.UnitStateStarting + s.Message = startingMsg + s.Units = make(map[ComponentUnitKey]ComponentUnitState) + s.expectedUnits = make(map[ComponentUnitKey]expectedUnitState) + s.syncComponent(comp) + return s +} + +// Copy returns a copy of the structure. 
+func (s *ComponentState) Copy() (c ComponentState) { + c = *s + c.Units = make(map[ComponentUnitKey]ComponentUnitState) + for k, v := range s.Units { + c.Units[k] = v + } + c.expectedUnits = make(map[ComponentUnitKey]expectedUnitState) + for k, v := range s.expectedUnits { + c.expectedUnits[k] = v + } + return c +} + +func (s *ComponentState) syncComponent(comp *component.Component) bool { + changed := s.syncExpected(comp) + s.syncUnits(comp) + if changed { + return true + } + return s.unsettled() +} + +func (s *ComponentState) syncExpected(comp *component.Component) bool { + changed := false + touched := make(map[ComponentUnitKey]bool) + for _, unit := range comp.Units { + key := ComponentUnitKey{ + UnitType: unit.Type, + UnitID: unit.ID, + } + + touched[key] = true + existing, ok := s.expectedUnits[key] + if ok { + if existing.logLevel != unit.LogLevel { + existing.logLevel = unit.LogLevel + changed = true + } + if !reflect.DeepEqual(existing.config, unit.Config) { + existing.config = unit.Config + existing.configStateIdx++ + changed = true + } + } else { + existing.state = client.UnitStateHealthy + existing.logLevel = unit.LogLevel + existing.config = unit.Config + existing.configStateIdx = 1 + changed = true + } + if existing.err != unit.Err { + existing.err = unit.Err + if existing.err != nil { + existing.state = client.UnitStateFailed + } + changed = true + } + s.expectedUnits[key] = existing + } + for key, unit := range s.expectedUnits { + _, ok := touched[key] + if !ok { + if unit.state != client.UnitStateStopped { + unit.state = client.UnitStateStopped + changed = true + + // unit is a copy and must be set back into the map + s.expectedUnits[key] = unit + } + } + } + return changed +} + +func (s *ComponentState) syncUnits(comp *component.Component) bool { + changed := false + touched := make(map[ComponentUnitKey]bool) + for _, unit := range comp.Units { + key := ComponentUnitKey{ + UnitType: unit.Type, + UnitID: unit.ID, + } + + touched[key] = true + existing, ok := s.Units[key] + if !ok { + existing.State = client.UnitStateStarting + existing.Message = startingMsg + existing.Payload = nil + existing.configStateIdx = 0 + existing.unitState = client.UnitStateStarting + existing.unitMessage = startingMsg + existing.unitPayload = nil + changed = true + } + existing.err = unit.Err + if existing.err != nil { + errMsg := existing.err.Error() + if existing.State != client.UnitStateFailed || existing.Message != errMsg || diffPayload(existing.Payload, nil) { + existing.State = client.UnitStateFailed + existing.Message = existing.err.Error() + existing.Payload = nil + changed = true + } + } + s.Units[key] = existing + } + for key, unit := range s.Units { + _, ok := touched[key] + if !ok { + if unit.State != client.UnitStateStopped { + unit.State = client.UnitStateStopped + unit.Message = stoppedMsg + unit.Payload = nil + unit.unitState = client.UnitStateStopped + unit.unitMessage = stoppedMsg + unit.unitPayload = nil + changed = true + + // unit is a copy and must be set back into the map + s.Units[key] = unit + } + } + } + return changed +} + +func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { + changed := false + touched := make(map[ComponentUnitKey]bool) + for _, unit := range checkin.Units { + key := ComponentUnitKey{ + UnitType: client.UnitType(unit.Type), + UnitID: unit.Id, + } + + var payload map[string]interface{} + if unit.Payload != nil { + payload = unit.Payload.AsMap() + } + + touched[key] = true + _, inExpected := s.expectedUnits[key] + existing, _ := 
s.Units[key] + existing.unitState = client.UnitState(unit.State) + if existing.unitState == client.UnitStateStopped { + fmt.Printf("stopped") + } + existing.unitMessage = unit.Message + existing.unitPayload = payload + existing.configStateIdx = unit.ConfigStateIdx + if existing.err != nil && existing.unitState != client.UnitStateStopped { + errMsg := existing.err.Error() + if existing.State != client.UnitStateFailed || existing.Message != errMsg || diffPayload(existing.Payload, nil) { + changed = true + existing.State = client.UnitStateFailed + existing.Message = errMsg + existing.Payload = nil + } + } else if !inExpected && existing.unitState != client.UnitStateStopped { + if existing.State != client.UnitStateFailed || existing.Message != unknownMsg || diffPayload(existing.Payload, nil) { + changed = true + existing.State = client.UnitStateFailed + existing.Message = unknownMsg + existing.Payload = nil + } + } else { + if existing.unitState != existing.State || existing.unitMessage != existing.Message || diffPayload(existing.unitPayload, existing.Payload) { + changed = true + existing.State = existing.unitState + existing.Message = existing.unitMessage + existing.Payload = existing.unitPayload + } + } + s.Units[key] = existing + } + for key, unit := range s.Units { + _, ok := touched[key] + if !ok { + unit.unitState = client.UnitStateStarting + unit.unitMessage = "" + unit.unitPayload = nil + unit.configStateIdx = 0 + if unit.err != nil { + errMsg := unit.err.Error() + if unit.State != client.UnitStateFailed || unit.Message != errMsg || diffPayload(unit.Payload, nil) { + changed = true + unit.State = client.UnitStateFailed + unit.Message = errMsg + unit.Payload = nil + } + } else if unit.State != client.UnitStateStarting { + if unit.State != client.UnitStateFailed || unit.Message != missingMsg || diffPayload(unit.Payload, nil) { + changed = true + unit.State = client.UnitStateFailed + unit.Message = missingMsg + unit.Payload = nil + } + } + } + s.Units[key] = unit + } + if checkin.VersionInfo != nil { + if checkin.VersionInfo.Name != "" && s.VersionInfo.Name != checkin.VersionInfo.Name { + s.VersionInfo.Name = checkin.VersionInfo.Name + changed = true + } + if checkin.VersionInfo.Version != "" && s.VersionInfo.Version != checkin.VersionInfo.Version { + s.VersionInfo.Version = checkin.VersionInfo.Version + changed = true + } + if checkin.VersionInfo.Meta != nil && diffMeta(s.VersionInfo.Meta, checkin.VersionInfo.Meta) { + s.VersionInfo.Meta = checkin.VersionInfo.Meta + changed = true + } + } + return changed +} + +func (s *ComponentState) unsettled() bool { + if len(s.expectedUnits) != len(s.Units) { + // mismatch on unit count + return true + } + for ek, e := range s.expectedUnits { + o, ok := s.Units[ek] + if !ok { + // unit missing + return true + } + if o.configStateIdx != e.configStateIdx || e.state != o.State { + // config or state mismatch + return true + } + } + return false +} + +func (s *ComponentState) toCheckinExpected() *proto.CheckinExpected { + units := make([]*proto.UnitExpected, 0, len(s.expectedUnits)) + for k, u := range s.expectedUnits { + e := &proto.UnitExpected{ + Id: k.UnitID, + Type: proto.UnitType(k.UnitType), + State: proto.State(u.state), + LogLevel: proto.UnitLogLevel(u.logLevel), + ConfigStateIdx: u.configStateIdx, + Config: nil, + } + o, ok := s.Units[k] + if !ok || o.configStateIdx != u.configStateIdx { + e.Config = u.config + } + if u.err != nil { + if !ok || o.unitState == client.UnitStateStopped || o.configStateIdx == 0 { + // unit not existing, already 
stopped or never sent + continue + } + // unit in error needs to be stopped (no config change) + e.State = proto.State_STOPPED + e.ConfigStateIdx = o.configStateIdx + e.Config = nil + } + units = append(units, e) + } + return &proto.CheckinExpected{Units: units} +} + +func (s *ComponentState) cleanupStopped() bool { + cleaned := false + for ek, e := range s.expectedUnits { + if e.state == client.UnitStateStopped { + // should be stopped; check if observed is also reporting stopped + o, ok := s.Units[ek] + if ok && o.unitState == client.UnitStateStopped { + // its also stopped; so it can now be removed from both + delete(s.expectedUnits, ek) + delete(s.Units, ek) + cleaned = true + } + } + } + return cleaned +} + +// forceState force updates the state for the entire component, forcing that state on all units. +func (s *ComponentState) forceState(state client.UnitState, msg string) bool { + changed := false + if s.State != state || s.Message != msg { + s.State = state + s.Message = msg + changed = true + } + for k, unit := range s.Units { + unitState := state + unitMsg := msg + if unit.err != nil && state != client.UnitStateStopped { + // must stay as failed as then unit config is in error + unitState = client.UnitStateFailed + unitMsg = unit.err.Error() + } + if unit.State != unitState || unit.Message != unitMsg || diffPayload(unit.Payload, nil) { + unit.State = unitState + unit.Message = unitMsg + unit.Payload = nil + changed = true + } + + // unit is a copy and must be set back into the map + s.Units[k] = unit + } + return changed +} + +// compState updates just the component state not all the units. +func (s *ComponentState) compState(state client.UnitState, msg string) bool { + if s.State != state || s.Message != msg { + s.State = state + s.Message = msg + return true + } + return false +} + +func diffPayload(existing map[string]interface{}, new map[string]interface{}) bool { + if existing == nil && new != nil { + return true + } + if existing != nil && new == nil { + return true + } + return !reflect.DeepEqual(existing, new) +} + +func diffMeta(existing map[string]string, new map[string]string) bool { + if existing == nil && new != nil { + return true + } + if existing != nil && new == nil { + return true + } + return !reflect.DeepEqual(existing, new) +} From 9bba9753120f2849abd6760c26653a205f73174b Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 24 Aug 2022 10:05:59 -0400 Subject: [PATCH 14/49] Fix action dispatching that was using ActionType instead of InputType as before (#973) --- .../actions/handlers/handler_action_application.go | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go index a5de2384bd1..d83d536dfc0 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -78,7 +78,7 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.A h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.ActionType, timeout) ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - res, err = h.coord.PerformAction(ctx, unit, action.ActionType, params) + res, err = h.coord.PerformAction(ctx, unit, action.InputType, params) } end := time.Now().UTC() @@ -111,11 +111,12 @@ var ( ) // appendActionResponse appends the action response 
property with all the action response values excluding the ones specified in excludeActionResponseFields -// "action_response": { -// "endpoint": { -// "acked": true -// } -// } +// +// "action_response": { +// "endpoint": { +// "acked": true +// } +// } func appendActionResponse(action *fleetapi.ActionApp, inputType string, res map[string]interface{}) { if len(res) == 0 { return From 43ad01d518d7e4a9c0acc1868684e7757a0bcb8c Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 29 Aug 2022 16:30:51 -0400 Subject: [PATCH 15/49] Fix bootstrapping a Fleet Server with v2. (#1010) * Fix bootstrapping a Fleet Server with v2. * Fix lint. * Fix tests. --- internal/pkg/agent/application/application.go | 28 +-- .../application/coordinator/coordinator.go | 4 +- .../application/fleet_server_bootstrap.go | 168 ++++++++++++++++-- .../fleet_server_bootstrap_test.go | 56 ++++++ .../pkg/agent/application/managed_mode.go | 54 ++++-- internal/pkg/agent/cmd/run.go | 44 ++--- internal/pkg/testutils/testutils.go | 17 ++ pkg/component/component_test.go | 3 +- pkg/component/runtime/manager.go | 33 ++-- pkg/component/runtime/manager_test.go | 110 +++++++++--- pkg/component/runtime/runtime.go | 2 +- pkg/component/runtime/runtime_comm.go | 15 +- pkg/component/runtime/state.go | 14 +- specs/fleet-server.spec.yml | 36 ++-- 14 files changed, 458 insertions(+), 126 deletions(-) create mode 100644 internal/pkg/agent/application/fleet_server_bootstrap_test.go diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 6b3c4b73d42..c5076535825 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -65,7 +65,7 @@ func New( upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig, agentInfo) - runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), tracer) + runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), agentInfo, tracer) if err != nil { return nil, fmt.Errorf("failed to initialize runtime manager: %w", err) } @@ -85,13 +85,6 @@ func New( log.Debugf("Reloading of configuration is on, frequency is set to %s", cfg.Settings.Reload.Period) configMgr = newPeriodic(log, cfg.Settings.Reload.Period, discover, loader) } - } else if configuration.IsFleetServerBootstrap(cfg.Fleet) { - log.Info("Parsed configuration and determined agent is in Fleet Server bootstrap mode") - compModifiers = append(compModifiers, FleetServerComponentModifier) - configMgr, err = newFleetServerBootstrapManager(log) - if err != nil { - return nil, err - } } else { var store storage.Store store, cfg, err = mergeFleetConfig(rawConfig) @@ -99,14 +92,21 @@ func New( return nil, err } - log.Info("Parsed configuration and determined agent is managed by Fleet") + if configuration.IsFleetServerBootstrap(cfg.Fleet) { + log.Info("Parsed configuration and determined agent is in Fleet Server bootstrap mode") - compModifiers = append(compModifiers, FleetServerComponentModifier) - managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) - if err != nil { - return nil, err + compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server)) + configMgr = newFleetServerBootstrapManager(log) + } else { + log.Info("Parsed configuration and determined agent is managed by Fleet") + + compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server)) + managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) + if err != nil { + return nil, err + } + 
configMgr = managed } - configMgr = managed } composable, err := composable.New(log, rawConfig) diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index 906b9af2d64..30fcdfcce81 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -113,7 +113,7 @@ type VarsManager interface { // ComponentsModifier is a function that takes the computed components model and modifies it before // passing it into the components runtime manager. -type ComponentsModifier func(comps []component.Component, policy map[string]interface{}) ([]component.Component, error) +type ComponentsModifier func(comps []component.Component) ([]component.Component, error) // State provides the current state of the coordinator along with all the current states of components and units. type State struct { @@ -492,7 +492,7 @@ func (c *Coordinator) process(ctx context.Context) (err error) { } for _, modifier := range c.modifiers { - comps, err = modifier(comps, cfg) + comps, err = modifier(comps) if err != nil { return fmt.Errorf("failed to modify components: %w", err) } diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index bfb801b9dde..369c63bf53b 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -6,35 +6,80 @@ package application import ( "context" - "time" + "fmt" + "gopkg.in/yaml.v2" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) +const ( + elasticsearch = "elasticsearch" + fleetServer = "fleet-server" +) + // injectFleetServerInput is the base configuration that is used plus the FleetServerComponentModifier that adjusts // the components before sending them to the runtime manager. var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{ "outputs": map[string]interface{}{ "default": map[string]interface{}{ - "type": "elasticsearch", + "type": elasticsearch, "hosts": []string{"localhost:9200"}, }, }, "inputs": []interface{}{ map[string]interface{}{ - "type": "fleet-server", + "id": fleetServer, + "type": fleetServer, }, }, }) // FleetServerComponentModifier modifies the comps to inject extra information from the policy into // the Fleet Server component and units needed to run Fleet Server correctly. -func FleetServerComponentModifier(comps []component.Component, policy map[string]interface{}) ([]component.Component, error) { - // TODO(blakerouse): Need to add logic to update the Fleet Server component with extra information from the policy. 
-	return comps, nil
+func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) coordinator.ComponentsModifier {
+	return func(comps []component.Component) ([]component.Component, error) {
+		for i, comp := range comps {
+			if comp.Spec.InputType == fleetServer {
+				for j, unit := range comp.Units {
+					if unit.Type == client.UnitTypeOutput && unit.Config.Type == elasticsearch {
+						unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), &serverCfg.Output.Elasticsearch)
+						if err != nil {
+							return nil, err
+						}
+						fixOutputMap(unitCfgMap)
+						unitCfg, err := component.ExpectedConfig(unitCfgMap)
+						if err != nil {
+							return nil, err
+						}
+						unit.Config = unitCfg
+					} else if unit.Type == client.UnitTypeInput && unit.Config.Type == fleetServer {
+						unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), &inputFleetServer{
+							Policy: serverCfg.Policy,
+							Server: serverCfg,
+						})
+						if err != nil {
+							return nil, err
+						}
+						fixInputMap(unitCfgMap)
+						unitCfg, err := component.ExpectedConfig(unitCfgMap)
+						if err != nil {
+							return nil, err
+						}
+						unit.Config = unitCfg
+					}
+					comp.Units[j] = unit
+				}
+			}
+			comps[i] = comp
+		}
+		return comps, nil
+	}
 }

 type fleetServerBootstrapManager struct {
@@ -46,18 +91,15 @@ type fleetServerBootstrapManager struct {

 func newFleetServerBootstrapManager(
 	log *logger.Logger,
-) (*fleetServerBootstrapManager, error) {
+) *fleetServerBootstrapManager {
 	return &fleetServerBootstrapManager{
 		log:   log,
 		ch:    make(chan coordinator.ConfigChange),
 		errCh: make(chan error),
-	}, nil
+	}
 }

 func (m *fleetServerBootstrapManager) Run(ctx context.Context) error {
-	ctx, cancel := context.WithTimeout(ctx, 15*time.Second)
-	defer cancel()
-
 	m.log.Debugf("injecting fleet-server for bootstrap")
 	select {
 	case <-ctx.Done():
@@ -76,3 +118,107 @@ func (m *fleetServerBootstrapManager) Errors() <-chan error {

 func (m *fleetServerBootstrapManager) Watch() <-chan coordinator.ConfigChange {
 	return m.ch
 }
+
+func fixOutputMap(m map[string]interface{}) {
+	// api_key cannot be present or Fleet Server will complain
+	delete(m, "api_key")
+}
+
+type inputFleetServer struct {
+	Policy *configuration.FleetServerPolicyConfig `yaml:"policy,omitempty"`
+	Server *configuration.FleetServerConfig       `yaml:"server"`
+}
+
+func fixInputMap(m map[string]interface{}) {
+	if srv, ok := m["server"]; ok {
+		if srvMap, ok := srv.(map[string]interface{}); ok {
+			// bootstrap is internal to Elastic Agent
+			delete(srvMap, "bootstrap")
+			// policy is present one level up, on the input, when sent to Fleet Server
+			delete(srvMap, "policy")
+			// output is present in the output unit
+			delete(srvMap, "output")
+		}
+	}
+}
+
+// toMapStr converts the input into a map[string]interface{}.
+//
+// This is done by using YAML to marshal and then unmarshal it into the map[string]interface{}. YAML tags on the struct
+// match the loading and unloading of the configuration, so this ensures that it will match what Fleet Server is
+// expecting.
+func toMapStr(input ...interface{}) (map[string]interface{}, error) {
+	m := map[interface{}]interface{}{}
+	for _, i := range input {
+		im, err := toMapInterface(i)
+		if err != nil {
+			return nil, err
+		}
+		m = mergeNestedMaps(m, im)
+	}
+	// toMapInterface will set nested maps to a map[interface{}]interface{} which `component.ExpectedConfig` cannot
+	// handle; they must be a map[string]interface{}.
+	fm := fixYamlMap(m)
+	r, ok := fm.(map[string]interface{})
+	if !ok {
+		return nil, fmt.Errorf("expected map[string]interface{}, got %T", fm)
+	}
+	return r, nil
+}
+
+// toMapInterface converts the input into a map[interface{}]interface{} using YAML marshal and unmarshal.
+func toMapInterface(input interface{}) (map[interface{}]interface{}, error) {
+	var res map[interface{}]interface{}
+	raw, err := yaml.Marshal(input)
+	if err != nil {
+		return nil, err
+	}
+	err = yaml.Unmarshal(raw, &res)
+	if err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+// mergeNestedMaps merges two map[interface{}]interface{} together deeply.
+func mergeNestedMaps(a, b map[interface{}]interface{}) map[interface{}]interface{} {
+	res := make(map[interface{}]interface{}, len(a))
+	for k, v := range a {
+		res[k] = v
+	}
+	for k, v := range b {
+		if v, ok := v.(map[interface{}]interface{}); ok {
+			if bv, ok := res[k]; ok {
+				if bv, ok := bv.(map[interface{}]interface{}); ok {
+					res[k] = mergeNestedMaps(bv, v)
+					continue
+				}
+			}
+		}
+		res[k] = v
+	}
+	return res
+}
+
+// fixYamlMap converts map[interface{}]interface{} into map[string]interface{} throughout the entire map.
+func fixYamlMap(input interface{}) interface{} {
+	switch i := input.(type) {
+	case map[string]interface{}:
+		for k, v := range i {
+			i[k] = fixYamlMap(v)
+		}
+	case map[interface{}]interface{}:
+		m := map[string]interface{}{}
+		for k, v := range i {
+			if ks, ok := k.(string); ok {
+				m[ks] = fixYamlMap(v)
+			}
+		}
+		return m
+	case []interface{}:
+		for j, v := range i {
+			i[j] = fixYamlMap(v)
+		}
+	}
+	return input
+}
diff --git a/internal/pkg/agent/application/fleet_server_bootstrap_test.go b/internal/pkg/agent/application/fleet_server_bootstrap_test.go
new file mode 100644
index 00000000000..53fd864fdb6
--- /dev/null
+++ b/internal/pkg/agent/application/fleet_server_bootstrap_test.go
@@ -0,0 +1,56 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package application + +import ( + "context" + "errors" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + + "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" + "github.com/elastic/elastic-agent/internal/pkg/testutils" +) + +func TestFleetServerBootstrapManager(t *testing.T) { + l := testutils.NewErrorLogger(t) + mgr := newFleetServerBootstrapManager(l) + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + g, _ := errgroup.WithContext(ctx) + + var change coordinator.ConfigChange + g.Go(func() error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case err := <-mgr.Errors(): + cancel() + return err + case change = <-mgr.Watch(): + cancel() + } + } + }) + + g.Go(func() error { + return mgr.Run(ctx) + }) + + err := g.Wait() + if err != nil && !errors.Is(err, context.Canceled) { + require.NoError(t, err) + } + + require.NotNil(t, change) + assert.NotNil(t, change.Config()) +} diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index 893b7541606..8abeab60eba 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -146,12 +146,19 @@ func (m *managedConfigManager) Run(ctx context.Context) error { stateRestored = true } - // In the case this is the first start and this Elastic Agent is running a Fleet Server; we need to ensure that + // In the case this Elastic Agent is running a Fleet Server; we need to ensure that // the Fleet Server is running before the Fleet gateway is started. - if !stateRestored && m.cfg.Fleet.Server != nil { - err = m.initFleetServer(ctx) - if err != nil { - return fmt.Errorf("failed to initialize Fleet Server: %w", err) + if m.cfg.Fleet.Server != nil { + if stateRestored { + err = m.waitForFleetServer(ctx) + if err != nil { + return fmt.Errorf("failed to initialize Fleet Server: %w", err) + } + } else { + err = m.initFleetServer(ctx) + if err != nil { + return fmt.Errorf("failed to initialize Fleet Server: %w", err) + } } } @@ -233,31 +240,42 @@ func (m *managedConfigManager) initFleetServer(ctx context.Context) error { case m.ch <- &localConfigChange{injectFleetServerInput}: } - m.log.Debugf("watching fleet-server-default component state") - sub := m.runtime.Subscribe(ctx, "fleet-server-default") + return m.waitForFleetServer(ctx) +} + +func (m *managedConfigManager) waitForFleetServer(ctx context.Context) error { + m.log.Debugf("watching Fleet Server component state") + + ctx, cancel := context.WithCancel(ctx) + defer cancel() + sub := m.runtime.SubscribeAll(ctx) for { select { case <-ctx.Done(): return ctx.Err() - case state := <-sub.Ch(): - if fleetServerRunning(state) { - m.log.With("state", state).Debugf("fleet-server-default component is running") - return nil + case compState := <-sub.Ch(): + if compState.Component.Spec.InputType == "fleet-server" { + if fleetServerRunning(compState.State) { + m.log.With("state", compState.State).Debugf("Fleet Server is running") + return nil + } + m.log.With("state", compState.State).Debugf("Fleet Server is not running") } - m.log.With("state", state).Debugf("fleet-server-default component is not running") } } } func fleetServerRunning(state runtime.ComponentState) bool { - if state.State == client.UnitStateHealthy || state.State == client.UnitStateDegraded { - for key, unit := range state.Units { - if key.UnitType == client.UnitTypeInput && 
key.UnitID == "fleet-server-default-fleet-server" { - if unit.State == client.UnitStateHealthy || unit.State == client.UnitStateDegraded { - return true - } + if state.State == client.UnitStateHealthy { + if len(state.Units) == 0 { + return false + } + for _, unit := range state.Units { + if unit.State != client.UnitStateHealthy { + return false } } + return true } return false } diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index ef04422cff9..1867325f7a6 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -50,7 +50,7 @@ func newRunCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { Use: "run", Short: "Start the elastic-agent.", Run: func(_ *cobra.Command, _ []string) { - if err := run(nil); err != nil { + if err := run(nil); err != nil && !errors.Is(err, context.Canceled) { fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) os.Exit(1) } @@ -181,53 +181,53 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { */ appDone := make(chan bool) - appErrCh := make(chan error) - ctx, cancel = context.WithCancel(context.Background()) - defer cancel() + appErr := make(chan error) go func() { err := app.Run(ctx) close(appDone) - appErrCh <- err + appErr <- err }() // listen for signals signals := make(chan os.Signal, 1) signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGHUP) - reexecing := false + isRex := false + logShutdown := true +LOOP: for { - breakout := false select { case <-stop: - breakout = true + break LOOP case <-appDone: - breakout = true + logShutdown = false + break LOOP case <-rex.ShutdownChan(): - reexecing = true - breakout = true + isRex = true + logShutdown = false + break LOOP case sig := <-signals: if sig == syscall.SIGHUP { rexLogger.Infof("SIGHUP triggered re-exec") + isRex = true rex.ReExec(nil) } else { - breakout = true + break LOOP } } - if breakout { - if !reexecing { - logger.Info("Shutting down Elastic Agent and sending last events...") - } - break - } } + if logShutdown { + logger.Info("Shutting down Elastic Agent and sending last events...") + } cancel() - err = <-appErrCh + err = <-appErr - if !reexecing { + if logShutdown { logger.Info("Shutting down completed.") - return err } - rex.ShutdownComplete() + if isRex { + rex.ShutdownComplete() + } return err } diff --git a/internal/pkg/testutils/testutils.go b/internal/pkg/testutils/testutils.go index e1cbf7d34ed..fcd7cbbe2b6 100644 --- a/internal/pkg/testutils/testutils.go +++ b/internal/pkg/testutils/testutils.go @@ -8,6 +8,11 @@ import ( "runtime" "testing" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" ) @@ -23,3 +28,15 @@ func InitStorage(t *testing.T) { } } } + +// NewErrorLogger creates an error logger for testing. 
+func NewErrorLogger(t *testing.T) *logger.Logger {
+	t.Helper()
+
+	loggerCfg := logger.DefaultLoggingConfig()
+	loggerCfg.Level = logp.ErrorLevel
+
+	log, err := logger.NewFromConfig("", loggerCfg, false)
+	require.NoError(t, err)
+	return log
+}
diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go
index 03d18172593..06fc30c56c0 100644
--- a/pkg/component/component_test.go
+++ b/pkg/component/component_test.go
@@ -8,11 +8,12 @@ package component

 import (
 	"errors"
-	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
 	"path/filepath"
 	"sort"
 	"testing"

+	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
+
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go
index 8fbeeb73ff7..573bb1653da 100644
--- a/pkg/component/runtime/manager.go
+++ b/pkg/component/runtime/manager.go
@@ -16,6 +16,8 @@ import (
 	"sync"
 	"time"

+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+
 	"github.com/gofrs/uuid"
 	"go.elastic.co/apm"

@@ -61,6 +63,7 @@ type Manager struct {
 	logger     *logger.Logger
 	ca         *authority.CertificateAuthority
 	listenAddr string
+	agentInfo  *info.AgentInfo
 	tracer     *apm.Tracer

 	netMx sync.RWMutex
@@ -85,7 +88,7 @@ type Manager struct {
 }

 // NewManager creates a new manager.
-func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (*Manager, error) {
+func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentInfo, tracer *apm.Tracer) (*Manager, error) {
 	ca, err := authority.NewCA()
 	if err != nil {
 		return nil, err
@@ -94,6 +97,7 @@ func NewManager(logger *logger.Logger, listenAddr string, tracer *apm.Tracer) (*
 		logger:     logger,
 		ca:         ca,
 		listenAddr: listenAddr,
+		agentInfo:  agentInfo,
 		tracer:     tracer,
 		waitReady:  make(map[string]waitForReady),
 		current:    make(map[string]*componentRuntimeState),
@@ -143,23 +147,32 @@ func (m *Manager) Run(ctx context.Context) error {
 	m.shuttingDown.Store(false)

 	// start serving GRPC connections
-	errCh := make(chan error)
+	var wg sync.WaitGroup
+	wg.Add(1)
 	go func() {
-		errCh <- server.Serve(lis)
+		defer wg.Done()
+		for {
+			err := server.Serve(lis)
+			if err != nil {
+				m.logger.Errorf("control protocol failed: %v", err)
+			}
+			if ctx.Err() != nil {
+				// context has an error; don't start again
+				return
+			}
+		}
 	}()

-	select {
-	case <-ctx.Done():
-		server.Stop()
-		err = <-errCh
-	case err = <-errCh:
-	}
+	<-ctx.Done()
 	m.shutdown()
+
+	server.Stop()
+	wg.Wait()
 	m.netMx.Lock()
 	m.listener = nil
 	m.server = nil
 	m.netMx.Unlock()
-	return err
+	return ctx.Err()
 }

 // WaitForReady waits until the manager is ready to be used.
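The loop added to (*Manager).Run above is the substance of this hunk: a single return from server.Serve is no longer fatal, and the manager restarts Serve until its own context is cancelled. A minimal, self-contained sketch of the same shutdown-aware restart pattern (illustrative only; `serve` and `stop` stand in for grpc.Server's Serve and Stop, and the standard context, log, and sync packages are assumed):

    // serveUntilCancelled restarts a blocking serve function whenever it returns,
    // and only exits once ctx is cancelled and the serving goroutine has finished.
    func serveUntilCancelled(ctx context.Context, serve func() error, stop func()) {
        var wg sync.WaitGroup
        wg.Add(1)
        go func() {
            defer wg.Done()
            for {
                if err := serve(); err != nil {
                    log.Printf("serve failed: %v", err)
                }
                if ctx.Err() != nil {
                    return // shutting down; do not restart
                }
            }
        }()
        <-ctx.Done()
        stop()    // unblocks serve so the loop can observe the cancelled context
        wg.Wait() // do not return while the goroutine may still be serving
    }
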
diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index a28566c43eb..247f54ccd6e 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -15,6 +15,8 @@ import ( "testing" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "go.elastic.co/apm/apmtest" "github.com/elastic/elastic-agent-libs/logp" @@ -47,11 +49,16 @@ func TestManager_SimpleComponentErr(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -142,11 +149,16 @@ func TestManager_FakeInput_StartStop(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -259,11 +271,16 @@ func TestManager_FakeInput_BadUnitToGood(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -426,11 +443,16 @@ func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -576,11 +598,16 @@ func TestManager_FakeInput_Configure(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -698,11 +725,16 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", 
apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -852,11 +884,16 @@ func TestManager_FakeInput_ActionState(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -978,11 +1015,16 @@ func TestManager_FakeInput_Restarts(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -1113,11 +1155,16 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -1230,11 +1277,16 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -1350,11 +1402,16 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) @@ -1558,11 +1615,16 @@ func TestManager_FakeInput_LogLevel(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - m, err := NewManager(newErrorLogger(t), "localhost:0", 
apmtest.DiscardTracer) + ai, _ := info.NewAgentInfo(true) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) require.NoError(t, err) errCh := make(chan error) go func() { - errCh <- m.Run(ctx) + err := m.Run(ctx) + if errors.Is(err, context.Canceled) { + err = nil + } + errCh <- err }() waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second) diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index e2c9a2bd013..aae913efac4 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -88,7 +88,7 @@ type componentRuntimeState struct { } func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component.Component) (*componentRuntimeState, error) { - comm, err := newRuntimeComm(logger, m.getListenAddr(), m.ca) + comm, err := newRuntimeComm(logger, m.getListenAddr(), m.ca, m.agentInfo) if err != nil { return nil, err } diff --git a/pkg/component/runtime/runtime_comm.go b/pkg/component/runtime/runtime_comm.go index 622b514c230..4e9b4c23598 100644 --- a/pkg/component/runtime/runtime_comm.go +++ b/pkg/component/runtime/runtime_comm.go @@ -11,6 +11,8 @@ import ( "strings" "sync" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + protobuf "google.golang.org/protobuf/proto" "github.com/elastic/elastic-agent-client/v7/pkg/client" @@ -40,6 +42,7 @@ type runtimeComm struct { logger *logger.Logger listenAddr string ca *authority.CertificateAuthority + agentInfo *info.AgentInfo name string token string @@ -58,7 +61,7 @@ type runtimeComm struct { actionsResponse chan *proto.ActionResponse } -func newRuntimeComm(logger *logger.Logger, listenAddr string, ca *authority.CertificateAuthority) (*runtimeComm, error) { +func newRuntimeComm(logger *logger.Logger, listenAddr string, ca *authority.CertificateAuthority, agentInfo *info.AgentInfo) (*runtimeComm, error) { token, err := uuid.NewV4() if err != nil { return nil, err @@ -75,6 +78,7 @@ func newRuntimeComm(logger *logger.Logger, listenAddr string, ca *authority.Cert logger: logger, listenAddr: listenAddr, ca: ca, + agentInfo: agentInfo, name: name, token: token.String(), cert: pair, @@ -123,6 +127,15 @@ func (c *runtimeComm) WriteConnInfo(w io.Writer, services ...client.Service) err } func (c *runtimeComm) CheckinExpected(expected *proto.CheckinExpected) { + if c.agentInfo != nil && c.agentInfo.AgentID() != "" { + expected.AgentInfo = &proto.CheckinAgentInfo{ + Id: c.agentInfo.AgentID(), + Version: c.agentInfo.Version(), + Snapshot: c.agentInfo.Snapshot(), + } + } else { + expected.AgentInfo = nil + } c.checkinExpected <- expected } diff --git a/pkg/component/runtime/state.go b/pkg/component/runtime/state.go index b8278d334c7..2bd848cfce0 100644 --- a/pkg/component/runtime/state.go +++ b/pkg/component/runtime/state.go @@ -5,7 +5,6 @@ package runtime import ( - "fmt" "reflect" "github.com/elastic/elastic-agent-client/v7/pkg/client" @@ -227,9 +226,6 @@ func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { _, inExpected := s.expectedUnits[key] existing, _ := s.Units[key] existing.unitState = client.UnitState(unit.State) - if existing.unitState == client.UnitStateStopped { - fmt.Printf("stopped") - } existing.unitMessage = unit.Message existing.unitPayload = payload existing.configStateIdx = unit.ConfigStateIdx @@ -273,7 +269,7 @@ func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool { unit.Message = errMsg unit.Payload = nil } - } else if unit.State != client.UnitStateStarting { + } else if 
unit.State != client.UnitStateStarting && unit.State != client.UnitStateStopped { if unit.State != client.UnitStateFailed || unit.Message != missingMsg || diffPayload(unit.Payload, nil) { changed = true unit.State = client.UnitStateFailed @@ -364,6 +360,14 @@ func (s *ComponentState) cleanupStopped() bool { } } } + for k, u := range s.Units { + _, ok := s.expectedUnits[k] + if !ok && u.State == client.UnitStateStopped { + // stopped unit that is not expected (remove it) + delete(s.Units, k) + cleaned = true + } + } return cleaned } diff --git a/specs/fleet-server.spec.yml b/specs/fleet-server.spec.yml index e0bf9c996ff..f1e760efe8b 100644 --- a/specs/fleet-server.spec.yml +++ b/specs/fleet-server.spec.yml @@ -1,17 +1,19 @@ -version: 2 -inputs: - - name: fleet-server - description: "Fleet Server" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - command: - args: - - "--agent-mode" +version: 2 +inputs: + - name: fleet-server + description: "Fleet Server" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + command: + args: + - "--agent-mode" + - "-E" + - "logging.to_stderr=true" From dee2403106a738b99f1e8a6e96e35c75b562e177 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 30 Aug 2022 14:41:14 +0200 Subject: [PATCH 16/49] Query just related files on build (#1045) --- magefile.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/magefile.go b/magefile.go index ead48b5ee42..036b4ffa2f6 100644 --- a/magefile.go +++ b/magefile.go @@ -776,7 +776,7 @@ func packageAgent(requiredPackages []string, packagingFn func()) { } } - files, err := filepath.Glob(filepath.Join(versionedFlatPath, "*")) + files, err := filepath.Glob(filepath.Join(versionedFlatPath, fmt.Sprintf("*%s*", version))) if err != nil { panic(err) } From 712b300fad497066c7c6e66558e248abcf089f81 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 30 Aug 2022 11:26:59 -0400 Subject: [PATCH 17/49] Update main to 8.5.0 (#793) (#1050) (cherry picked from commit 317e03116aa919d69be97242207ad11a28c826aa) Co-authored-by: Pier-Hugues Pellerin --- version/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/version/version.go b/version/version.go index 07f3cb8046a..f5101a34efa 100644 --- a/version/version.go +++ b/version/version.go @@ -4,4 +4,4 @@ package version -const defaultBeatVersion = "8.4.0" +const defaultBeatVersion = "8.5.0" From 5f1e54f40d3e17c36d18b025a67f5b04c4d4af3d Mon Sep 17 00:00:00 2001 From: Craig MacKenzie Date: Wed, 31 Aug 2022 12:27:06 -0400 Subject: [PATCH 18/49] Create archive directory if it doesn't exist. (#1058) On an M1 Mac rename seems to fail if the containing directories do not already exist. 
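Worth noting for this fix: rename(2) does not create missing parent directories, so moving a freshly built package into a not-yet-created archive path fails (typically with ENOENT). A rough sketch of the create-then-move pattern the diff below applies (illustrative only; src and archiveDir are placeholder names, not identifiers from the diff):

    dst := filepath.Join(archiveDir, filepath.Base(src))
    // os.Rename fails when the destination's parent directory is missing,
    // so ensure the directory tree exists first.
    if err := os.MkdirAll(filepath.Dir(dst), 0750); err != nil {
        return err
    }
    return os.Rename(src, dst)
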
--- magefile.go | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/magefile.go b/magefile.go index 036b4ffa2f6..31d13cabd7a 100644 --- a/magefile.go +++ b/magefile.go @@ -894,8 +894,13 @@ func movePackagesToArchive(dropPath string, requiredPackages []string) string { continue } - targetPath := filepath.Join(archivePath, rp) - if err := os.Rename(f, filepath.Join(targetPath, filepath.Base(f))); err != nil { + targetDir := filepath.Join(archivePath, rp, filepath.Base(f)) + targetPath := filepath.Join(targetDir, filepath.Base(f)) + if err := os.MkdirAll(targetDir, 0750); err != nil { + fmt.Printf("warning: failed to create directory %s: %s", targetDir, err) + } + + if err := os.Rename(f, targetPath); err != nil { panic(errors.Wrap(err, "failed renaming file")) } } From 6ff50acc3b08026aee04cb1d46f4865f1d76ac6e Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Wed, 7 Sep 2022 14:19:50 +0200 Subject: [PATCH 19/49] fixed docker build (#1105) --- magefile.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/magefile.go b/magefile.go index 31d13cabd7a..9599d4eef52 100644 --- a/magefile.go +++ b/magefile.go @@ -894,12 +894,11 @@ func movePackagesToArchive(dropPath string, requiredPackages []string) string { continue } - targetDir := filepath.Join(archivePath, rp, filepath.Base(f)) - targetPath := filepath.Join(targetDir, filepath.Base(f)) + targetPath := filepath.Join(archivePath, rp, filepath.Base(f)) + targetDir := filepath.Dir(targetPath) if err := os.MkdirAll(targetDir, 0750); err != nil { fmt.Printf("warning: failed to create directory %s: %s", targetDir, err) } - if err := os.Rename(f, targetPath); err != nil { panic(errors.Wrap(err, "failed renaming file")) } From f95c9edf9be0e8a8d6b76868423b322aaba3df21 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 7 Sep 2022 10:50:34 -0400 Subject: [PATCH 20/49] V2 command work dir (#1061) * Fix v2 work directory for command. Add permission check for execution. Add determining root into runtime prevention. * Add writeable by group and other in check. * Fix restart and stopping issues in command runtime for failing binaries. * Fix issue in endpoint spec. Allow an input to not require an ID, but that ID must be unique. * Remove unused transpiler rules and steps. * Fix test. * Fix workDir for windows. * Reset to checkin period. * Fix test and code review issues. * Add extra log message in unit test. * More fixes from code review. * Fix test. 
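The permission check mentioned above lands in pkg/utils/perm_unix.go, which this excerpt does not show. As a rough sketch only (the function name and exact policy are assumptions, not the shipped code), a Unix-style check that refuses to execute a binary writable by group or other could look like:

package main

import (
	"fmt"
	"os"
)

// checkExecPerms sketches the idea of the pre-execution check: refuse
// to run a component binary that group or other can write to, since an
// unprivileged user could replace it before the Agent executes it.
func checkExecPerms(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	if perm := info.Mode().Perm(); perm&0o022 != 0 {
		// 0o020 = group write bit, 0o002 = other write bit
		return fmt.Errorf("%s has too-permissive mode %o: writable by group or other", path, perm)
	}
	return nil
}

func main() {
	// Hypothetical component path, for illustration only.
	if err := checkExecPerms("data/elastic-agent/components/filebeat"); err != nil {
		fmt.Println("refusing to execute:", err)
	}
}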
--- .../pkg/agent/application/paths/common.go | 5 + internal/pkg/agent/application/periodic.go | 2 +- internal/pkg/agent/cmd/install.go | 5 +- internal/pkg/agent/cmd/run.go | 2 +- internal/pkg/agent/cmd/uninstall.go | 5 +- .../pkg/agent/transpiler/merge_strategy.go | 91 - internal/pkg/agent/transpiler/rules.go | 1844 ----------------- internal/pkg/agent/transpiler/rules_test.go | 1122 ---------- internal/pkg/agent/transpiler/steps.go | 345 --- internal/pkg/agent/transpiler/steps_test.go | 137 -- .../tests/exec-1.0-darwin-x86_64/main.go | 35 - magefile.go | 19 +- pkg/component/component.go | 30 +- pkg/component/component_test.go | 7 +- pkg/component/runtime/command.go | 156 +- pkg/component/runtime/manager_test.go | 137 +- pkg/component/spec.go | 2 + pkg/utils/perm_unix.go | 32 + pkg/utils/perm_windows.go | 15 + .../agent/install => pkg/utils}/root_unix.go | 2 +- .../install => pkg/utils}/root_windows.go | 2 +- .../utils}/root_windows_test.go | 2 +- specs/endpoint-security.spec.yml | 140 +- 23 files changed, 410 insertions(+), 3727 deletions(-) delete mode 100644 internal/pkg/agent/transpiler/merge_strategy.go delete mode 100644 internal/pkg/agent/transpiler/rules.go delete mode 100644 internal/pkg/agent/transpiler/rules_test.go delete mode 100644 internal/pkg/agent/transpiler/steps.go delete mode 100644 internal/pkg/agent/transpiler/steps_test.go delete mode 100644 internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/main.go create mode 100644 pkg/utils/perm_unix.go create mode 100644 pkg/utils/perm_windows.go rename {internal/pkg/agent/install => pkg/utils}/root_unix.go (97%) rename {internal/pkg/agent/install => pkg/utils}/root_windows.go (98%) rename {internal/pkg/agent/install => pkg/utils}/root_windows_test.go (96%) diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 8b6cc06743e..1b6ef95b188 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -136,6 +136,11 @@ func Data() string { return filepath.Join(Top(), "data") } +// Run returns the run directory for Agent +func Run() string { + return filepath.Join(Home(), "run") +} + // Components returns the component directory for Agent func Components() string { return filepath.Join(Home(), "components") diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go index e32234a4ca3..3c4e2ed4d63 100644 --- a/internal/pkg/agent/application/periodic.go +++ b/internal/pkg/agent/application/periodic.go @@ -108,7 +108,7 @@ func (p *periodic) work() error { return nil } - p.log.Info("No configuration change") + p.log.Debug("No configuration change") return nil } diff --git a/internal/pkg/agent/cmd/install.go b/internal/pkg/agent/cmd/install.go index f111d4d4618..f0049a828e6 100644 --- a/internal/pkg/agent/cmd/install.go +++ b/internal/pkg/agent/cmd/install.go @@ -16,6 +16,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/cli" + "github.com/elastic/elastic-agent/pkg/utils" ) func newInstallCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { @@ -48,12 +49,12 @@ func installCmd(streams *cli.IOStreams, cmd *cobra.Command) error { return err } - isAdmin, err := install.HasRoot() + isAdmin, err := utils.HasRoot() if err != nil { return fmt.Errorf("unable to perform install command while checking for administrator rights, %w", 
err) } if !isAdmin { - return fmt.Errorf("unable to perform install command, not executed with %s permissions", install.PermissionUser) + return fmt.Errorf("unable to perform install command, not executed with %s permissions", utils.PermissionUser) } status, reason := install.Status() force, _ := cmd.Flags().GetBool("force") diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index 1867325f7a6..9ad4827e166 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -134,7 +134,7 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { } if allowEmptyPgp, _ := release.PGP(); allowEmptyPgp { - logger.Info("Artifact has been built with security disabled. Elastic Agent will not verify signatures of the artifacts.") + logger.Info("Elastic Agent has been built with security disabled. Elastic Agent will not verify signatures of upgrade artifact.") } execPath, err := reexecPath() diff --git a/internal/pkg/agent/cmd/uninstall.go b/internal/pkg/agent/cmd/uninstall.go index f66c381add6..145cfe3aa08 100644 --- a/internal/pkg/agent/cmd/uninstall.go +++ b/internal/pkg/agent/cmd/uninstall.go @@ -14,6 +14,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/install" "github.com/elastic/elastic-agent/internal/pkg/cli" + "github.com/elastic/elastic-agent/pkg/utils" ) func newUninstallCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { @@ -38,12 +39,12 @@ Unless -f is used this command will ask confirmation before performing removal. } func uninstallCmd(streams *cli.IOStreams, cmd *cobra.Command) error { - isAdmin, err := install.HasRoot() + isAdmin, err := utils.HasRoot() if err != nil { return fmt.Errorf("unable to perform command while checking for administrator rights, %w", err) } if !isAdmin { - return fmt.Errorf("unable to perform command, not executed with %s permissions", install.PermissionUser) + return fmt.Errorf("unable to perform command, not executed with %s permissions", utils.PermissionUser) } status, reason := install.Status() if status == install.NotInstalled { diff --git a/internal/pkg/agent/transpiler/merge_strategy.go b/internal/pkg/agent/transpiler/merge_strategy.go deleted file mode 100644 index a20e44936d2..00000000000 --- a/internal/pkg/agent/transpiler/merge_strategy.go +++ /dev/null @@ -1,91 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package transpiler - -type injector interface { - Inject(target []Node, source interface{}) []Node - InjectItem(target []Node, source Node) []Node - InjectCollection(target []Node, source []Node) []Node -} - -func mergeStrategy(strategy string) injector { - - switch strategy { - case "insert_before": - return injectBeforeInjector{} - case "insert_after": - return injectAfterInjector{} - case "replace": - return replaceInjector{} - case "noop": - return noopInjector{} - } - - return injectAfterInjector{} -} - -type noopInjector struct{} - -func (i noopInjector) Inject(target []Node, source interface{}) []Node { - return inject(i, target, source) -} - -func (noopInjector) InjectItem(target []Node, source Node) []Node { return target } - -func (noopInjector) InjectCollection(target []Node, source []Node) []Node { return target } - -type injectAfterInjector struct{} - -func (i injectAfterInjector) Inject(target []Node, source interface{}) []Node { - return inject(i, target, source) -} - -func (injectAfterInjector) InjectItem(target []Node, source Node) []Node { - return append(target, source) -} - -func (injectAfterInjector) InjectCollection(target []Node, source []Node) []Node { - return append(target, source...) -} - -type injectBeforeInjector struct{} - -func (i injectBeforeInjector) Inject(target []Node, source interface{}) []Node { - return inject(i, target, source) -} - -func (injectBeforeInjector) InjectItem(target []Node, source Node) []Node { - return append([]Node{source}, target...) -} - -func (injectBeforeInjector) InjectCollection(target []Node, source []Node) []Node { - return append(source, target...) -} - -type replaceInjector struct{} - -func (i replaceInjector) Inject(target []Node, source interface{}) []Node { - return inject(i, target, source) -} - -func (replaceInjector) InjectItem(target []Node, source Node) []Node { - return []Node{source} -} - -func (replaceInjector) InjectCollection(target []Node, source []Node) []Node { - return source -} - -func inject(i injector, target []Node, source interface{}) []Node { - if sourceCollection, ok := source.([]Node); ok { - return i.InjectCollection(target, sourceCollection) - } - - if node, ok := source.(Node); ok { - return i.InjectItem(target, node) - } - - return target -} diff --git a/internal/pkg/agent/transpiler/rules.go b/internal/pkg/agent/transpiler/rules.go deleted file mode 100644 index ca97cedd707..00000000000 --- a/internal/pkg/agent/transpiler/rules.go +++ /dev/null @@ -1,1844 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package transpiler - -import ( - "fmt" - "reflect" - "regexp" - - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" -) - -// AgentInfo is an interface to get the agent info. -type AgentInfo interface { - AgentID() string - Version() string - Snapshot() bool - Headers() map[string]string -} - -// RuleList is a container that allow the same tree to be executed on multiple defined Rule. -type RuleList struct { - Rules []Rule -} - -// Rule defines a rule that can be Applied on the Tree. -type Rule interface { - Apply(AgentInfo, *AST) error -} - -// Apply applies a list of rules over the same tree and use the result of the previous execution -// as the input of the next rule, will return early if any error is raise during the execution. 
-func (r *RuleList) Apply(agentInfo AgentInfo, ast *AST) error { - var err error - for _, rule := range r.Rules { - err = rule.Apply(agentInfo, ast) - if err != nil { - return err - } - } - - return nil -} - -// MarshalYAML marsharl a rule list to YAML. -func (r *RuleList) MarshalYAML() (interface{}, error) { - doc := make([]map[string]Rule, 0, len(r.Rules)) - - for _, rule := range r.Rules { - var name string - switch rule.(type) { - case *SelectIntoRule: - name = "select_into" - case *CopyRule: - name = "copy" - case *CopyToListRule: - name = "copy_to_list" - case *CopyAllToListRule: - name = "copy_all_to_list" - case *RenameRule: - name = "rename" - case *TranslateRule: - name = "translate" - case *TranslateWithRegexpRule: - name = "translate_with_regexp" - case *MapRule: - name = "map" - case *FilterRule: - name = "filter" - case *FilterValuesRule: - name = "filter_values" - case *FilterValuesWithRegexpRule: - name = "filter_values_with_regexp" - case *ExtractListItemRule: - name = "extract_list_items" - case *InjectIndexRule: - name = "inject_index" - case *InjectStreamProcessorRule: - name = "inject_stream_processor" - case *InjectAgentInfoRule: - name = "inject_agent_info" - case *MakeArrayRule: - name = "make_array" - case *RemoveKeyRule: - name = "remove_key" - case *FixStreamRule: - name = "fix_stream" - case *InsertDefaultsRule: - name = "insert_defaults" - case *InjectHeadersRule: - name = "inject_headers" - case *InjectQueueRule: - name = "inject_queue" - default: - return nil, fmt.Errorf("unknown rule of type %T", rule) - } - - subdoc := map[string]Rule{ - name: rule, - } - - doc = append(doc, subdoc) - } - return doc, nil -} - -// UnmarshalYAML unmarshal a YAML document into a RuleList. -func (r *RuleList) UnmarshalYAML(unmarshal func(interface{}) error) error { - var unpackTo []map[string]interface{} - - err := unmarshal(&unpackTo) - if err != nil { - return err - } - - // NOTE(ph): this is a bit of a hack because I want to make sure - // the unpack strategy stay in the struct implementation and yaml - // doesn't have a RawMessage similar to the JSON package, so partial unpack - // is not possible. 
- unpack := func(in interface{}, out interface{}) error { - b, err := yaml.Marshal(in) - if err != nil { - return err - } - return yaml.Unmarshal(b, out) - } - - var rules []Rule - - for _, m := range unpackTo { - ks := keys(m) - if len(ks) > 1 { - return fmt.Errorf("unknown rule identifier, expecting one identifier and received %d", len(ks)) - } - - name := ks[0] - fields := m[name] - - var r Rule - switch name { - case "select_into": - r = &SelectIntoRule{} - case "copy": - r = &CopyRule{} - case "copy_to_list": - r = &CopyToListRule{} - case "copy_all_to_list": - r = &CopyAllToListRule{} - case "rename": - r = &RenameRule{} - case "translate": - r = &TranslateRule{} - case "translate_with_regexp": - r = &TranslateWithRegexpRule{} - case "map": - r = &MapRule{} - case "filter": - r = &FilterRule{} - case "filter_values": - r = &FilterValuesRule{} - case "filter_values_with_regexp": - r = &FilterValuesWithRegexpRule{} - case "extract_list_items": - r = &ExtractListItemRule{} - case "inject_index": - r = &InjectIndexRule{} - case "inject_stream_processor": - r = &InjectStreamProcessorRule{} - case "inject_agent_info": - r = &InjectAgentInfoRule{} - case "make_array": - r = &MakeArrayRule{} - case "remove_key": - r = &RemoveKeyRule{} - case "fix_stream": - r = &FixStreamRule{} - case "insert_defaults": - r = &InsertDefaultsRule{} - case "inject_headers": - r = &InjectHeadersRule{} - case "inject_queue": - r = &InjectQueueRule{} - default: - return fmt.Errorf("unknown rule of type %s", name) - } - - if err := unpack(fields, r); err != nil { - return err - } - - rules = append(rules, r) - } - r.Rules = rules - return nil -} - -// SelectIntoRule inserts selected paths into a new Dict node. -type SelectIntoRule struct { - Selectors []Selector - Path string -} - -// Apply applies select into rule. -func (r *SelectIntoRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to select data into configuration") - } - }() - target := &Dict{} - - for _, selector := range r.Selectors { - lookupNode, ok := Lookup(ast.Clone(), selector) - if !ok { - continue - } - - target.value = append(target.value, lookupNode.Clone()) - } - - if len(target.value) > 0 { - return Insert(ast, target, r.Path) - } - - return nil -} - -// SelectInto creates a SelectIntoRule -func SelectInto(path string, selectors ...Selector) *SelectIntoRule { - return &SelectIntoRule{ - Selectors: selectors, - Path: path, - } -} - -// RemoveKeyRule removes key from a dict. -type RemoveKeyRule struct { - Key string -} - -// Apply applies remove key rule. -func (r *RemoveKeyRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to remove key from configuration") - } - }() - - sourceMap, ok := ast.root.(*Dict) - if !ok { - return nil - } - - for i, item := range sourceMap.value { - itemKey, ok := item.(*Key) - if !ok { - continue - } - - if itemKey.name != r.Key { - continue - } - - sourceMap.value = append(sourceMap.value[:i], sourceMap.value[i+1:]...) - return nil - } - return nil -} - -// RemoveKey creates a RemoveKeyRule -func RemoveKey(key string) *RemoveKeyRule { - return &RemoveKeyRule{ - Key: key, - } -} - -// MakeArrayRule transforms a single value into an array of length 1. -type MakeArrayRule struct { - Item Selector - To string -} - -// Apply applies make array rule. 
-func (r *MakeArrayRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to create Dictionary out of configuration") - } - }() - - sourceNode, found := Lookup(ast, r.Item) - if !found { - return nil - } - - newList := &List{ - value: make([]Node, 0, 1), - } - - sourceKey, ok := sourceNode.(*Key) - if !ok { - return nil - } - - newList.value = append(newList.value, sourceKey.value.Clone()) - return Insert(ast, newList, r.To) -} - -// MakeArray creates a MakeArrayRule -func MakeArray(item Selector, to string) *MakeArrayRule { - return &MakeArrayRule{ - Item: item, - To: to, - } -} - -// CopyToListRule is a rule which copies a specified -// node into every item in a provided list. -type CopyToListRule struct { - Item Selector - To string - OnConflict string `yaml:"on_conflict" config:"on_conflict"` -} - -// Apply copies specified node into every item of the list. -func (r *CopyToListRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to copy segment into configuration") - } - }() - - sourceNode, found := Lookup(ast, r.Item) - if !found { - // nothing to copy - return nil - } - - targetListNode, found := Lookup(ast, r.To) - if !found { - // nowhere to copy - return nil - } - - targetList, ok := targetListNode.Value().(*List) - if !ok { - // not a list; skip - return nil - } - - for _, listItem := range targetList.value { - listItemMap, ok := listItem.(*Dict) - if !ok { - continue - } - - if existingNode, found := listItemMap.Find(r.Item); found { - sourceNodeItemsList := sourceNode.Clone().Value().(Node) // key.value == node - if existingList, ok := existingNode.Value().(*List); ok { - existingList.value = mergeStrategy(r.OnConflict).Inject(existingList.Clone().Value().([]Node), sourceNodeItemsList.Value()) - } else if existingMap, ok := existingNode.Value().(*Dict); ok { - existingMap.value = mergeStrategy(r.OnConflict).Inject(existingMap.Clone().Value().([]Node), sourceNodeItemsList.Value()) - } - - continue - } - - // if not conflicting move entire node - listItemMap.value = append(listItemMap.value, sourceNode.Clone()) - } - - return nil -} - -// CopyToList creates a CopyToListRule -func CopyToList(item Selector, to, onMerge string) *CopyToListRule { - return &CopyToListRule{ - Item: item, - To: to, - OnConflict: onMerge, - } -} - -// CopyAllToListRule is a rule which copies a all nodes -// into every item in a provided list. -type CopyAllToListRule struct { - To string - Except []string - OnConflict string `yaml:"on_conflict" config:"on_conflict"` -} - -// Apply copies all nodes into every item of the list. 
-func (r *CopyAllToListRule) Apply(agentInfo AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to copy all nodes into a list") - } - }() - - // get list of nodes - astMap, err := ast.Map() - if err != nil { - return err - } - - isFiltered := func(item string) bool { - for _, f := range r.Except { - if f == item { - return true - } - } - - return false - } - - // foreach node if not filtered out - for item := range astMap { - if isFiltered(item) { - continue - } - - if err := CopyToList(item, r.To, r.OnConflict).Apply(agentInfo, ast); err != nil { - return err - } - } - - return nil -} - -// CopyAllToList creates a CopyAllToListRule -func CopyAllToList(to, onMerge string, except ...string) *CopyAllToListRule { - return &CopyAllToListRule{ - To: to, - Except: except, - OnConflict: onMerge, - } -} - -// FixStreamRule fixes streams to contain default values -// in case no value or invalid value are provided -type FixStreamRule struct { -} - -// Apply stream fixes. -func (r *FixStreamRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to fix stream section of configuration") - } - }() - - const defaultDataset = "generic" - const defaultNamespace = "default" - - inputsNode, found := Lookup(ast, "inputs") - if !found { - return nil - } - - inputsNodeList, ok := inputsNode.Value().(*List) - if !ok { - return nil - } - - for _, inputNode := range inputsNodeList.value { - // fix this only if in compact form - if nsNode, found := inputNode.Find("data_stream.namespace"); found { - nsKey, ok := nsNode.(*Key) - if ok { - if newNamespace := nsKey.value.String(); newNamespace == "" { - nsKey.value = &StrVal{value: defaultNamespace} - } - } - } else { - dsNode, found := inputNode.Find("data_stream") - if found { - // got a datastream - datastreamMap, ok := dsNode.Value().(*Dict) - if ok { - nsNode, found := datastreamMap.Find("namespace") - if found { - nsKey, ok := nsNode.(*Key) - if ok { - if newNamespace := nsKey.value.String(); newNamespace == "" { - nsKey.value = &StrVal{value: defaultNamespace} - } - } - } else { - inputMap, ok := inputNode.(*Dict) - if ok { - inputMap.value = append(inputMap.value, &Key{ - name: "data_stream.namespace", - value: &StrVal{value: defaultNamespace}, - }) - } - } - } - } else { - inputMap, ok := inputNode.(*Dict) - if ok { - inputMap.value = append(inputMap.value, &Key{ - name: "data_stream.namespace", - value: &StrVal{value: defaultNamespace}, - }) - } - } - } - - streamsNode, ok := inputNode.Find("streams") - if !ok { - continue - } - - streamsList, ok := streamsNode.Value().(*List) - if !ok { - continue - } - - for _, streamNode := range streamsList.value { - streamMap, ok := streamNode.(*Dict) - if !ok { - continue - } - - // fix this only if in compact form - if dsNameNode, found := streamMap.Find("data_stream.dataset"); found { - dsKey, ok := dsNameNode.(*Key) - if ok { - if newDataset := dsKey.value.String(); newDataset == "" { - dsKey.value = &StrVal{value: defaultDataset} - } - } - } else { - - datastreamNode, found := streamMap.Find("data_stream") - if found { - datastreamMap, ok := datastreamNode.Value().(*Dict) - if !ok { - continue - } - - dsNameNode, found := datastreamMap.Find("dataset") - if found { - dsKey, ok := dsNameNode.(*Key) - if ok { - if newDataset := dsKey.value.String(); newDataset == "" { - dsKey.value = &StrVal{value: defaultDataset} - } - } - } else { - streamMap.value = append(streamMap.value, &Key{ - name: 
"data_stream.dataset", - value: &StrVal{value: defaultDataset}, - }) - } - } else { - streamMap.value = append(streamMap.value, &Key{ - name: "data_stream.dataset", - value: &StrVal{value: defaultDataset}, - }) - } - } - } - } - - return nil -} - -// FixStream creates a FixStreamRule -func FixStream() *FixStreamRule { - return &FixStreamRule{} -} - -// InjectIndexRule injects index to each input. -// Index is in form {type}-{namespace}-{dataset} -// type: is provided to the rule. -// namespace: is collected from streams[n].namespace. If not found used 'default'. -// dataset: is collected from streams[n].data_stream.dataset. If not found used 'generic'. -type InjectIndexRule struct { - Type string -} - -// Apply injects index into input. -func (r *InjectIndexRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to inject index into configuration") - } - }() - - inputsNode, found := Lookup(ast, "inputs") - if !found { - return nil - } - - inputsList, ok := inputsNode.Value().(*List) - if !ok { - return nil - } - - for _, inputNode := range inputsList.value { - namespace := datastreamNamespaceFromInputNode(inputNode) - datastreamType := datastreamTypeFromInputNode(inputNode, r.Type) - - streamsNode, ok := inputNode.Find("streams") - if !ok { - continue - } - - streamsList, ok := streamsNode.Value().(*List) - if !ok { - continue - } - - for _, streamNode := range streamsList.value { - streamMap, ok := streamNode.(*Dict) - if !ok { - continue - } - - dataset := datasetNameFromStreamNode(streamNode) - streamMap.value = append(streamMap.value, &Key{ - name: "index", - value: &StrVal{value: fmt.Sprintf("%s-%s-%s", datastreamType, dataset, namespace)}, - }) - } - } - - return nil -} - -// InjectIndex creates a InjectIndexRule -func InjectIndex(indexType string) *InjectIndexRule { - return &InjectIndexRule{ - Type: indexType, - } -} - -// InjectStreamProcessorRule injects a add fields processor providing -// stream type, namespace and dataset fields into events. -type InjectStreamProcessorRule struct { - Type string - OnConflict string `yaml:"on_conflict" config:"on_conflict"` -} - -// Apply injects processor into input. 
-func (r *InjectStreamProcessorRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to add stream processor to configuration") - } - }() - - inputsNode, found := Lookup(ast, "inputs") - if !found { - return nil - } - - inputsList, ok := inputsNode.Value().(*List) - if !ok { - return nil - } - - for _, inputNode := range inputsList.value { - namespace := datastreamNamespaceFromInputNode(inputNode) - datastreamType := datastreamTypeFromInputNode(inputNode, r.Type) - - var inputID *StrVal - inputIDNode, found := inputNode.Find("id") - if found { - inputID, _ = inputIDNode.Value().(*StrVal) - } - - if inputID != nil { - // get input-level processors node - processorsNode, found := inputNode.Find("processors") - if !found { - processorsNode = &Key{ - name: "processors", - value: &List{value: make([]Node, 0)}, - } - - inputMap, ok := inputNode.(*Dict) - if ok { - inputMap.value = append(inputMap.value, processorsNode) - } - } - - processorsList, ok := processorsNode.Value().(*List) - if !ok { - return errors.New("InjectStreamProcessorRule: input processors is not a list") - } - - // inject `input_id` on the input level - processorMap := &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) - processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "input_id", value: inputID}, - }}}) - addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} - processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) - } - - streamsNode, ok := inputNode.Find("streams") - if !ok { - continue - } - - streamsList, ok := streamsNode.Value().(*List) - if !ok { - continue - } - - for _, streamNode := range streamsList.value { - var streamID *StrVal - streamIDNode, ok := streamNode.Find("id") - if ok { - streamID, _ = streamIDNode.Value().(*StrVal) - } - - streamMap, ok := streamNode.(*Dict) - if !ok { - continue - } - - dataset := datasetNameFromStreamNode(streamNode) - - // get processors node - processorsNode, found := streamNode.Find("processors") - if !found { - processorsNode = &Key{ - name: "processors", - value: &List{value: make([]Node, 0)}, - } - - streamMap.value = append(streamMap.value, processorsNode) - } - - processorsList, ok := processorsNode.Value().(*List) - if !ok { - return errors.New("InjectStreamProcessorRule: processors is not a list") - } - - // datastream - processorMap := &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "data_stream"}}) - processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "type", value: &StrVal{value: datastreamType}}, - &Key{name: "namespace", value: &StrVal{value: namespace}}, - &Key{name: "dataset", value: &StrVal{value: dataset}}, - }}}) - addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} - processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) - - // event - processorMap = &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "event"}}) - processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "dataset", value: &StrVal{value: dataset}}, - }}}) - addFieldsMap = &Dict{value: []Node{&Key{"add_fields", 
processorMap}}} - processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) - - if streamID != nil { - // source stream - processorMap = &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "@metadata"}}) - processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "stream_id", value: streamID.Clone()}, - }}}) - addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} - processorsList.value = mergeStrategy(r.OnConflict).InjectItem(processorsList.value, addFieldsMap) - } - } - } - - return nil -} - -// InjectStreamProcessor creates a InjectStreamProcessorRule -func InjectStreamProcessor(onMerge, streamType string) *InjectStreamProcessorRule { - return &InjectStreamProcessorRule{ - OnConflict: onMerge, - Type: streamType, - } -} - -// InjectAgentInfoRule injects agent information into each rule. -type InjectAgentInfoRule struct{} - -// Apply injects index into input. -func (r *InjectAgentInfoRule) Apply(agentInfo AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to inject agent information into configuration") - } - }() - - inputsNode, found := Lookup(ast, "inputs") - if !found { - return nil - } - - inputsList, ok := inputsNode.Value().(*List) - if !ok { - return nil - } - - for _, inputNode := range inputsList.value { - inputMap, ok := inputNode.(*Dict) - if !ok { - continue - } - - // get processors node - processorsNode, found := inputMap.Find("processors") - if !found { - processorsNode = &Key{ - name: "processors", - value: &List{value: make([]Node, 0)}, - } - - inputMap.value = append(inputMap.value, processorsNode) - } - - processorsList, ok := processorsNode.Value().(*List) - if !ok { - return errors.New("InjectAgentInfoRule: processors is not a list") - } - - // elastic_agent - processorMap := &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "elastic_agent"}}) - processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "id", value: &StrVal{value: agentInfo.AgentID()}}, - &Key{name: "version", value: &StrVal{value: agentInfo.Version()}}, - &Key{name: "snapshot", value: &BoolVal{value: agentInfo.Snapshot()}}, - }}}) - addFieldsMap := &Dict{value: []Node{&Key{"add_fields", processorMap}}} - processorsList.value = mergeStrategy("").InjectItem(processorsList.value, addFieldsMap) - - // agent.id - processorMap = &Dict{value: make([]Node, 0)} - processorMap.value = append(processorMap.value, &Key{name: "target", value: &StrVal{value: "agent"}}) - processorMap.value = append(processorMap.value, &Key{name: "fields", value: &Dict{value: []Node{ - &Key{name: "id", value: &StrVal{value: agentInfo.AgentID()}}, - }}}) - addFieldsMap = &Dict{value: []Node{&Key{"add_fields", processorMap}}} - processorsList.value = mergeStrategy("").InjectItem(processorsList.value, addFieldsMap) - } - - return nil -} - -// InjectAgentInfo creates a InjectAgentInfoRule -func InjectAgentInfo() *InjectAgentInfoRule { - return &InjectAgentInfoRule{} -} - -// ExtractListItemRule extract items with specified name from a list of maps. -// The result is store in a new array. 
-// Example: -// Source: {items: []List{ map{"key": "val1"}, map{"key", "val2"} } } -// extract-list-item -path:items -item:key -to:keys -// result: -// {items: []List{ map{"key": "val1"}, map{"key", "val2"} }, keys: []List {"val1", "val2"} } -type ExtractListItemRule struct { - Path Selector - Item string - To string -} - -// Apply extracts items from array. -func (r *ExtractListItemRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to extract items from configuration") - } - }() - - node, found := Lookup(ast, r.Path) - if !found { - return nil - } - - nodeVal := node.Value() - if nodeVal == nil { - return nil - } - - l, isList := nodeVal.(*List) - if !isList { - return nil - } - - newList := &List{ - value: make([]Node, 0, len(l.value)), - } - - for _, n := range l.value { - in, found := n.Find(r.Item) - if !found { - continue - } - - vn, ok := in.Value().(Node) - if !ok { - continue - } - - if ln, ok := vn.(*List); ok { - for _, lnItem := range ln.value { - newList.value = append(newList.value, lnItem.Clone()) - } - continue - } - - newList.value = append(newList.value, vn.Clone()) - } - - return Insert(ast, newList, r.To) -} - -// ExtractListItem creates a ExtractListItemRule -func ExtractListItem(path Selector, item, target string) *ExtractListItemRule { - return &ExtractListItemRule{ - Path: path, - Item: item, - To: target, - } -} - -// RenameRule takes a selectors and will rename the last path of a Selector to a new name. -type RenameRule struct { - From Selector - To string -} - -// Apply renames the last items of a Selector to a new name and keep all the other values and will -// return an error on failure. -func (r *RenameRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to rename section of configuration") - } - }() - - // Skip rename when node is not found. - node, ok := Lookup(ast, r.From) - if !ok { - return nil - } - - n, ok := node.(*Key) - if !ok { - return fmt.Errorf("cannot rename, invalid type expected 'Key' received '%T'", node) - } - n.name = r.To - return nil -} - -// Rename creates a rename rule. -func Rename(from Selector, to string) *RenameRule { - return &RenameRule{From: from, To: to} -} - -// CopyRule take a from Selector and a destination selector and will insert an existing node into -// the destination, will return an errors if the types are incompatible. -type CopyRule struct { - From Selector - To Selector -} - -// Copy creates a copy rule. -func Copy(from, to Selector) *CopyRule { - return &CopyRule{From: from, To: to} -} - -// Apply copy a part of a tree into a new destination. -func (r CopyRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to copy section of configuration") - } - }() - - node, ok := Lookup(ast, r.From) - // skip when the `from` node is not found. - if !ok { - return nil - } - - if err := Insert(ast, node, r.To); err != nil { - return err - } - - return nil -} - -// TranslateRule take a selector and will try to replace any values that match the translation -// table. -type TranslateRule struct { - Path Selector - Mapper map[string]interface{} -} - -// Translate create a translation rule. -func Translate(path Selector, mapper map[string]interface{}) *TranslateRule { - return &TranslateRule{Path: path, Mapper: mapper} -} - -// Apply translates matching elements of a translation table for a specific selector. 
-func (r *TranslateRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to translate elements of configuration") - } - }() - - // Skip translate when node is not found. - node, ok := Lookup(ast, r.Path) - if !ok { - return nil - } - - n, ok := node.(*Key) - if !ok { - return fmt.Errorf("cannot rename, invalid type expected 'Key' received '%T'", node) - } - - for k, v := range r.Mapper { - if k == n.Value().(Node).Value() { - val := reflect.ValueOf(v) - nodeVal, err := load(val) - if err != nil { - return err - } - n.value = nodeVal - } - } - - return nil -} - -// TranslateWithRegexpRule take a selector and will try to replace using the regular expression. -type TranslateWithRegexpRule struct { - Path Selector - Re *regexp.Regexp - With string -} - -// MarshalYAML marshal a TranslateWithRegexpRule into a YAML document. -func (r *TranslateWithRegexpRule) MarshalYAML() (interface{}, error) { - return map[string]interface{}{ - "path": r.Path, - "re": r.Re.String(), - "with": r.With, - }, nil -} - -// UnmarshalYAML unmarshal a YAML document into a TranslateWithRegexpRule. -func (r *TranslateWithRegexpRule) UnmarshalYAML(unmarshal func(interface{}) error) error { - tmp := struct { - Path string - Re string - With string - }{} - - if err := unmarshal(&tmp); err != nil { - return errors.New(err, "cannot unmarshal into a TranslateWithRegexpRule") - } - - re, err := regexp.Compile(tmp.Re) - if err != nil { - errors.New(err, "invalid regular expression for TranslateWithRegexpRule") - } - - *r = TranslateWithRegexpRule{ - Path: tmp.Path, - Re: re, - With: tmp.With, - } - return nil -} - -// TranslateWithRegexp create a translation rule. -func TranslateWithRegexp(path Selector, re *regexp.Regexp, with string) *TranslateWithRegexpRule { - return &TranslateWithRegexpRule{Path: path, Re: re, With: with} -} - -// Apply translates matching elements of a translation table for a specific selector. -func (r *TranslateWithRegexpRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to translate elements of configuration using regex") - } - }() - - // Skip translate when node is not found. - node, ok := Lookup(ast, r.Path) - if !ok { - return nil - } - - n, ok := node.(*Key) - if !ok { - return fmt.Errorf("cannot rename, invalid type expected 'Key' received '%T'", node) - } - - candidate, ok := n.value.Value().(string) - if !ok { - return fmt.Errorf("cannot filter on value expected 'string' and received %T", candidate) - } - - s := r.Re.ReplaceAllString(candidate, r.With) - val := reflect.ValueOf(s) - nodeVal, err := load(val) - if err != nil { - return err - } - - n.value = nodeVal - - return nil -} - -// MapRule allow to apply multiple rules on a subset of a Tree based on a provided selector. -type MapRule struct { - Path Selector - Rules []Rule -} - -// Map creates a new map rule. -func Map(path Selector, rules ...Rule) *MapRule { - return &MapRule{Path: path, Rules: rules} -} - -// Apply maps multiples rules over a subset of the tree. -func (r *MapRule) Apply(agentInfo AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to apply multiple rules on configuration") - } - }() - - node, ok := Lookup(ast, r.Path) - // Skip map when node is not found. 
- if !ok { - return nil - } - - n, ok := node.(*Key) - if !ok { - return fmt.Errorf( - "cannot iterate over node, invalid type expected 'Key' received '%T'", - node, - ) - } - - switch t := n.Value().(type) { - case *List: - l, err := mapList(agentInfo, r, t) - if err != nil { - return err - } - n.value = l - return nil - case *Dict: - d, err := mapDict(agentInfo, r, t) - if err != nil { - return err - } - n.value = d - return nil - case *Key: - switch t := n.Value().(type) { - case *List: - l, err := mapList(agentInfo, r, t) - if err != nil { - return err - } - n.value = l - return nil - case *Dict: - d, err := mapDict(agentInfo, r, t) - if err != nil { - return err - } - n.value = d - return nil - default: - return fmt.Errorf( - "cannot iterate over node, invalid type expected 'List' or 'Dict' received '%T'", - node, - ) - } - } - - return fmt.Errorf( - "cannot iterate over node, invalid type expected 'List' or 'Dict' received '%T'", - node, - ) -} - -func mapList(agentInfo AgentInfo, r *MapRule, l *List) (*List, error) { - values := l.Value().([]Node) - - for idx, item := range values { - newAST := &AST{root: item} - for _, rule := range r.Rules { - err := rule.Apply(agentInfo, newAST) - if err != nil { - return nil, err - } - values[idx] = newAST.root - } - } - return l, nil -} - -func mapDict(agentInfo AgentInfo, r *MapRule, l *Dict) (*Dict, error) { - newAST := &AST{root: l} - for _, rule := range r.Rules { - err := rule.Apply(agentInfo, newAST) - if err != nil { - return nil, err - } - } - - n, ok := newAST.root.(*Dict) - if !ok { - return nil, fmt.Errorf( - "after applying rules from map, root is no longer a 'Dict' it is an invalid type of '%T'", - newAST.root, - ) - } - return n, nil -} - -// MarshalYAML marshal a MapRule into a YAML document. -func (r *MapRule) MarshalYAML() (interface{}, error) { - rules, err := NewRuleList(r.Rules...).MarshalYAML() - if err != nil { - return nil, err - } - - return map[string]interface{}{ - "path": r.Path, - "rules": rules, - }, nil -} - -// UnmarshalYAML unmarshal a YAML document into a MapRule. -func (r *MapRule) UnmarshalYAML(unmarshal func(interface{}) error) error { - tmp := struct { - Path string - Rules RuleList - }{} - - if err := unmarshal(&tmp); err != nil { - return errors.New(err, "cannot unmarshal into a MapRule") - } - - *r = MapRule{ - Path: tmp.Path, - Rules: tmp.Rules.Rules, - } - return nil -} - -// FilterRule allows to filter the tree and return only a subset of selectors. -type FilterRule struct { - Selectors []Selector -} - -// Filter returns a new Filter Rule. -func Filter(selectors ...Selector) *FilterRule { - return &FilterRule{Selectors: selectors} -} - -// Apply filters a Tree based on list of selectors. -func (r *FilterRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to filter subset of configuration") - } - }() - - mergedAST := &AST{root: &Dict{}} - for _, selector := range r.Selectors { - newAST, ok := Select(ast.Clone(), selector) - if !ok { - continue - } - mergedAST, err = Combine(mergedAST, newAST) - if err != nil { - return err - } - } - ast.root = mergedAST.root - return nil -} - -// FilterValuesRule allows to filter the tree and return only a subset of selectors with a predefined set of values. -type FilterValuesRule struct { - Selector Selector - Key Selector - Values []interface{} -} - -// FilterValues returns a new FilterValues Rule. 
-func FilterValues(selector Selector, key Selector, values ...interface{}) *FilterValuesRule { - return &FilterValuesRule{Selector: selector, Key: key, Values: values} -} - -// Apply filters a Tree based on list of selectors. -func (r *FilterValuesRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to filter section based on values from configuration") - } - }() - - node, ok := Lookup(ast, r.Selector) - // Skip map when node is not found. - if !ok { - return nil - } - - n, ok := node.(*Key) - if !ok { - return fmt.Errorf( - "cannot iterate over node, invalid type expected 'Key' received '%T'", - node, - ) - } - - l, ok := n.Value().(*List) - if !ok { - return fmt.Errorf( - "cannot iterate over node, invalid type expected 'List' received '%T'", - node, - ) - } - - values := l.Value().([]Node) - var newNodes []Node - - for idx := 0; idx < len(values); idx++ { - item := values[idx] - newRoot := &AST{root: item} - - newAST, ok := Lookup(newRoot, r.Key) - if !ok { - newNodes = append(newNodes, item) - continue - } - - // filter values - n, ok := newAST.(*Key) - if !ok { - return fmt.Errorf("cannot filter on value, invalid type expected 'Key' received '%T'", newAST) - } - - if n.name != r.Key { - newNodes = append(newNodes, item) - continue - } - - for _, v := range r.Values { - if v == n.value.Value() { - newNodes = append(newNodes, item) - break - } - } - - } - - l.value = newNodes - n.value = l - return nil -} - -// FilterValuesWithRegexpRule allows to filter the tree and return only a subset of selectors with -// a regular expression. -type FilterValuesWithRegexpRule struct { - Selector Selector - Key Selector - Re *regexp.Regexp -} - -// FilterValuesWithRegexp returns a new FilterValuesWithRegexp Rule. -func FilterValuesWithRegexp( - selector Selector, - key Selector, - re *regexp.Regexp, -) *FilterValuesWithRegexpRule { - return &FilterValuesWithRegexpRule{Selector: selector, Key: key, Re: re} -} - -// MarshalYAML marshal a FilterValuesWithRegexpRule into a YAML document. -func (r *FilterValuesWithRegexpRule) MarshalYAML() (interface{}, error) { - return map[string]interface{}{ - "selector": r.Selector, - "key": r.Key, - "re": r.Re.String(), - }, nil -} - -// UnmarshalYAML unmarshal a YAML document into a FilterValuesWithRegexpRule. -func (r *FilterValuesWithRegexpRule) UnmarshalYAML(unmarshal func(interface{}) error) error { - tmp := struct { - Selector string - Key string - Re string - }{} - - if err := unmarshal(&tmp); err != nil { - return errors.New(err, "cannot unmarshal into a FilterValuesWithRegexpRule") - } - - re, err := regexp.Compile(tmp.Re) - if err != nil { - errors.New(err, "invalid regular expression for FilterValuesWithRegexpRule") - } - *r = FilterValuesWithRegexpRule{ - Selector: tmp.Selector, - Key: tmp.Key, - Re: re, - } - - return nil -} - -// Apply filters a Tree based on list of selectors. -func (r *FilterValuesWithRegexpRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to filter section of configuration using regex") - } - }() - - node, ok := Lookup(ast, r.Selector) - // Skip map when node is not found. 
- if !ok { - return nil - } - - n, ok := node.(*Key) - if !ok { - return fmt.Errorf( - "cannot iterate over node, invalid type expected 'Key' received '%T'", - node, - ) - } - - l, ok := n.Value().(*List) - if !ok { - return fmt.Errorf( - "cannot iterate over node, invalid type expected 'List' received '%T'", - node, - ) - } - - values := l.Value().([]Node) - var newNodes []Node - - for idx := 0; idx < len(values); idx++ { - item := values[idx] - newRoot := &AST{root: item} - - newAST, ok := Lookup(newRoot, r.Key) - if !ok { - // doesn't have key so its filtered out - continue - } - - // filter values - n, ok := newAST.(*Key) - if !ok { - return fmt.Errorf("cannot filter on value, invalid type expected 'Key' received '%T'", newAST) - } - - if n.name != r.Key { - // doesn't match so its filtered out - continue - } - - candidate, ok := n.value.Value().(string) - if !ok { - return fmt.Errorf("cannot filter on value expected 'string' and received %T", candidate) - } - - if r.Re.MatchString(candidate) { - newNodes = append(newNodes, item) - } - } - - l.value = newNodes - n.value = l - return nil -} - -// InsertDefaultsRule inserts selected paths into keys if they do not exist. -// -// In the case that an exiting key already exists then it is not inserted. -type InsertDefaultsRule struct { - Selectors []Selector - Path string -} - -// Apply applies select into rule. -func (r *InsertDefaultsRule) Apply(_ AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to select data into configuration") - } - }() - - insertTo := ast.root - for _, part := range splitPath(r.Path) { - n, ok := insertTo.Find(part) - if !ok { - insertTo = nil - break - } - insertTo = n - } - - // path completely missing; easy path is just to insert all selectors - if insertTo == nil { - target := &Dict{} - for _, selector := range r.Selectors { - lookupNode, ok := Lookup(ast.Clone(), selector) - if !ok { - continue - } - target.value = append(target.value, lookupNode.Clone()) - } - if len(target.value) > 0 { - return Insert(ast, target, r.Path) - } - return nil - } - - // path does exist, so we insert the keys only if they don't exist - for _, selector := range r.Selectors { - lookupNode, ok := Lookup(ast.Clone(), selector) - if !ok { - continue - } - switch lt := lookupNode.(type) { - case *Key: - _, ok := insertTo.Find(lt.name) - if !ok { - // doesn't exist; insert it - if err := Insert(ast, lt, r.Path); err != nil { - return err - } - } - } - } - - return nil -} - -// InsertDefaults creates a InsertDefaultsRule -func InsertDefaults(path string, selectors ...Selector) *InsertDefaultsRule { - return &InsertDefaultsRule{ - Selectors: selectors, - Path: path, - } -} - -// InjectQueueRule injects inferred queue parameters into program -// configurations. -type InjectQueueRule struct{} - -// InjectQueue creates a InjectQueueRule -func InjectQueue() *InjectQueueRule { - return &InjectQueueRule{} -} - -// Apply adds queue parameters to a program configuration based on the -// output settings "worker" and "bulk_max_size". 
-func (r *InjectQueueRule) Apply(agentInfo AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to inject queue parameters into configuration") - } - }() - - outputNode, found := Lookup(ast, "output") - if !found { - return nil - } - - outputDict, ok := outputNode.Value().(*Dict) - if !ok || len(outputDict.value) == 0 { - return nil - } - outputChild := outputDict.value[0] - - // Initialize the bulk_max_size and worker parameters to the global defaults, - // then override them if there's an explicit setting. - bulkMaxSize := 50 - worker := 1 - - if bulkMaxSizeNode, ok := outputChild.Find("bulk_max_size"); ok { - if bulkMaxSizeInt, ok := bulkMaxSizeNode.Value().(*IntVal); ok { - bulkMaxSize = bulkMaxSizeInt.value - } - } - - if workerNode, ok := outputChild.Find("worker"); ok { - if workerInt, ok := workerNode.Value().(*IntVal); ok { - worker = workerInt.value - } - } - - // Insert memory queue settings based on the output params. - queueNode := queueDictFromOutputSettings(bulkMaxSize, worker) - if err := Insert(ast, queueNode, "queue.mem"); err != nil { - return err - } - - return nil -} - -func queueDictFromOutputSettings(bulkMaxSize, worker int) Node { - events, minEvents := queueParamsFromOutputSettings(bulkMaxSize, worker) - dict := &Dict{ - value: []Node{ - &Key{ - name: "events", - value: &IntVal{value: events}, - }, - &Key{ - name: "flush", - value: &Dict{ - value: []Node{ - &Key{ - name: "min_events", - value: &IntVal{value: minEvents}, - }, - &Key{ - name: "timeout", - value: &StrVal{value: "1s"}, - }, - }, - }, - }, - }, - } - return dict -} - -// Given output settings, returns inferred values for queue.mem.events -// and queue.mem.flush.min_events. -// See https://github.com/elastic/beats/issues/26638. -func queueParamsFromOutputSettings(bulkMaxSize, worker int) (int, int) { - // Create space in the queue for each worker to have a full batch in flight - // and another one pending, plus a correction factor so users with the - // default worker count of 1 aren't surprised by an unreasonably small queue. - // These formulas could and perhaps should be customized further based on - // the specific beats being called, but their default behavior is already to - // significantly reduce the queue size, so let's get some experience using - // these baselines before optimizing further. - events := bulkMaxSize * (2*worker + 5) - minEvents := bulkMaxSize - return events, minEvents -} - -// InjectHeadersRule injects headers into output. -type InjectHeadersRule struct{} - -// Apply injects headers into output. 
-func (r *InjectHeadersRule) Apply(agentInfo AgentInfo, ast *AST) (err error) { - defer func() { - if err != nil { - err = errors.New(err, "failed to inject headers into configuration") - } - }() - - headers := agentInfo.Headers() - if len(headers) == 0 { - return nil - } - - outputNode, found := Lookup(ast, "output") - if !found { - return nil - } - - elasticsearchNode, found := outputNode.Find("elasticsearch") - if found { - headersNode, found := elasticsearchNode.Find("headers") - if found { - headersDict, ok := headersNode.Value().(*Dict) - if !ok { - return errors.New("headers not a dictionary") - } - - for k, v := range headers { - headersDict.value = append(headersDict.value, &Key{ - name: k, - value: &StrVal{value: v}, - }) - } - } else { - nodes := make([]Node, 0, len(headers)) - for k, v := range headers { - nodes = append(nodes, &Key{ - name: k, - value: &StrVal{value: v}, - }) - } - headersDict := NewDict(nodes) - elasticsearchDict, ok := elasticsearchNode.Value().(*Dict) - if !ok { - return errors.New("elasticsearch output is not a dictionary") - } - elasticsearchDict.value = append(elasticsearchDict.value, &Key{ - name: "headers", - value: headersDict, - }) - } - } - - return nil -} - -// InjectHeaders creates a InjectHeadersRule -func InjectHeaders() *InjectHeadersRule { - return &InjectHeadersRule{} -} - -// NewRuleList returns a new list of rules to be executed. -func NewRuleList(rules ...Rule) *RuleList { - return &RuleList{Rules: rules} -} - -func keys(m map[string]interface{}) []string { - keys := make([]string, 0, len(m)) - for k := range m { - keys = append(keys, k) - } - return keys -} - -func datastreamNamespaceFromInputNode(inputNode Node) string { - const defaultNamespace = "default" - - if namespaceNode, found := inputNode.Find("data_stream.namespace"); found { - nsKey, ok := namespaceNode.(*Key) - if ok { - if newNamespace := nsKey.value.String(); newNamespace != "" { - return newNamespace - } - } - } - - dsNode, found := inputNode.Find("data_stream") - if found { - dsMapNode, ok := dsNode.Value().(*Dict) - if ok { - nsNode, found := dsMapNode.Find("namespace") - if found { - nsKey, ok := nsNode.(*Key) - if ok { - if newNamespace := nsKey.value.String(); newNamespace != "" { - return newNamespace - } - } - } - } - } - - return defaultNamespace -} - -func datastreamTypeFromInputNode(inputNode Node, defaultType string) string { - if dsTypeNode, found := inputNode.Find("data_stream.type"); found { - dsTypeKey, ok := dsTypeNode.(*Key) - if ok { - if newDatastreamType := dsTypeKey.value.String(); newDatastreamType != "" { - return newDatastreamType - } - } - } - - dsNode, found := inputNode.Find("data_stream") - if found { - dsMapNode, ok := dsNode.Value().(*Dict) - if ok { - typeNode, found := dsMapNode.Find("type") - if found { - typeKey, ok := typeNode.(*Key) - if ok { - if newDatastreamType := typeKey.value.String(); newDatastreamType != "" { - return newDatastreamType - } - } - } - } - } - - return defaultType -} - -func datasetNameFromStreamNode(streamNode Node) string { - const defaultDataset = "generic" - - if dsNameNode, found := streamNode.Find("data_stream.dataset"); found { - dsNameKey, ok := dsNameNode.(*Key) - if ok { - if newDatasetName := dsNameKey.value.String(); newDatasetName != "" { - return newDatasetName - } - } - } - - dsNode, found := streamNode.Find("data_stream") - if found { - dsMapNode, ok := dsNode.Value().(*Dict) - if ok { - dsNameNode, found := dsMapNode.Find("dataset") - if found { - dsKey, ok := dsNameNode.(*Key) - if ok { - if 
newDataset := dsKey.value.String(); newDataset != "" { - return newDataset - } - } - } - } - } - - return defaultDataset -} diff --git a/internal/pkg/agent/transpiler/rules_test.go b/internal/pkg/agent/transpiler/rules_test.go deleted file mode 100644 index 840e1442fde..00000000000 --- a/internal/pkg/agent/transpiler/rules_test.go +++ /dev/null @@ -1,1122 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package transpiler - -import ( - "regexp" - "testing" - - "github.com/google/go-cmp/cmp" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v2" - - "github.com/elastic/elastic-agent/internal/pkg/agent/internal/yamltest" -) - -func TestRules(t *testing.T) { - testcases := map[string]struct { - givenYAML string - expectedYAML string - rule Rule - }{ - "fix streams": { - givenYAML: ` -inputs: - - name: All default - type: file - streams: - - paths: /var/log/mysql/error.log - - name: Specified namespace - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - - name: Specified dataset - type: file - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: dsds - - name: All specified - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: dsds - - name: All specified with empty strings - type: file - data_stream.namespace: "" - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: "" -`, - expectedYAML: ` -inputs: - - name: All default - type: file - data_stream.namespace: default - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: generic - - name: Specified namespace - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: generic - - name: Specified dataset - type: file - data_stream.namespace: default - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: dsds - - name: All specified - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: dsds - - name: All specified with empty strings - type: file - data_stream.namespace: default - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: generic -`, - rule: &RuleList{ - Rules: []Rule{ - FixStream(), - }, - }, - }, - - "inject index": { - givenYAML: ` -inputs: - - name: All default - type: file - streams: - - paths: /var/log/mysql/error.log - - name: Specified namespace - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - - - name: Specified dataset - type: file - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: dsds - - name: All specified - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: dsds - - name: All specified with empty strings - type: file - data_stream.namespace: "" - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: "" -`, - expectedYAML: ` -inputs: - - name: All default - type: file - streams: - - paths: /var/log/mysql/error.log - index: mytype-generic-default - - name: Specified namespace - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - index: mytype-generic-nsns - - - name: Specified dataset - type: file - streams: - - paths: 
/var/log/mysql/error.log - data_stream.dataset: dsds - index: mytype-dsds-default - - name: All specified - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: dsds - index: mytype-dsds-nsns - - name: All specified with empty strings - type: file - data_stream.namespace: "" - streams: - - paths: /var/log/mysql/error.log - data_stream.dataset: "" - index: mytype-generic-default -`, - rule: &RuleList{ - Rules: []Rule{ - InjectIndex("mytype"), - }, - }, - }, - - "inject stream": { - givenYAML: ` -inputs: - - name: No streams, no IDs - type: file - - name: With streams and IDs - id: input-id - type: file - data_stream.namespace: nsns - streams: - - paths: /var/log/mysql/error.log - id: stream-id - data_stream.dataset: dsds - - name: With processors - id: input-id - type: file - data_stream.namespace: nsns - processors: - - add_fields: - target: some - fields: - dataset: value - streams: - - paths: /var/log/mysql/error.log - id: stream-id - data_stream.dataset: dsds - processors: - - add_fields: - target: another - fields: - dataset: value -`, - expectedYAML: ` -inputs: - - name: No streams, no IDs - type: file - - name: With streams and IDs - id: input-id - type: file - data_stream.namespace: nsns - processors: - - add_fields: - target: "@metadata" - fields: - input_id: input-id - streams: - - paths: /var/log/mysql/error.log - id: stream-id - data_stream.dataset: dsds - processors: - - add_fields: - target: data_stream - fields: - type: stream-type - namespace: nsns - dataset: dsds - - add_fields: - target: event - fields: - dataset: dsds - - add_fields: - target: "@metadata" - fields: - stream_id: stream-id - - name: With processors - id: input-id - type: file - data_stream.namespace: nsns - processors: - - add_fields: - target: some - fields: - dataset: value - - add_fields: - target: "@metadata" - fields: - input_id: input-id - streams: - - paths: /var/log/mysql/error.log - id: stream-id - data_stream.dataset: dsds - processors: - - add_fields: - target: another - fields: - dataset: value - - add_fields: - target: data_stream - fields: - type: stream-type - namespace: nsns - dataset: dsds - - add_fields: - target: event - fields: - dataset: dsds - - add_fields: - target: "@metadata" - fields: - stream_id: stream-id -`, - rule: &RuleList{ - Rules: []Rule{ - InjectStreamProcessor("insert_after", "stream-type"), - }, - }, - }, - - "inject agent info": { - givenYAML: ` -inputs: - - name: No processors - type: file - - name: With processors - type: file - processors: - - add_fields: - target: other - fields: - data: more -`, - expectedYAML: ` -inputs: - - name: No processors - type: file - processors: - - add_fields: - target: elastic_agent - fields: - id: agent-id - snapshot: false - version: 8.0.0 - - add_fields: - target: agent - fields: - id: agent-id - - name: With processors - type: file - processors: - - add_fields: - target: other - fields: - data: more - - add_fields: - target: elastic_agent - fields: - id: agent-id - snapshot: false - version: 8.0.0 - - add_fields: - target: agent - fields: - id: agent-id -`, - rule: &RuleList{ - Rules: []Rule{ - InjectAgentInfo(), - }, - }, - }, - - "extract items from array": { - givenYAML: ` -streams: - - name: MySQL error log - input: - type: file - path: /var/log/mysql/error.log - - name: MySQL access log - input: - type: file - path: /var/log/mysql/access.log - - name: MySQL metrics - input: - type: mysql - host: localhost - port: 3306 -`, - expectedYAML: ` -streams: - - name: MySQL error 
log - input: - type: file - path: /var/log/mysql/error.log - - name: MySQL access log - input: - type: file - path: /var/log/mysql/access.log - - name: MySQL metrics - input: - type: mysql - host: localhost - port: 3306 -inputs: - - type: file - path: /var/log/mysql/error.log - - type: file - path: /var/log/mysql/access.log - - type: mysql - host: localhost - port: 3306 -`, - rule: &RuleList{ - Rules: []Rule{ - ExtractListItem("streams", "input", "inputs"), - }, - }, - }, - "two level rename": { - givenYAML: ` -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - expectedYAML: ` -output: - what: - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - rule: &RuleList{ - Rules: []Rule{ - Rename("output.elasticsearch", "what"), - }, - }, - }, - "rename non existing key": { - givenYAML: ` -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - expectedYAML: ` -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - rule: &RuleList{ - Rules: []Rule{ - Rename("donoexist", "what"), - }, - }, - }, - "select into": { - givenYAML: ` -level_one: - key1: val1 - key2: - d_key1: val2 - d_key2: val3 -rest: of -`, - expectedYAML: ` -level_one: - key1: val1 - key2: - d_key1: val2 - d_key2: val3 - level_two: - key1: val1 - key2: - d_key1: val2 - d_key2: val3 -rest: of -`, - rule: &RuleList{ - Rules: []Rule{ - SelectInto("level_one.level_two", "level_one.key1", "level_one.key2"), - }, - }, - }, - "copy top level slice": { - givenYAML: ` -inputs: - - type: event/file - - type: metric/docker -`, - expectedYAML: ` -inputs: - - type: event/file - - type: metric/docker -filebeat: - inputs: - - type: event/file - - type: metric/docker -`, - rule: &RuleList{ - Rules: []Rule{ - Copy("inputs", "filebeat"), - }, - }, - }, - "copy keep ordering for filtering": { - givenYAML: ` -inputs: - - type: event/file - - type: metric/docker -`, - expectedYAML: ` -filebeat: - inputs: - - type: event/file - - type: metric/docker -`, - rule: &RuleList{ - Rules: []Rule{ - Copy("inputs", "filebeat"), - Filter("filebeat"), - }, - }, - }, - "copy non existing key": { - givenYAML: ` -inputs: - - type: event/file - - type: metric/docker -`, - expectedYAML: ` -inputs: - - type: event/file - - type: metric/docker -`, - rule: &RuleList{ - Rules: []Rule{ - Copy("what-inputs", "filebeat"), - }, - }, - }, - "translate key values to another value": { - givenYAML: ` -name: "hello" -`, - expectedYAML: ` -name: "bonjour" -`, - rule: &RuleList{ - Rules: []Rule{ - Translate("name", map[string]interface{}{ - "aurevoir": "a bientot", - "hello": "bonjour", - }), - }, - }, - }, - "translate on non existing key": { - givenYAML: ` -name: "hello" -`, - expectedYAML: ` -name: "hello" -`, - rule: &RuleList{ - Rules: []Rule{ - Translate("donotexist", map[string]interface{}{ - "aurevoir": "a bientot", - "hello": "bonjour", - }), - }, - }, - }, - "translate 1 level deep key values to another value": { - givenYAML: ` -input: - type: "aurevoir" -`, - expectedYAML: ` -input: - type: "a bientot" -`, - rule: &RuleList{ - Rules: []Rule{ - Translate("input.type", map[string]interface{}{ - "aurevoir": "a bientot", - "hello": "bonjour", - }), - }, - }, - }, - "map operation on array": { - givenYAML: ` -inputs: - - type: event/file - - type: log/docker -`, - expectedYAML: ` -inputs: - - type: log - - type: docker -`, - rule: &RuleList{ - Rules: []Rule{ - Map("inputs", - Translate("type", 
map[string]interface{}{ - "event/file": "log", - "log/docker": "docker", - })), - }, - }, - }, - "map operation on non existing": { - givenYAML: ` -inputs: - - type: event/file - - type: log/docker -`, - expectedYAML: ` -inputs: - - type: event/file - - type: log/docker -`, - rule: &RuleList{ - Rules: []Rule{ - Map("no-inputs", - Translate("type", map[string]interface{}{ - "event/file": "log", - "log/docker": "docker", - })), - }, - }, - }, - "single selector on top level keys": { - givenYAML: ` -inputs: - - type: event/file -output: - logstash: - port: 5 -`, - expectedYAML: ` -output: - logstash: - port: 5 -`, - rule: &RuleList{ - Rules: []Rule{ - Filter("output"), - }, - }, - }, - "multiple selectors on top level keys": { - givenYAML: ` -inputs: - - type: event/file -filebeat: - - type: docker -output: - logstash: - port: 5 -`, - expectedYAML: ` -inputs: - - type: event/file -output: - logstash: - port: 5 -`, - rule: &RuleList{ - Rules: []Rule{ - Filter("output", "inputs"), - }, - }, - }, - "filter for non existing keys": { - givenYAML: ` -inputs: - - type: event/file -filebeat: - - type: docker -output: - logstash: - port: 5 -`, - expectedYAML: ``, - rule: &RuleList{ - Rules: []Rule{ - Filter("no-output", "no-inputs"), - }, - }, - }, - - "filter for values": { - givenYAML: ` -inputs: - - type: log - - type: tcp - - type: udp -`, - expectedYAML: ` -inputs: - - type: log - - type: tcp -`, - rule: &RuleList{ - Rules: []Rule{ - FilterValues("inputs", "type", "log", "tcp"), - }, - }, - }, - "filter for regexp": { - givenYAML: ` -inputs: - - type: metric/log - - type: metric/tcp - - type: udp - - type: unknown -`, - expectedYAML: ` -inputs: - - type: metric/log - - type: metric/tcp -`, - rule: &RuleList{ - Rules: []Rule{ - FilterValuesWithRegexp("inputs", "type", regexp.MustCompile("^metric/.*")), - }, - }, - }, - "translate with regexp": { - givenYAML: ` -inputs: - - type: metric/log - - type: metric/tcp -`, - expectedYAML: ` -inputs: - - type: log - - type: tcp -`, - rule: &RuleList{ - Rules: []Rule{ - Map("inputs", TranslateWithRegexp("type", regexp.MustCompile("^metric/(.*)"), "$1")), - }, - }, - }, - - "remove key": { - givenYAML: ` -key1: val1 -key2: val2 -`, - expectedYAML: ` -key1: val1 -`, - rule: &RuleList{ - Rules: []Rule{ - RemoveKey("key2"), - }, - }, - }, - - "copy item to list": { - givenYAML: ` -namespace: testing -inputs: - - type: metric/log - - type: metric/tcp -`, - expectedYAML: ` -namespace: testing -inputs: - - type: metric/log - namespace: testing - - type: metric/tcp - namespace: testing -`, - rule: &RuleList{ - Rules: []Rule{ - CopyToList("namespace", "inputs", "insert_after"), - }, - }, - }, - - "Make array": { - givenYAML: ` -sample: - log: "log value" -`, - expectedYAML: ` -sample: - log: "log value" -logs: - - "log value" -`, - rule: &RuleList{ - Rules: []Rule{ - MakeArray("sample.log", "logs"), - }, - }, - }, - "insert defaults into existing": { - givenYAML: ` -level_one: - key1: val1 - key2: - d_key1: val2 - d_key2: val3 - level_two: - key2: - d_key3: val3 - d_key4: val4 -rest: of -`, - expectedYAML: ` -level_one: - key1: val1 - key2: - d_key1: val2 - d_key2: val3 - level_two: - key1: val1 - key2: - d_key3: val3 - d_key4: val4 -rest: of -`, - rule: &RuleList{ - Rules: []Rule{ - InsertDefaults("level_one.level_two", "level_one.key1", "level_one.key2"), - }, - }, - }, - - "insert defaults into not existing": { - givenYAML: ` -level_one: - key1: val1 - key2: - d_key1: val2 - d_key2: val3 -rest: of -`, - expectedYAML: ` -level_one: - key1: val1 - key2: - d_key1: 
val2 - d_key2: val3 - level_two: - key1: val1 - key2: - d_key1: val2 - d_key2: val3 -rest: of -`, - rule: &RuleList{ - Rules: []Rule{ - InsertDefaults("level_one.level_two", "level_one.key1", "level_one.key2"), - }, - }, - }, - - "inject auth headers: no headers": { - givenYAML: ` -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - expectedYAML: ` -output: - elasticsearch: - headers: - h1: test-header - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - rule: &RuleList{ - Rules: []Rule{ - InjectHeaders(), - }, - }, - }, - - "inject auth headers: existing headers": { - givenYAML: ` -output: - elasticsearch: - headers: - sample-header: existing - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - expectedYAML: ` -output: - elasticsearch: - headers: - sample-header: existing - h1: test-header - hosts: - - "127.0.0.1:9201" - - "127.0.0.1:9202" - logstash: - port: 5 -`, - rule: &RuleList{ - Rules: []Rule{ - InjectHeaders(), - }, - }, - }, - "inject queue settings": { - givenYAML: ` -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" - bulk_max_size: 46 - worker: 5 -`, - expectedYAML: ` -queue: - mem: - events: 690 - flush: - min_events: 46 - timeout: 1s - -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" - bulk_max_size: 46 - worker: 5 -`, - rule: &RuleList{ - Rules: []Rule{ - InjectQueue(), - }, - }, - }, - "inject queue settings falls back on default values": { - givenYAML: ` -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" -`, - expectedYAML: ` -queue: - mem: - events: 350 - flush: - min_events: 50 - timeout: 1s - -output: - elasticsearch: - hosts: - - "127.0.0.1:9201" -`, - rule: &RuleList{ - Rules: []Rule{ - InjectQueue(), - }, - }, - }, - } - - for name, test := range testcases { - t.Run(name, func(t *testing.T) { - a, err := makeASTFromYAML(test.givenYAML) - require.NoError(t, err) - - err = test.rule.Apply(FakeAgentInfo(), a) - require.NoError(t, err) - - v := &MapVisitor{} - a.Accept(v) - - var m map[string]interface{} - if len(test.expectedYAML) == 0 { - m = make(map[string]interface{}) - } else { - err := yamltest.FromYAML([]byte(test.expectedYAML), &m) - require.NoError(t, err) - } - - if !assert.True(t, cmp.Equal(m, v.Content)) { - diff := cmp.Diff(m, v.Content) - if diff != "" { - t.Errorf("mismatch (-want +got):\n%s", diff) - } - } - }) - } -} - -func makeASTFromYAML(yamlStr string) (*AST, error) { - var m map[string]interface{} - if err := yaml.Unmarshal([]byte(yamlStr), &m); err != nil { - return nil, err - } - - return NewAST(m) -} - -func TestSerialization(t *testing.T) { - value := NewRuleList( - Rename("from-value", "to-value"), - Copy("from-value", "to-value"), - Translate("path-value", map[string]interface{}{ - "key-v-1": "value-v-1", - "key-v-2": "value-v-2", - }), - TranslateWithRegexp("path-value", regexp.MustCompile("^metric/(.+)"), "log/$1"), - Map("path-value", - Rename("from-value", "to-value"), - Copy("from-value", "to-value"), - ), - Filter("f1", "f2"), - FilterValues("select-v", "key-v", "v1", "v2"), - FilterValuesWithRegexp("inputs", "type", regexp.MustCompile("^metric/.*")), - ExtractListItem("path.p", "item", "target"), - InjectIndex("index-type"), - InjectStreamProcessor("insert_after", "index-type"), - CopyToList("t1", "t2", "insert_after"), - CopyAllToList("t2", "insert_before", "a", "b"), - FixStream(), - SelectInto("target", "s1", "s2"), - InsertDefaults("target", "s1", "s2"), - InjectHeaders(), - ) - - y := `- rename: - from: 
from-value - to: to-value -- copy: - from: from-value - to: to-value -- translate: - path: path-value - mapper: - key-v-1: value-v-1 - key-v-2: value-v-2 -- translate_with_regexp: - path: path-value - re: ^metric/(.+) - with: log/$1 -- map: - path: path-value - rules: - - rename: - from: from-value - to: to-value - - copy: - from: from-value - to: to-value -- filter: - selectors: - - f1 - - f2 -- filter_values: - selector: select-v - key: key-v - values: - - v1 - - v2 -- filter_values_with_regexp: - key: type - re: ^metric/.* - selector: inputs -- extract_list_items: - path: path.p - item: item - to: target -- inject_index: - type: index-type -- inject_stream_processor: - type: index-type - on_conflict: insert_after -- copy_to_list: - item: t1 - to: t2 - on_conflict: insert_after -- copy_all_to_list: - to: t2 - except: - - a - - b - on_conflict: insert_before -- fix_stream: {} -- select_into: - selectors: - - s1 - - s2 - path: target -- insert_defaults: - selectors: - - s1 - - s2 - path: target -- inject_headers: {} -` - - t.Run("serialize_rules", func(t *testing.T) { - b, err := yaml.Marshal(value) - require.NoError(t, err) - assert.Equal(t, string(b), y) - }) - - t.Run("unserialize_rules", func(t *testing.T) { - v := &RuleList{} - err := yaml.Unmarshal([]byte(y), v) - require.NoError(t, err) - assert.Equal(t, value, v) - }) -} - -type fakeAgentInfo struct{} - -func (*fakeAgentInfo) AgentID() string { - return "agent-id" -} - -func (*fakeAgentInfo) Version() string { - return "8.0.0" -} - -func (*fakeAgentInfo) Snapshot() bool { - return false -} - -func (*fakeAgentInfo) Headers() map[string]string { - return map[string]string{ - "h1": "test-header", - } -} - -func FakeAgentInfo() AgentInfo { - return &fakeAgentInfo{} -} diff --git a/internal/pkg/agent/transpiler/steps.go b/internal/pkg/agent/transpiler/steps.go deleted file mode 100644 index 0d7e4941f3e..00000000000 --- a/internal/pkg/agent/transpiler/steps.go +++ /dev/null @@ -1,345 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package transpiler - -import ( - "context" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "time" - - "gopkg.in/yaml.v2" -) - -// StepList is a container that allow the same tree to be executed on multiple defined Step. -type StepList struct { - Steps []Step -} - -// NewStepList returns a new list of rules to be executed. -func NewStepList(steps ...Step) *StepList { - return &StepList{Steps: steps} -} - -// Step is an execution step which needs to be run. -type Step interface { - Execute(ctx context.Context, rootDir string) error -} - -// Execute executes a list of steps. -func (r *StepList) Execute(ctx context.Context, rootDir string) error { - var err error - for _, step := range r.Steps { - err = step.Execute(ctx, rootDir) - if err != nil { - return err - } - } - - return nil -} - -// MarshalYAML marsharl a steps list to YAML. 
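// A hypothetical usage sketch of the step list above (the constructors are
// the ones defined further down in this file; paths are placeholders, not
// original source): steps run sequentially against a root directory and the
// first failing step aborts the rest.
//
//	steps := NewStepList(
//		DeleteFile("data/stale.yml", false),
//		MoveFile("downloads/agent.tmp", "data/agent", true),
//		ExecFile(60, "bin/installer", "--upgrade"),
//	)
//	if err := steps.Execute(context.Background(), rootDir); err != nil {
//		// the remaining steps were skipped
//	}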
-func (r *StepList) MarshalYAML() (interface{}, error) { - doc := make([]map[string]Step, 0, len(r.Steps)) - - for _, step := range r.Steps { - var name string - switch step.(type) { - case *DeleteFileStep: - name = "delete_file" - case *MoveFileStep: - name = "move_file" - case *ExecFileStep: - name = "exec_file" - - default: - return nil, fmt.Errorf("unknown rule of type %T", step) - } - - subdoc := map[string]Step{ - name: step, - } - - doc = append(doc, subdoc) - } - return doc, nil -} - -// UnmarshalYAML unmarshal a YAML document into a RuleList. -func (r *StepList) UnmarshalYAML(unmarshal func(interface{}) error) error { - var unpackTo []map[string]interface{} - - err := unmarshal(&unpackTo) - if err != nil { - return err - } - - // NOTE: this is a bit of a hack because I want to make sure - // the unpack strategy stay in the struct implementation and yaml - // doesn't have a RawMessage similar to the JSON package, so partial unpack - // is not possible. - unpack := func(in interface{}, out interface{}) error { - b, err := yaml.Marshal(in) - if err != nil { - return err - } - return yaml.Unmarshal(b, out) - } - - var steps []Step - - for _, m := range unpackTo { - ks := keys(m) - if len(ks) > 1 { - return fmt.Errorf("unknown rule identifier, expecting one identifier and received %d", len(ks)) - } - - name := ks[0] - fields := m[name] - - var s Step - switch name { - case "delete_file": - s = &DeleteFileStep{} - case "move_file": - s = &MoveFileStep{} - case "exec_file": - s = &ExecFileStep{} - default: - return fmt.Errorf("unknown rule of type %s", name) - } - - if err := unpack(fields, s); err != nil { - return err - } - - steps = append(steps, s) - } - r.Steps = steps - return nil -} - -// DeleteFileStep removes a file from disk. -type DeleteFileStep struct { - Path string - // FailOnMissing fails if file is already missing - FailOnMissing bool `yaml:"fail_on_missing" config:"fail_on_missing"` -} - -// Execute executes delete file step. -func (r *DeleteFileStep) Execute(_ context.Context, rootDir string) error { - path, isSubpath, err := joinPaths(rootDir, r.Path) - if err != nil { - return err - } - - if !isSubpath { - return fmt.Errorf("invalid path value for operation 'Delete': %s", path) - } - - err = os.Remove(path) - - if os.IsNotExist(err) && r.FailOnMissing { - // is not found and should be reported - return err - } - - if err != nil && !os.IsNotExist(err) { - // report others - return err - } - - return nil -} - -// DeleteFile creates a DeleteFileStep -func DeleteFile(path string, failOnMissing bool) *DeleteFileStep { - return &DeleteFileStep{ - Path: path, - FailOnMissing: failOnMissing, - } -} - -// MoveFileStep moves a file to a new location. -type MoveFileStep struct { - Path string - Target string - // FailOnMissing fails if file is already missing - FailOnMissing bool `yaml:"fail_on_missing" config:"fail_on_missing"` -} - -// Execute executes move file step. 
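// For illustration, a sketch of the serialized form produced by MarshalYAML
// and round-tripped by UnmarshalYAML above: each step is a single-key map
// whose key selects the concrete step type. Field names follow yaml.v2
// defaults plus the struct tags shown in this file; the values are
// placeholders.
//
//	- delete_file:
//	    path: data/stale.yml
//	    fail_on_missing: false
//	- move_file:
//	    path: downloads/agent.tmp
//	    target: data/agent
//	    fail_on_missing: true
//	- exec_file:
//	    path: bin/installer
//	    args: ["--upgrade"]
//	    timeout: 60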
-func (r *MoveFileStep) Execute(_ context.Context, rootDir string) error { - path, isSubpath, err := joinPaths(rootDir, r.Path) - if err != nil { - return err - } - - if !isSubpath { - return fmt.Errorf("invalid path value for operation 'Move': %s", path) - } - - target, isSubpath, err := joinPaths(rootDir, r.Target) - if err != nil { - return err - } - - if !isSubpath { - return fmt.Errorf("invalid target value for operation 'Move': %s", target) - } - - err = os.Rename(path, target) - - if os.IsNotExist(err) && r.FailOnMissing { - // is not found and should be reported - return err - } - - if err != nil && !os.IsNotExist(err) { - // report others - return err - } - - return nil -} - -// MoveFile creates a MoveFileStep -func MoveFile(path, target string, failOnMissing bool) *MoveFileStep { - return &MoveFileStep{ - Path: path, - Target: target, - FailOnMissing: failOnMissing, - } -} - -// ExecFileStep executes a file. -type ExecFileStep struct { - Path string - Args []string - Timeout int -} - -// Execute executes file with provided arguments. -func (r *ExecFileStep) Execute(ctx context.Context, rootDir string) error { - path, isSubpath, err := joinPaths(rootDir, r.Path) - if err != nil { - return err - } - - if !isSubpath { - return fmt.Errorf("invalid path value for operation 'Exec': %s", path) - } - - // timeout defaults to 60 seconds - if r.Timeout == 0 { - r.Timeout = 60 - } - ctx, cancel := context.WithTimeout(ctx, time.Duration(r.Timeout)*time.Second) - defer cancel() - - cmd := exec.CommandContext(ctx, path, r.Args...) - cmd.Env = nil - cmd.Dir = rootDir - output, err := cmd.Output() - if errors.Is(ctx.Err(), context.DeadlineExceeded) { - return fmt.Errorf("operation 'Exec' timed out after %d seconds", r.Timeout) - } - if err != nil { - exitErr, ok := err.(*exec.ExitError) // nolint:errorlint // Require more logic changes. 
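// Descriptive note on the error handling below: stderr from the failed
// process is preferred for the returned message; trimmed stdout is the
// fallback, "(command had no output)" stands in when both are empty, and the
// exit code defaults to 1 when the error is not an *exec.ExitError.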
- if ok && exitErr.Stderr != nil { - errStr := strings.TrimSpace(string(exitErr.Stderr)) - if len(errStr) > 0 { - return fmt.Errorf("operation 'Exec' failed (return code: %d): %s", exitErr.ExitCode(), errStr) - } - } - exitCode := 1 - if ok { - exitCode = exitErr.ExitCode() - } - outStr := strings.TrimSpace(string(output)) - if len(outStr) == 0 { - outStr = "(command had no output)" - } - return fmt.Errorf("operation 'Exec' failed (return code: %d): %s", exitCode, outStr) - } - return nil -} - -// ExecFile creates a ExecFileStep -func ExecFile(timeoutSecs int, path string, args ...string) *ExecFileStep { - return &ExecFileStep{ - Path: path, - Args: args, - Timeout: timeoutSecs, - } -} - -// joinPaths joins paths and returns true if path is subpath of rootDir -func joinPaths(rootDir, path string) (string, bool, error) { - rootDir = filepath.FromSlash(rootDir) - path = filepath.FromSlash(path) - - if runtime.GOOS == "windows" { - // if is unix absolute fix to win absolute - if strings.HasPrefix(path, "\\") { - abs, err := filepath.Abs(rootDir) // get current volume - if err != nil { - return "", false, err - } - vol := filepath.VolumeName(abs) - path = filepath.Join(vol, path) - } - } - - if !filepath.IsAbs(path) { - path = filepath.Join(rootDir, path) - } - - absRoot := filepath.Clean(rootDir) - absPath := filepath.Clean(path) - - // path on windows are case insensitive - if !isFsCaseSensitive(rootDir) { - absRoot = strings.ToLower(absRoot) - absPath = strings.ToLower(absPath) - } - - return absPath, strings.HasPrefix(absPath, absRoot), nil -} - -func isFsCaseSensitive(rootDir string) bool { - defaultCaseSens := runtime.GOOS != "windows" && runtime.GOOS != "darwin" - - dir := filepath.Dir(rootDir) - base := filepath.Base(rootDir) - // if rootdir not exist create it - if _, err := os.Stat(rootDir); os.IsNotExist(err) { - os.MkdirAll(rootDir, 0775) - defer os.RemoveAll(rootDir) - } - - lowDir := filepath.Join(base, strings.ToLower(dir)) - upDir := filepath.Join(base, strings.ToUpper(dir)) - - if _, err := os.Stat(rootDir); err != nil { - return defaultCaseSens - } - - // check lower/upper dir - if _, lowErr := os.Stat(lowDir); os.IsNotExist(lowErr) { - return true - } - if _, upErr := os.Stat(upDir); os.IsNotExist(upErr) { - return true - } - - return defaultCaseSens -} diff --git a/internal/pkg/agent/transpiler/steps_test.go b/internal/pkg/agent/transpiler/steps_test.go deleted file mode 100644 index 7a14e63c00d..00000000000 --- a/internal/pkg/agent/transpiler/steps_test.go +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package transpiler - -import ( - "context" - "fmt" - "os" - "runtime" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestIsSubpath(t *testing.T) { - testCases := map[string][]struct { - root string - path string - resultPath string - isSubpath bool - }{ - "linux": { - {"/", "a", "/a", true}, - {"/a", "b", "/a/b", true}, - {"/a", "b/c", "/a/b/c", true}, - - {"/a/b", "/a/c", "/a/c", false}, - - {"/a/b", "/a/b/../c", "/a/c", false}, - {"/a/b", "../c", "/a/c", false}, - {"/a", "/a/b/c", "/a/b/c", true}, - {"/a", "/A/b/c", "/A/b/c", false}, - }, - "darwin": { - {"/", "a", "/a", true}, - {"/a", "b", "/a/b", true}, - {"/a", "b/c", "/a/b/c", true}, - {"/a/b", "/a/c", "/a/c", false}, - {"/a/b", "/a/b/../c", "/a/c", false}, - {"/a/b", "../c", "/a/c", false}, - {"/a", "/a/b/c", "/a/b/c", true}, - {"/a", "/A/b/c", "/a/b/c", true}, - }, - "windows": { - {"c:/", "/a", "c:\\a", true}, - {"c:/a", "b", "c:\\a\\b", true}, - {"c:/a", "b/c", "c:\\a\\b\\c", true}, - {"c:/a/b", "/a/c", "c:\\a\\c", false}, - {"c:/a/b", "/a/b/../c", "c:\\a\\c", false}, - {"c:/a/b", "../c", "c:\\a\\c", false}, - {"c:/a", "/a/b/c", "c:\\a\\b\\c", true}, - {"c:/a", "/A/b/c", "c:\\a\\b\\c", true}, - {"c:/a", "c:/A/b/c", "c:\\a\\b\\c", true}, - {"c:/a", "c:/b/c", "c:\\b\\c", false}, - }, - } - - osSpecificTests, found := testCases[runtime.GOOS] - if !found { - return - } - - for _, test := range osSpecificTests { - t.Run(fmt.Sprintf("[%s] root:'%s path: %s'", runtime.GOOS, test.root, test.path), func(t *testing.T) { - newPath, result, err := joinPaths(test.root, test.path) - assert.NoError(t, err) - assert.Equal(t, test.resultPath, newPath) - assert.Equal(t, test.isSubpath, result) - }) - } -} - -func TestExecFile_Success(t *testing.T) { - pwd, err := os.Getwd() - if err != nil { - panic(err) - } - binaryPath := "tests/exec-1.0-darwin-x86_64/exec" - step := ExecFile(10, binaryPath, "-output=stdout", "-exitcode=0") - err = step.Execute(context.Background(), pwd) - if err != nil { - t.Fatal("command should not have errored") - } -} - -func TestExecFile_StdErr(t *testing.T) { - pwd, err := os.Getwd() - if err != nil { - panic(err) - } - binaryPath := "tests/exec-1.0-darwin-x86_64/exec" - step := ExecFile(10, binaryPath, "-output=stderr", "-exitcode=15") - err = step.Execute(context.Background(), pwd) - if err == nil { - t.Fatal("command should have errored") - } - errMsg := "operation 'Exec' failed (return code: 15): message written to stderr" - if err.Error() != errMsg { - t.Fatalf("got unexpected error: %s", err) - } -} - -func TestExecFile_StdOut(t *testing.T) { - pwd, err := os.Getwd() - if err != nil { - panic(err) - } - binaryPath := "tests/exec-1.0-darwin-x86_64/exec" - step := ExecFile(10, binaryPath, "-output=stdout", "-exitcode=16") - err = step.Execute(context.Background(), pwd) - if err == nil { - t.Fatal("command should have errored") - } - errMsg := "operation 'Exec' failed (return code: 16): message written to stdout" - if err.Error() != errMsg { - t.Fatalf("got unexpected error: %s", err) - } -} - -func TestExecFile_NoOutput(t *testing.T) { - pwd, err := os.Getwd() - if err != nil { - panic(err) - } - binaryPath := "tests/exec-1.0-darwin-x86_64/exec" - step := ExecFile(10, binaryPath, "-no-output", "-exitcode=17") - err = step.Execute(context.Background(), pwd) - if err == nil { - t.Fatal("command should have errored") - } - errMsg := "operation 'Exec' failed (return code: 17): (command had no output)" - if err.Error() != errMsg { - t.Fatalf("got unexpected error: %s", err) - } -} diff --git 
a/internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/main.go b/internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/main.go deleted file mode 100644 index be043c53ac9..00000000000 --- a/internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/main.go +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "flag" - "fmt" - "io" - "os" -) - -func main() { - noOutput := flag.Bool("no-output", false, "disable output") - output := flag.String("output", "stderr", "output destination") - exitcode := flag.Int("exitcode", 0, "exit code") - flag.Parse() - - if *noOutput { - os.Exit(*exitcode) - } - - var dest io.Writer - if *output == "stdout" { - dest = os.Stdout - } else if *output == "stderr" { - dest = os.Stderr - } else { - panic("unknown destination") - } - - fmt.Fprintf(dest, "message written to %s", *output) - os.Exit(*exitcode) -} diff --git a/magefile.go b/magefile.go index 9599d4eef52..084aa62dc08 100644 --- a/magefile.go +++ b/magefile.go @@ -270,18 +270,17 @@ func (Build) Clean() { // TestBinaries build the required binaries for the test suite. func (Build) TestBinaries() error { - p := filepath.Join("internal", "pkg", "agent", "transpiler", "tests") - p2 := filepath.Join("pkg", "component") - execName := "exec" - fakeName := "fake" + p := filepath.Join("pkg", "component") + fakeBinary := "fake" if runtime.GOOS == "windows" { - execName += ".exe" - fakeName += ".exe" + fakeBinary += ".exe" } - return combineErr( - RunGo("build", "-o", filepath.Join(p, "exec-1.0-darwin-x86_64", execName), filepath.Join(p, "exec-1.0-darwin-x86_64", "main.go")), - RunGo("build", "-o", filepath.Join(p2, "fake", fakeName), filepath.Join(p2, "fake", "main.go")), - ) + outputName := filepath.Join(p, "fake", fakeBinary) + err := RunGo("build", "-o", outputName, filepath.Join(p, "fake", "main.go")) + if err != nil { + return err + } + return os.Chmod(outputName, 0755) } // All run all the code checks. 
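The rewritten TestBinaries target builds only the fake component binary and now marks it executable explicitly. The 0755 mode matters beyond convenience: the v2 runtime refuses to launch component binaries that are group- or other-writable or that lack the owner-exec bit (see HasStrictExecPerms later in this series), so the freshly built test binary must already satisfy that check. A minimal standalone sketch of the same mode test, using only the standard library (the path is a placeholder):

package main

import (
	"fmt"
	"os"
)

// checkExecMode mirrors the strict-permission rule: the file must be
// executable by its owner and not writable by group or other.
func checkExecMode(path string) error {
	info, err := os.Stat(path)
	if err != nil {
		return err
	}
	if info.Mode()&0o022 != 0 {
		return fmt.Errorf("%s is writable by group or other", path)
	}
	if info.Mode()&0o100 == 0 {
		return fmt.Errorf("%s is not executable by owner", path)
	}
	return nil
}

func main() {
	fmt.Println(checkExecMode("pkg/component/fake/fake"))
}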
diff --git a/pkg/component/component.go b/pkg/component/component.go index 75728b3e847..20969e9a11d 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -6,6 +6,7 @@ package component import ( "fmt" + "os" "strings" "github.com/elastic/elastic-agent-client/v7/pkg/client" @@ -13,6 +14,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/eql" + "github.com/elastic/elastic-agent/pkg/utils" ) const ( @@ -87,6 +89,10 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}) ([]Component, } // set the runtime variables that are available in the input specification runtime checks + hasRoot, err := utils.HasRoot() + if err != nil { + return nil, err + } vars, err := transpiler.NewVars(map[string]interface{}{ "runtime": map[string]interface{}{ "platform": r.platform.String(), @@ -96,6 +102,11 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}) ([]Component, "major": r.platform.Major, "minor": r.platform.Minor, }, + "user": map[string]interface{}{ + "uid": os.Geteuid(), + "gid": os.Getegid(), + "root": hasRoot, + }, }, nil) if err != nil { return nil, err @@ -260,12 +271,16 @@ func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { } idRaw, ok := input[idKey] if !ok { - return nil, fmt.Errorf("invalid 'inputs.%d', 'id' missing", idx) + // no ID; fallback to type + idRaw = t } id, ok := idRaw.(string) if !ok { return nil, fmt.Errorf("invalid 'inputs.%d.id', expected a string not a %T", idx, idRaw) } + if hasDuplicate(outputsMap, id) { + return nil, fmt.Errorf("invalid 'inputs.%d.id', has a duplicate id %q (id is required to be unique)", idx, id) + } outputName := "default" if outputRaw, ok := input[useKey]; ok { outputNameVal, ok := outputRaw.(string) @@ -346,6 +361,19 @@ func validateRuntimeChecks(spec *InputSpec, store eql.VarStore) error { return nil } +func hasDuplicate(outputsMap map[string]outputI, id string) bool { + for _, o := range outputsMap { + for _, i := range o.inputs { + for _, j := range i { + if j.id == id { + return true + } + } + } + } + return false +} + func getLogLevel(val map[string]interface{}) (client.UnitLogLevel, error) { const logLevelKey = "log_level" diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go index 06fc30c56c0..f43dfc244c6 100644 --- a/pkg/component/component_test.go +++ b/pkg/component/component_test.go @@ -171,7 +171,7 @@ func TestToComponents(t *testing.T) { Err: "invalid 'inputs.0.type', expected a string not a int", }, { - Name: "Invalid: inputs entry missing id", + Name: "Invalid: inputs entry duplicate because of missing id", Platform: linuxAMD64Platform, Policy: map[string]interface{}{ "outputs": map[string]interface{}{ @@ -184,9 +184,12 @@ func TestToComponents(t *testing.T) { map[string]interface{}{ "type": "filestream", }, + map[string]interface{}{ + "type": "filestream", + }, }, }, - Err: "invalid 'inputs.0', 'id' missing", + Err: `invalid 'inputs.1.id', has a duplicate id "filestream" (id is required to be unique)`, }, { Name: "Invalid: inputs entry id not a string", diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index ebbca961228..8c1722d657f 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -10,8 +10,13 @@ import ( "fmt" "os" "os/exec" + "path/filepath" + "runtime" "time" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/pkg/utils" + 
"github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/process" @@ -20,8 +25,14 @@ import ( type actionMode int const ( - actionStart = actionMode(0) - actionStop = actionMode(1) + actionTeardown = actionMode(-1) + actionStop = actionMode(0) + actionStart = actionMode(1) + + runDirMod = 0770 + + envAgentComponentID = "AGENT_COMPONENT_ID" + envAgentComponentInputType = "AGENT_COMPONENT_INPUT_TYPE" ) type procState struct { @@ -57,7 +68,7 @@ func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { actionCh: make(chan actionMode), procCh: make(chan procState), compCh: make(chan component.Component), - actionState: actionStart, + actionState: actionStop, state: newComponentState(&comp), }, nil } @@ -69,6 +80,7 @@ func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { // ever be called again. func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { checkinPeriod := c.current.Spec.Spec.Command.Timeouts.Checkin + restartPeriod := c.current.Spec.Spec.Command.Timeouts.Restart c.forceCompState(client.UnitStateStarting, "Starting") t := time.NewTicker(checkinPeriod) defer t.Stop() @@ -81,12 +93,12 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { switch as { case actionStart: if err := c.start(comm); err != nil { - c.forceCompState(client.UnitStateFailed, err.Error()) + c.forceCompState(client.UnitStateFailed, fmt.Sprintf("Failed: %s", err)) } t.Reset(checkinPeriod) - case actionStop: + case actionStop, actionTeardown: if err := c.stop(ctx); err != nil { - c.forceCompState(client.UnitStateFailed, err.Error()) + c.forceCompState(client.UnitStateFailed, fmt.Sprintf("Failed: %s", err)) } } case ps := <-c.procCh: @@ -94,12 +106,9 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { if ps.proc == c.proc { c.proc = nil if c.handleProc(ps.state) { - // start again - if err := c.start(comm); err != nil { - c.forceCompState(client.UnitStateFailed, err.Error()) - } + // start again after restart period + t.Reset(restartPeriod) } - t.Reset(checkinPeriod) } case newComp := <-c.compCh: sendExpected := c.state.syncExpected(&newComp) @@ -140,30 +149,38 @@ func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { c.sendObserved() } case <-t.C: - if c.proc != nil && c.actionState == actionStart { - // running and should be running - now := time.Now().UTC() - if c.lastCheckin.IsZero() { - // never checked-in - c.missedCheckins++ - } else if now.Sub(c.lastCheckin) > checkinPeriod { - // missed check-in during required period - c.missedCheckins++ - } else if now.Sub(c.lastCheckin) <= checkinPeriod { - c.missedCheckins = 0 - } - if c.missedCheckins == 0 { - c.compState(client.UnitStateHealthy) - } else if c.missedCheckins > 0 && c.missedCheckins < maxCheckinMisses { - c.compState(client.UnitStateDegraded) - } else if c.missedCheckins >= maxCheckinMisses { - // something is wrong; the command should be checking in - // - // at this point it is assumed the sub-process has locked up and will not respond to a nice - // termination signal, so we jump directly to killing the process - msg := fmt.Sprintf("Failed: pid '%d' missed %d check-ins and will be killed", c.proc.PID, maxCheckinMisses) - c.forceCompState(client.UnitStateFailed, msg) - _ = c.proc.Kill() // watcher will handle it from here + t.Reset(checkinPeriod) + if c.actionState == actionStart { + if c.proc == nil { + // not running, but 
should be running + if err := c.start(comm); err != nil { + c.forceCompState(client.UnitStateFailed, fmt.Sprintf("Failed: %s", err)) + } + } else { + // running and should be running + now := time.Now().UTC() + if c.lastCheckin.IsZero() { + // never checked-in + c.missedCheckins++ + } else if now.Sub(c.lastCheckin) > checkinPeriod { + // missed check-in during required period + c.missedCheckins++ + } else if now.Sub(c.lastCheckin) <= checkinPeriod { + c.missedCheckins = 0 + } + if c.missedCheckins == 0 { + c.compState(client.UnitStateHealthy) + } else if c.missedCheckins > 0 && c.missedCheckins < maxCheckinMisses { + c.compState(client.UnitStateDegraded) + } else if c.missedCheckins >= maxCheckinMisses { + // something is wrong; the command should be checking in + // + // at this point it is assumed the sub-process has locked up and will not respond to a nice + // termination signal, so we jump directly to killing the process + msg := fmt.Sprintf("Failed: pid '%d' missed %d check-ins and will be killed", c.proc.PID, maxCheckinMisses) + c.forceCompState(client.UnitStateFailed, msg) + _ = c.proc.Kill() // watcher will handle it from here + } } } } @@ -205,8 +222,8 @@ func (c *CommandRuntime) Stop() error { // // Non-blocking and never returns an error. func (c *CommandRuntime) Teardown() error { - // teardown is not different from stop for command runtime - return c.Stop() + c.actionCh <- actionTeardown + return nil } // forceCompState force updates the state for the entire component, forcing that state on all units. @@ -243,11 +260,26 @@ func (c *CommandRuntime) start(comm Communicator) error { return nil } cmdSpec := c.current.Spec.Spec.Command - env := make([]string, 0, len(cmdSpec.Env)) + env := make([]string, 0, len(cmdSpec.Env)+2) for _, e := range cmdSpec.Env { env = append(env, fmt.Sprintf("%s=%s", e.Name, e.Value)) } - proc, err := process.Start(c.current.Spec.BinaryPath, os.Geteuid(), os.Getgid(), cmdSpec.Args, env, attachOutErr) + env = append(env, fmt.Sprintf("%s=%s", envAgentComponentID, c.current.ID)) + env = append(env, fmt.Sprintf("%s=%s", envAgentComponentInputType, c.current.Spec.InputType)) + uid, gid := os.Geteuid(), os.Getegid() + workDir, err := c.workDir(uid, gid) + if err != nil { + return err + } + path, err := filepath.Abs(c.current.Spec.BinaryPath) + if err != nil { + return fmt.Errorf("failed to determine absolute path: %w", err) + } + err = utils.HasStrictExecPerms(path, uid) + if err != nil { + return fmt.Errorf("execution of component prevented: %w", err) + } + proc, err := process.Start(path, uid, gid, cmdSpec.Args, env, attachOutErr, dirPath(workDir)) if err != nil { return err } @@ -261,7 +293,14 @@ func (c *CommandRuntime) start(comm Communicator) error { func (c *CommandRuntime) stop(ctx context.Context) error { if c.proc == nil { - // already stopped + // already stopped, ensure that state of the component is also stopped + if c.state.State != client.UnitStateStopped { + if c.state.State == client.UnitStateFailed { + c.forceCompState(client.UnitStateStopped, "Stopped: never started successfully") + } else { + c.forceCompState(client.UnitStateStopped, "Stopped: already stopped") + } + } return nil } cmdSpec := c.current.Spec.Spec.Command @@ -306,16 +345,51 @@ func (c *CommandRuntime) handleProc(state *os.ProcessState) bool { stopMsg := fmt.Sprintf("Failed: pid '%d' exited with code '%d'", state.Pid(), state.ExitCode()) c.forceCompState(client.UnitStateFailed, stopMsg) return true - case actionStop: + case actionStop, actionTeardown: // stopping (should 
have exited) + if c.actionState == actionTeardown { + // teardown so the entire component has been removed (cleanup work directory) + _ = os.RemoveAll(c.workDirPath()) + } stopMsg := fmt.Sprintf("Stopped: pid '%d' exited with code '%d'", state.Pid(), state.ExitCode()) c.forceCompState(client.UnitStateStopped, stopMsg) } return false } +func (c *CommandRuntime) workDirPath() string { + return filepath.Join(paths.Run(), c.current.ID) +} + +func (c *CommandRuntime) workDir(uid int, gid int) (string, error) { + path := c.workDirPath() + err := os.MkdirAll(path, runDirMod) + if err != nil { + return "", fmt.Errorf("failed to create path %q: %w", path, err) + } + if runtime.GOOS == component.Windows { + return path, nil + } + err = os.Chown(path, uid, gid) + if err != nil { + return "", fmt.Errorf("failed to chown %q: %w", path, err) + } + err = os.Chmod(path, runDirMod) + if err != nil { + return "", fmt.Errorf("failed to chmod %q: %w", path, err) + } + return path, nil +} + func attachOutErr(cmd *exec.Cmd) error { cmd.Stdout = os.Stdout cmd.Stderr = os.Stderr return nil } + +func dirPath(path string) process.Option { + return func(cmd *exec.Cmd) error { + cmd.Dir = path + return nil + } +} diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index 247f54ccd6e..1475e6fd094 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -10,23 +10,22 @@ import ( "context" "errors" "fmt" + "os" "path/filepath" "runtime" "testing" "time" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" - - "go.elastic.co/apm/apmtest" - - "github.com/elastic/elastic-agent-libs/logp" - "github.com/elastic/elastic-agent/pkg/core/logger" - "github.com/stretchr/testify/require" + "go.elastic.co/apm/apmtest" "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/logger" ) const ( @@ -39,6 +38,7 @@ var ( Command: &component.CommandSpec{ Timeouts: component.CommandTimeoutSpec{ Checkin: 30 * time.Second, + Restart: 10 * time.Millisecond, // quick restart during tests Stop: 30 * time.Second, }, }, @@ -146,6 +146,8 @@ LOOP: } func TestManager_FakeInput_StartStop(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -167,10 +169,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -265,9 +264,15 @@ LOOP: err = <-errCh require.NoError(t, err) + + workDir := filepath.Join(paths.Run(), comp.ID) + _, err = os.Stat(workDir) + require.ErrorIs(t, err, os.ErrNotExist) } func TestManager_FakeInput_BadUnitToGood(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -289,10 +294,7 @@ func TestManager_FakeInput_BadUnitToGood(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ 
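Two details of the new start path are easy to miss: the runtime injects the component identity into the child environment and uses the per-component run directory as the child's working directory (via the dirPath option). A hypothetical component-side sketch of consuming both, using the environment variable names defined above (the program itself is illustrative, not part of the patch):

package main

import (
	"fmt"
	"os"
)

func main() {
	id := os.Getenv("AGENT_COMPONENT_ID")          // set by the runtime, e.g. "fake-default"
	typ := os.Getenv("AGENT_COMPONENT_INPUT_TYPE") // the input type from the component spec
	wd, _ := os.Getwd()                            // the workDir(uid, gid) path created for this component
	fmt.Printf("component %s (%s) running in %s\n", id, typ, wd)
}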
@@ -440,6 +442,8 @@ LOOP: } func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -461,10 +465,7 @@ func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -518,6 +519,7 @@ func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) } else if unit.State == client.UnitStateHealthy { // good unit it; now make it bad + t.Logf("marking good-input as having a hard-error for config") updatedComp := comp updatedComp.Units = make([]component.Unit, len(comp.Units)) copy(updatedComp.Units, comp.Units) @@ -595,6 +597,8 @@ LOOP: } func TestManager_FakeInput_Configure(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -616,10 +620,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -722,6 +723,8 @@ LOOP: } func TestManager_FakeInput_RemoveUnit(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -743,10 +746,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -881,6 +881,8 @@ LOOP: } func TestManager_FakeInput_ActionState(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -902,10 +904,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -1012,6 +1011,8 @@ LOOP: } func TestManager_FakeInput_Restarts(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1033,10 +1034,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -1152,6 +1150,8 @@ LOOP: } func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1173,10 +1173,7 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -1274,6 +1271,8 @@ LOOP: } func TestManager_FakeInput_InvalidAction(t *testing.T) { + 
testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1295,10 +1294,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -1399,6 +1395,8 @@ LOOP: } func TestManager_FakeInput_MultiComponent(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1420,10 +1418,7 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) runtimeSpec := component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", @@ -1612,6 +1607,8 @@ LOOP: } func TestManager_FakeInput_LogLevel(t *testing.T) { + testPaths(t) + ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -1633,10 +1630,7 @@ func TestManager_FakeInput_LogLevel(t *testing.T) { require.NoError(t, err) } - binaryPath := filepath.Join("..", "fake", "fake") - if runtime.GOOS == component.Windows { - binaryPath += exeExt - } + binaryPath := testBinary(t) comp := component.Component{ ID: "fake-default", Spec: component.InputRuntimeSpec{ @@ -1795,3 +1789,44 @@ func signalState(subErrCh chan error, state *ComponentState) { } } } + +func testPaths(t *testing.T) { + t.Helper() + + versioned := paths.IsVersionHome() + topPath := paths.Top() + + tmpDir := t.TempDir() + paths.SetVersionHome(false) + paths.SetTop(tmpDir) + + t.Cleanup(func() { + paths.SetVersionHome(versioned) + paths.SetTop(topPath) + _ = os.RemoveAll(tmpDir) + }) +} + +func testBinary(t *testing.T) string { + t.Helper() + + var err error + binaryPath := filepath.Join("..", "fake", "fake") + binaryPath, err = filepath.Abs(binaryPath) + if err != nil { + t.Fatalf("failed abs %s: %s", binaryPath, err) + } + if runtime.GOOS == component.Windows { + binaryPath += exeExt + } else { + err = os.Chown(binaryPath, os.Geteuid(), os.Getgid()) + if err != nil { + t.Fatalf("failed chown %s: %s", binaryPath, err) + } + err = os.Chmod(binaryPath, 0755) + if err != nil { + t.Fatalf("failed chmod %s: %s", binaryPath, err) + } + } + return binaryPath +} diff --git a/pkg/component/spec.go b/pkg/component/spec.go index d4ae56e147b..dabcf499817 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -70,12 +70,14 @@ type CommandEnvSpec struct { // CommandTimeoutSpec is the timeout specification for subprocess. type CommandTimeoutSpec struct { Checkin time.Duration `config:"checkin" yaml:"checkin"` + Restart time.Duration `config:"restart" yaml:"restart"` Stop time.Duration `config:"stop" yaml:"stop"` } // InitDefaults initialized the defaults for the timeouts. func (t *CommandTimeoutSpec) InitDefaults() { t.Checkin = 30 * time.Second + t.Restart = 10 * time.Second t.Stop = 30 * time.Second } diff --git a/pkg/utils/perm_unix.go b/pkg/utils/perm_unix.go new file mode 100644 index 00000000000..e826e87acef --- /dev/null +++ b/pkg/utils/perm_unix.go @@ -0,0 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
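The new restart timeout gives a crashed component a short back-off before relaunch instead of the previous immediate respawn on the check-in ticker. A sketch of how a spec could tune it alongside the existing timeouts (YAML shown as a comment; the nesting under command and the 10s default are assumptions based on the struct layout and InitDefaults above):

// command:
//   timeouts:
//     checkin: 30s
//     restart: 10s
//     stop: 30s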
+ +//go:build !windows +// +build !windows + +package utils + +import ( + "errors" + "os" +) + +// HasStrictExecPerms ensures that the path is executable by the owner, cannot be written by anyone other than the +// owner of the file and that the owner of the file is the same as the UID or root. +func HasStrictExecPerms(path string, uid int) error { + info, err := os.Stat(path) + if err != nil { + return err + } + if info.IsDir() { + return errors.New("is a directory") + } + if info.Mode()&0022 != 0 { + return errors.New("cannot be writeable by group or other") + } + if info.Mode()&0100 == 0 { + return errors.New("not executable by owner") + } + return nil +} diff --git a/pkg/utils/perm_windows.go b/pkg/utils/perm_windows.go new file mode 100644 index 00000000000..e1c329a0982 --- /dev/null +++ b/pkg/utils/perm_windows.go @@ -0,0 +1,15 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows +// +build windows + +package utils + +// HasStrictExecPerms ensures that the path is executable by the owner and that the owner of the file +// is the same as the UID or root. +func HasStrictExecPerms(path string, uid int) error { + // TODO: Need to add check on Windows to ensure that the ACL are correct for the binary before execution. + return nil +} diff --git a/internal/pkg/agent/install/root_unix.go b/pkg/utils/root_unix.go similarity index 97% rename from internal/pkg/agent/install/root_unix.go rename to pkg/utils/root_unix.go index 753b0106eb1..2c42f72a884 100644 --- a/internal/pkg/agent/install/root_unix.go +++ b/pkg/utils/root_unix.go @@ -5,7 +5,7 @@ //go:build !windows // +build !windows -package install +package utils import "os" diff --git a/internal/pkg/agent/install/root_windows.go b/pkg/utils/root_windows.go similarity index 98% rename from internal/pkg/agent/install/root_windows.go rename to pkg/utils/root_windows.go index da2f67e6f64..0350ddff806 100644 --- a/internal/pkg/agent/install/root_windows.go +++ b/pkg/utils/root_windows.go @@ -5,7 +5,7 @@ //go:build windows // +build windows -package install +package utils import ( "github.com/pkg/errors" diff --git a/internal/pkg/agent/install/root_windows_test.go b/pkg/utils/root_windows_test.go similarity index 96% rename from internal/pkg/agent/install/root_windows_test.go rename to pkg/utils/root_windows_test.go index d734890d7d7..86f3c42aec8 100644 --- a/internal/pkg/agent/install/root_windows_test.go +++ b/pkg/utils/root_windows_test.go @@ -5,7 +5,7 @@ //go:build windows // +build windows -package install +package utils import ( "testing" diff --git a/specs/endpoint-security.spec.yml b/specs/endpoint-security.spec.yml index dbb0079ef11..bf6f638f257 100644 --- a/specs/endpoint-security.spec.yml +++ b/specs/endpoint-security.spec.yml @@ -1,39 +1,101 @@ -version: 2 -inputs: - - name: endpoint - description: "Endpoint Security" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - outputs: - - elasticsearch - runtime: - preventions: - - condition: ${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7' - message: "No support for RHEL7 on arm64" - service: - operations: - check: - args: - - "verify" - - "--log" - - "stderr" - timeout: 30 - install: - args: - - "install" - - "--log" - - "stderr" - - "--upgrade" - - "--resources" - - 
"endpoint-security-resources.zip" - timeout: 600 - uninstall: - args: - - "uninstall" - - "--log" - - "stderr" - timeout: 600 +version: 2 +inputs: + - name: endpoint + description: "Endpoint Security" + platforms: + - linux/amd64 + - linux/arm64 + outputs: + - elasticsearch + runtime: + preventions: + - condition: ${runtime.user.root} == false + message: "Elastic Agent must be running as root" + - condition: ${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7' + message: "No support for RHEL7 on arm64" + service: + operations: + check: + args: + - "verify" + - "--log" + - "stderr" + timeout: 30 + install: + args: + - "install" + - "--log" + - "stderr" + - "--upgrade" + - "--resources" + - "endpoint-security-resources.zip" + timeout: 600 + uninstall: + args: + - "uninstall" + - "--log" + - "stderr" + timeout: 600 + - name: endpoint + description: "Endpoint Security" + platforms: + - darwin/amd64 + - darwin/arm64 + outputs: + - elasticsearch + service: + operations: + check: + args: + - "verify" + - "--log" + - "stderr" + timeout: 30 + install: + args: + - "install" + - "--log" + - "stderr" + - "--upgrade" + - "--resources" + - "endpoint-security-resources.zip" + timeout: 600 + uninstall: + args: + - "uninstall" + - "--log" + - "stderr" + timeout: 600 + - name: endpoint + description: "Endpoint Security" + platforms: + - windows/amd64 + outputs: + - elasticsearch + runtime: + preventions: + - condition: ${runtime.user.root} == false + message: "Elastic Agent must be running as Administrator or SYSTEM" + service: + operations: + check: + args: + - "verify" + - "--log" + - "stderr" + timeout: 30 + install: + args: + - "install" + - "--log" + - "stderr" + - "--upgrade" + - "--resources" + - "endpoint-security-resources.zip" + timeout: 600 + uninstall: + args: + - "uninstall" + - "--log" + - "stderr" + timeout: 600 From 505121857bc03a20c29a6d15ea862c8520f9e366 Mon Sep 17 00:00:00 2001 From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Date: Thu, 15 Sep 2022 09:26:29 -0700 Subject: [PATCH 21/49] [v2] Move queue management to dispatcher (#1109) * Move queue management to dispatcher Move queue management actions to the dispatcher from the fleet-server in order to help with future work to add a retry mechanism. Add a PersistedQueue type which wrap the ActionQueue to make persisting the queue simpler for the consumer. * Refactor ActionQueue Refactor ActionQueue to only export methods that are used by consumers. The priority queue implementation has been changed to an unexported type. Persistency has been added and the persistedqueue type has been removed. 
---
 .../application/dispatcher/dispatcher.go      |  71 ++++-
 .../application/dispatcher/dispatcher_test.go | 144 ++++++++-
 .../gateway/fleet/fleet_gateway.go            |  91 +-----
 .../gateway/fleet/fleet_gateway_test.go       | 277 ------------------
 .../pkg/agent/application/managed_mode.go     |   5 +-
 internal/pkg/queue/actionqueue.go             |  69 +++--
 internal/pkg/queue/actionqueue_test.go        | 154 ++++++----
 7 files changed, 359 insertions(+), 452 deletions(-)

diff --git a/internal/pkg/agent/application/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go
index 8628cf5a59f..700c7d35349 100644
--- a/internal/pkg/agent/application/dispatcher/dispatcher.go
+++ b/internal/pkg/agent/application/dispatcher/dispatcher.go
@@ -9,6 +9,7 @@ import (
 	"fmt"
 	"reflect"
 	"strings"
+	"time"
 
 	"go.elastic.co/apm"
 
@@ -21,6 +22,12 @@ import (
 
 type actionHandlers map[string]actions.Handler
 
+type priorityQueue interface {
+	Add(fleetapi.Action, int64)
+	DequeueActions() []fleetapi.Action
+	Save() error
+}
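+
+// Editorial aside (not from the original patch): this interface is satisfied
+// by *queue.ActionQueue, which managed_mode hands to New further below; a
+// compile-time assertion along these lines would document that relationship:
+//
+//	var _ priorityQueue = (*queue.ActionQueue)(nil)
+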
 // Dispatcher processes actions coming from fleet api.
 type Dispatcher interface {
 	Dispatch(context.Context, acker.Acker, ...fleetapi.Action) error
@@ -31,10 +38,11 @@ type ActionDispatcher struct {
 	log      *logger.Logger
 	handlers actionHandlers
 	def      actions.Handler
+	queue    priorityQueue
 }
 
 // New creates a new action dispatcher.
-func New(log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) {
+func New(log *logger.Logger, def actions.Handler, queue priorityQueue) (*ActionDispatcher, error) {
 	var err error
 	if log == nil {
 		log, err = logger.New("action_dispatcher", false)
@@ -51,6 +59,7 @@ func New(log *logger.Logger, def actions.Handler) (*ActionDispatcher, error) {
 		log:      log,
 		handlers: make(actionHandlers),
 		def:      def,
+		queue:    queue,
 	}, nil
 }
 
@@ -86,6 +95,17 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, act
 		span.End()
 	}()
 
+	actions = ad.queueScheduledActions(actions)
+	actions = ad.dispatchCancelActions(ctx, actions, acker)
+	queued, expired := ad.gatherQueuedActions(time.Now().UTC())
+	ad.log.Debugf("Gathered %d actions from queue, %d actions expired", len(queued), len(expired))
+	ad.log.Debugf("Expired actions: %v", expired)
+	actions = append(actions, queued...)
+
+	if err := ad.queue.Save(); err != nil {
+		ad.log.Errorf("failed to persist action_queue: %v", err)
+	}
+
 	if len(actions) == 0 {
 		ad.log.Debug("No action to dispatch")
 		return nil
@@ -128,3 +148,52 @@ func detectTypes(actions []fleetapi.Action) []string {
 	}
 	return str
 }
+
+// queueScheduledActions will add any action in actions with a valid start time to the queue and return the rest.
+// Start-time to current-time comparisons are purposefully not made in case of cancel actions.
+func (ad *ActionDispatcher) queueScheduledActions(input []fleetapi.Action) []fleetapi.Action {
+	actions := make([]fleetapi.Action, 0, len(input))
+	for _, action := range input {
+		start, err := action.StartTime()
+		if err == nil {
+			ad.log.Debugf("Adding action id: %s to queue.", action.ID())
+			ad.queue.Add(action, start.Unix())
+			continue
+		}
+		if !errors.Is(err, fleetapi.ErrNoStartTime) {
+			ad.log.Warnf("Issue gathering start time from action id %s: %v", action.ID(), err)
+		}
+		actions = append(actions, action)
+	}
+	return actions
+}
+
+// dispatchCancelActions will separate and dispatch any cancel actions from the actions list and return the rest of the list.
+// Cancel actions are dispatched separately as they may remove items from the queue.
+func (ad *ActionDispatcher) dispatchCancelActions(ctx context.Context, actions []fleetapi.Action, acker acker.Acker) []fleetapi.Action {
+	// Iterate in reverse so removing an element does not shift the indices
+	// that have not been visited yet.
+	for i := len(actions) - 1; i >= 0; i-- {
+		action := actions[i]
+		// If it is a cancel action, remove from list and dispatch
+		if action.Type() == fleetapi.ActionTypeCancel {
+			actions = append(actions[:i], actions[i+1:]...)
+			if err := ad.dispatchAction(ctx, action, acker); err != nil {
+				ad.log.Errorf("Unable to dispatch cancel action id %s: %v", action.ID(), err)
+			}
+		}
+	}
+	return actions
+}
+
+// gatherQueuedActions will dequeue actions from the action queue and separate those that have already expired.
+func (ad *ActionDispatcher) gatherQueuedActions(ts time.Time) (queued, expired []fleetapi.Action) {
+	actions := ad.queue.DequeueActions()
+	for _, action := range actions {
+		exp, _ := action.Expiration()
+		if ts.After(exp) {
+			expired = append(expired, action)
+			continue
+		}
+		queued = append(queued, action)
+	}
+	return queued, expired
+}
diff --git a/internal/pkg/agent/application/dispatcher/dispatcher_test.go b/internal/pkg/agent/application/dispatcher/dispatcher_test.go
index 4c19779688a..d140033655c 100644
--- a/internal/pkg/agent/application/dispatcher/dispatcher_test.go
+++ b/internal/pkg/agent/application/dispatcher/dispatcher_test.go
@@ -58,13 +58,34 @@ func (m *mockAction) Expiration() (time.Time, error) {
 	return args.Get(0).(time.Time), args.Error(1)
 }
 
+type mockQueue struct {
+	mock.Mock
+}
+
+func (m *mockQueue) Add(action fleetapi.Action, n int64) {
+	m.Called(action, n)
+}
+
+func (m *mockQueue) DequeueActions() []fleetapi.Action {
+	args := m.Called()
+	return args.Get(0).([]fleetapi.Action)
+}
+
+func (m *mockQueue) Save() error {
+	args := m.Called()
+	return args.Error(0)
+}
+
 func TestActionDispatcher(t *testing.T) {
 	ack := noop.New()
 
 	t.Run("Success to dispatch multiples events", func(t *testing.T) {
 		ctx := context.Background()
 		def := &mockHandler{}
-		d, err := New(nil, def)
+		queue := &mockQueue{}
+		queue.On("Save").Return(nil).Once()
+		queue.On("DequeueActions").Return([]fleetapi.Action{}).Once()
+		d, err := New(nil, def, queue)
 		require.NoError(t, err)
 
 		success1 := &mockHandler{}
@@ -76,7 +97,13 @@ func TestActionDispatcher(t *testing.T) {
 		require.NoError(t, err)
 
 		action1 := &mockAction{}
+		action1.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime)
+		action1.On("Type").Return("action")
+		action1.On("ID").Return("id")
 		action2 := &mockOtherAction{}
+		action2.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime)
+		action2.On("Type").Return("action")
+		action2.On("ID").Return("id")
 
 		// TODO better matching for actions
 		success1.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
@@ -88,20 +115,28 @@ func TestActionDispatcher(t *testing.T) {
 		success1.AssertExpectations(t)
 		success2.AssertExpectations(t)
 		def.AssertNotCalled(t, "Handle", mock.Anything, mock.Anything, mock.Anything)
+		queue.AssertExpectations(t)
 	})
 
 	t.Run("Unknown action are caught by the unknown handler", func(t *testing.T) {
 		def := &mockHandler{}
 		def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
 
 		ctx := context.Background()
		queue := &mockQueue{}
+		queue.On("Save").Return(nil).Once()
+		queue.On("DequeueActions").Return([]fleetapi.Action{}).Once()
-		d, err := New(nil, def)
+		d, err := New(nil, def, queue)
 		require.NoError(t, err)
 
 		action := &mockUnknownAction{}
+		action.On("StartTime").Return(time.Time{}, 
fleetapi.ErrNoStartTime) + action.On("Type").Return("action") + action.On("ID").Return("id") err = d.Dispatch(ctx, ack, action) require.NoError(t, err) def.AssertExpectations(t) + queue.AssertExpectations(t) }) t.Run("Could not register two handlers on the same action", func(t *testing.T) { @@ -109,7 +144,8 @@ func TestActionDispatcher(t *testing.T) { success2 := &mockHandler{} def := &mockHandler{} - d, err := New(nil, def) + queue := &mockQueue{} + d, err := New(nil, def, queue) require.NoError(t, err) err = d.Register(&mockAction{}, success1) @@ -117,5 +153,107 @@ func TestActionDispatcher(t *testing.T) { err = d.Register(&mockAction{}, success2) require.Error(t, err) + queue.AssertExpectations(t) + }) + + t.Run("Dispatched action is queued", func(t *testing.T) { + def := &mockHandler{} + def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + + queue := &mockQueue{} + queue.On("Save").Return(nil).Once() + queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + queue.On("Add", mock.Anything, mock.Anything).Once() + + d, err := New(nil, def, queue) + require.NoError(t, err) + err = d.Register(&mockAction{}, def) + require.NoError(t, err) + + action1 := &mockAction{} + action1.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) + action1.On("Type").Return("action") + action1.On("ID").Return("id") + action2 := &mockAction{} + action2.On("StartTime").Return(time.Now().Add(time.Hour), nil) + action2.On("Type").Return("action") + action2.On("ID").Return("id") + + err = d.Dispatch(context.Background(), ack, action1, action2) + require.NoError(t, err) + def.AssertExpectations(t) + queue.AssertExpectations(t) + }) + + t.Run("Cancel queued action", func(t *testing.T) { + def := &mockHandler{} + def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() + + queue := &mockQueue{} + queue.On("Save").Return(nil).Once() + queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + + d, err := New(nil, def, queue) + require.NoError(t, err) + err = d.Register(&mockAction{}, def) + require.NoError(t, err) + + action := &mockAction{} + action.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) + action.On("Type").Return(fleetapi.ActionTypeCancel) + action.On("ID").Return("id") + + err = d.Dispatch(context.Background(), ack, action) + require.NoError(t, err) + def.AssertExpectations(t) + queue.AssertExpectations(t) + }) + + t.Run("Retrieve actions from queue", func(t *testing.T) { + def := &mockHandler{} + def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() + + action1 := &mockAction{} + action1.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) + action1.On("Expiration").Return(time.Now().Add(time.Hour), fleetapi.ErrNoStartTime) + action1.On("Type").Return(fleetapi.ActionTypeCancel) + action1.On("ID").Return("id") + + queue := &mockQueue{} + queue.On("Save").Return(nil).Once() + queue.On("DequeueActions").Return([]fleetapi.Action{action1}).Once() + + d, err := New(nil, def, queue) + require.NoError(t, err) + err = d.Register(&mockAction{}, def) + require.NoError(t, err) + + action2 := &mockAction{} + action2.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) + action2.On("Type").Return(fleetapi.ActionTypeCancel) + action2.On("ID").Return("id") + + err = d.Dispatch(context.Background(), ack, action2) + require.NoError(t, err) + def.AssertExpectations(t) + queue.AssertExpectations(t) + }) + + t.Run("Retrieve no actions from queue", func(t *testing.T) { + def := &mockHandler{} + 
def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil) + + queue := &mockQueue{} + queue.On("Save").Return(nil).Once() + queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + + d, err := New(nil, def, queue) + require.NoError(t, err) + err = d.Register(&mockAction{}, def) + require.NoError(t, err) + + err = d.Dispatch(context.Background(), ack) + require.NoError(t, err) + def.AssertNotCalled(t, "Handle", mock.Anything, mock.Anything, mock.Anything) }) } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index fe5028b0fce..38fad92057c 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -6,7 +6,6 @@ package fleet import ( "context" - stderr "errors" "fmt" "time" @@ -62,14 +61,6 @@ type stateStore interface { AckToken() string SetAckToken(ackToken string) Save() error - SetQueue([]fleetapi.Action) - Actions() []fleetapi.Action -} - -type actionQueue interface { - Add(fleetapi.Action, int64) - DequeueActions() []fleetapi.Action - Cancel(string) int Actions() []fleetapi.Action } @@ -84,7 +75,6 @@ type fleetGateway struct { unauthCounter int stateFetcher coordinator.StateFetcher stateStore stateStore - queue actionQueue errCh chan error } @@ -97,7 +87,6 @@ func New( acker acker.Acker, stateFetcher coordinator.StateFetcher, stateStore stateStore, - queue actionQueue, ) (gateway.FleetGateway, error) { scheduler := scheduler.NewPeriodicJitter(defaultGatewaySettings.Duration, defaultGatewaySettings.Jitter) @@ -111,7 +100,6 @@ func New( acker, stateFetcher, stateStore, - queue, ) } @@ -125,7 +113,6 @@ func newFleetGatewayWithScheduler( acker acker.Acker, stateFetcher coordinator.StateFetcher, stateStore stateStore, - queue actionQueue, ) (gateway.FleetGateway, error) { return &fleetGateway{ log: log, @@ -137,7 +124,6 @@ func newFleetGatewayWithScheduler( acker: acker, stateFetcher: stateFetcher, stateStore: stateStore, - queue: queue, errCh: make(chan error), }, nil } @@ -163,7 +149,7 @@ func (f *fleetGateway) Run(ctx context.Context) error { f.scheduler.Stop() f.log.Info("Fleet gateway stopped") return ctx.Err() - case ts := <-f.scheduler.WaitTick(): + case <-f.scheduler.WaitTick(): f.log.Debug("FleetGateway calling Checkin API") // Execute the checkin call and for any errors returned by the fleet-server API @@ -174,28 +160,11 @@ func (f *fleetGateway) Run(ctx context.Context) error { continue } - actions := f.queueScheduledActions(resp.Actions) - actions, err = f.dispatchCancelActions(actions) - if err != nil { - f.log.Error(err.Error()) - } - - queued, expired := f.gatherQueuedActions(ts.UTC()) - f.log.Debugf("Gathered %d actions from queue, %d actions expired", len(queued), len(expired)) - f.log.Debugf("Expired actions: %v", expired) - - actions = append(actions, queued...) 
+ actions := make([]fleetapi.Action, len(resp.Actions)) + copy(actions, resp.Actions) // Persist state hadErr := false - f.stateStore.SetQueue(f.queue.Actions()) - if err := f.stateStore.Save(); err != nil { - err = fmt.Errorf("failed to persist action_queue, error: %w", err) - f.log.Error(err) - f.errCh <- err - hadErr = true - } - if err := f.dispatcher.Dispatch(context.Background(), f.acker, actions...); err != nil { err = fmt.Errorf("failed to dispatch actions, error: %w", err) f.log.Error(err) @@ -216,60 +185,6 @@ func (f *fleetGateway) Errors() <-chan error { return f.errCh } -// queueScheduledActions will add any action in actions with a valid start time to the queue and return the rest. -// start time to current time comparisons are purposefully not made in case of cancel actions. -func (f *fleetGateway) queueScheduledActions(input fleetapi.Actions) []fleetapi.Action { - actions := make([]fleetapi.Action, 0, len(input)) - for _, action := range input { - start, err := action.StartTime() - if err == nil { - f.log.Debugf("Adding action id: %s to queue.", action.ID()) - f.queue.Add(action, start.Unix()) - continue - } - if !stderr.Is(err, fleetapi.ErrNoStartTime) { - f.log.Warnf("Issue gathering start time from action id %s: %v", action.ID(), err) - } - actions = append(actions, action) - } - return actions -} - -// dispatchCancelActions will separate and dispatch any cancel actions from the actions list and return the rest of the list. -// cancel actions are dispatched seperatly as they may remove items from the queue. -func (f *fleetGateway) dispatchCancelActions(actions []fleetapi.Action) ([]fleetapi.Action, error) { - // separate cancel actions from the actions list - cancelActions := make([]fleetapi.Action, 0, len(actions)) - for i := len(actions) - 1; i >= 0; i-- { - action := actions[i] - if action.Type() == fleetapi.ActionTypeCancel { - cancelActions = append(cancelActions, action) - actions = append(actions[:i], actions[i+1:]...) - } - } - // Dispatch cancel actions - if len(cancelActions) > 0 { - if err := f.dispatcher.Dispatch(context.Background(), f.acker, cancelActions...); err != nil { - return actions, fmt.Errorf("failed to dispatch cancel actions: %w", err) - } - } - return actions, nil -} - -// gatherQueuedActions will dequeue actions from the action queue and separate those that have already expired. 
-func (f *fleetGateway) gatherQueuedActions(ts time.Time) (queued, expired []fleetapi.Action) { - actions := f.queue.DequeueActions() - for _, action := range actions { - exp, _ := action.Expiration() - if ts.After(exp) { - expired = append(expired, action) - continue - } - queued = append(queued, action) - } - return queued, expired -} - func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*fleetapi.CheckinResponse, error) { bo.Reset() diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index f7ba6ec961d..49c05112e18 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -19,7 +19,6 @@ import ( "time" "github.com/pkg/errors" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" @@ -110,29 +109,6 @@ func newTestingDispatcher() *testingDispatcher { return &testingDispatcher{received: make(chan struct{}, 1)} } -type mockQueue struct { - mock.Mock -} - -func (m *mockQueue) Add(action fleetapi.Action, n int64) { - m.Called(action, n) -} - -func (m *mockQueue) DequeueActions() []fleetapi.Action { - args := m.Called() - return args.Get(0).([]fleetapi.Action) -} - -func (m *mockQueue) Cancel(id string) int { - args := m.Called(id) - return args.Int(0) -} - -func (m *mockQueue) Actions() []fleetapi.Action { - args := m.Called() - return args.Get(0).([]fleetapi.Action) -} - type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper) func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGatewayFunc) func(t *testing.T) { @@ -145,10 +121,6 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat stateStore := newStateStore(t, log) - queue := &mockQueue{} - queue.On("DequeueActions").Return([]fleetapi.Action{}) - queue.On("Actions").Return([]fleetapi.Action{}) - gateway, err := newFleetGatewayWithScheduler( log, settings, @@ -159,7 +131,6 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat noop.New(), &emptyStateFetcher{}, stateStore, - queue, ) require.NoError(t, err) @@ -290,10 +261,6 @@ func TestFleetGateway(t *testing.T) { log, _ := logger.New("tst", false) stateStore := newStateStore(t, log) - queue := &mockQueue{} - queue.On("DequeueActions").Return([]fleetapi.Action{}) - queue.On("Actions").Return([]fleetapi.Action{}) - gateway, err := newFleetGatewayWithScheduler( log, settings, @@ -304,7 +271,6 @@ func TestFleetGateway(t *testing.T) { noop.New(), &emptyStateFetcher{}, stateStore, - queue, ) require.NoError(t, err) @@ -337,244 +303,6 @@ func TestFleetGateway(t *testing.T) { require.NoError(t, err) }) - t.Run("queue action from checkin", func(t *testing.T) { - scheduler := scheduler.NewStepper() - client := newTestingClient() - dispatcher := newTestingDispatcher() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - log, _ := logger.New("tst", false) - stateStore := newStateStore(t, log) - - ts := time.Now().UTC().Round(time.Second) - queue := &mockQueue{} - queue.On("Add", mock.Anything, ts.Add(time.Hour).Unix()).Return().Once() - queue.On("DequeueActions").Return([]fleetapi.Action{}) - queue.On("Actions").Return([]fleetapi.Action{}) - - gateway, err := newFleetGatewayWithScheduler( - log, - settings, - agentInfo, - 
client, - dispatcher, - scheduler, - noop.New(), - &emptyStateFetcher{}, - stateStore, - queue, - ) - require.NoError(t, err) - - waitFn := ackSeq( - client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { - resp := wrapStrToResp(http.StatusOK, fmt.Sprintf(`{"actions": [{ - "type": "UPGRADE", - "id": "id1", - "start_time": "%s", - "expiration": "%s", - "data": { - "version": "1.2.3" - } - }]}`, - ts.Add(time.Hour).Format(time.RFC3339), - ts.Add(2*time.Hour).Format(time.RFC3339), - )) - return resp, nil - }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), - ) - - errCh := runFleetGateway(ctx, gateway) - - scheduler.Next() - waitFn() - queue.AssertExpectations(t) - - cancel() - err = <-errCh - require.NoError(t, err) - }) - - t.Run("run action from queue", func(t *testing.T) { - scheduler := scheduler.NewStepper() - client := newTestingClient() - dispatcher := newTestingDispatcher() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - log, _ := logger.New("tst", false) - stateStore := newStateStore(t, log) - - ts := time.Now().UTC().Round(time.Second) - queue := &mockQueue{} - queue.On("DequeueActions").Return([]fleetapi.Action{&fleetapi.ActionUpgrade{ActionID: "id1", ActionType: "UPGRADE", ActionStartTime: ts.Add(-1 * time.Hour).Format(time.RFC3339), ActionExpiration: ts.Add(time.Hour).Format(time.RFC3339)}}).Once() - queue.On("Actions").Return([]fleetapi.Action{}) - - gateway, err := newFleetGatewayWithScheduler( - log, - settings, - agentInfo, - client, - dispatcher, - scheduler, - noop.New(), - &emptyStateFetcher{}, - stateStore, - queue, - ) - require.NoError(t, err) - - waitFn := ackSeq( - client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { - resp := wrapStrToResp(http.StatusOK, `{"actions": []}`) - return resp, nil - }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 1, len(actions)) - return nil - }), - ) - - errCh := runFleetGateway(ctx, gateway) - - scheduler.Next() - waitFn() - queue.AssertExpectations(t) - - cancel() - err = <-errCh - require.NoError(t, err) - }) - - t.Run("discard expired action from queue", func(t *testing.T) { - scheduler := scheduler.NewStepper() - client := newTestingClient() - dispatcher := newTestingDispatcher() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - log, _ := logger.New("tst", false) - stateStore := newStateStore(t, log) - - ts := time.Now().UTC().Round(time.Second) - queue := &mockQueue{} - queue.On("DequeueActions").Return([]fleetapi.Action{&fleetapi.ActionUpgrade{ActionID: "id1", ActionType: "UPGRADE", ActionStartTime: ts.Add(-2 * time.Hour).Format(time.RFC3339), ActionExpiration: ts.Add(-1 * time.Hour).Format(time.RFC3339)}}).Once() - queue.On("Actions").Return([]fleetapi.Action{}) - - gateway, err := newFleetGatewayWithScheduler( - log, - settings, - agentInfo, - client, - dispatcher, - scheduler, - noop.New(), - &emptyStateFetcher{}, - stateStore, - queue, - ) - require.NoError(t, err) - - waitFn := ackSeq( - client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { - resp := wrapStrToResp(http.StatusOK, `{"actions": []}`) - return resp, nil - }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), - ) - - errCh := runFleetGateway(ctx, gateway) - - scheduler.Next() - waitFn() - queue.AssertExpectations(t) - - cancel() - err = <-errCh - 
require.NoError(t, err) - }) - - t.Run("cancel action from checkin", func(t *testing.T) { - scheduler := scheduler.NewStepper() - client := newTestingClient() - dispatcher := newTestingDispatcher() - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - log, _ := logger.New("tst", false) - stateStore := newStateStore(t, log) - - ts := time.Now().UTC().Round(time.Second) - queue := &mockQueue{} - queue.On("Add", mock.Anything, ts.Add(-1*time.Hour).Unix()).Return().Once() - queue.On("DequeueActions").Return([]fleetapi.Action{}) - queue.On("Actions").Return([]fleetapi.Action{}).Maybe() // this test seems flakey if we check for this call - // queue.Cancel does not need to be mocked here as it is ran in the cancel action dispatcher. - - gateway, err := newFleetGatewayWithScheduler( - log, - settings, - agentInfo, - client, - dispatcher, - scheduler, - noop.New(), - &emptyStateFetcher{}, - stateStore, - queue, - ) - require.NoError(t, err) - - waitFn := ackSeq( - client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { - resp := wrapStrToResp(http.StatusOK, fmt.Sprintf(`{"actions": [{ - "type": "UPGRADE", - "id": "id1", - "start_time": "%s", - "expiration": "%s", - "data": { - "version": "1.2.3" - } - }, { - "type": "CANCEL", - "id": "id2", - "data": { - "target_id": "id1" - } - }]}`, - ts.Add(-1*time.Hour).Format(time.RFC3339), - ts.Add(2*time.Hour).Format(time.RFC3339), - )) - return resp, nil - }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - return nil - }), - ) - - errCh := runFleetGateway(ctx, gateway) - - scheduler.Next() - waitFn() - queue.AssertExpectations(t) - - cancel() - err = <-errCh - require.NoError(t, err) - }) - t.Run("Test the wait loop is interruptible", func(t *testing.T) { // 20mins is the double of the base timeout values for golang test suites. // If we cannot interrupt we will timeout. 
@@ -588,10 +316,6 @@ func TestFleetGateway(t *testing.T) {
 		log, _ := logger.New("tst", false)
 		stateStore := newStateStore(t, log)
 
-		queue := &mockQueue{}
-		queue.On("DequeueActions").Return([]fleetapi.Action{})
-		queue.On("Actions").Return([]fleetapi.Action{})
-
 		gateway, err := newFleetGatewayWithScheduler(
 			log,
 			&fleetGatewaySettings{
@@ -605,7 +329,6 @@ func TestFleetGateway(t *testing.T) {
 			noop.New(),
 			&emptyStateFetcher{},
 			stateStore,
-			queue,
 		)
 
 		require.NoError(t, err)
diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go
index 8abeab60eba..cd477753a1f 100644
--- a/internal/pkg/agent/application/managed_mode.go
+++ b/internal/pkg/agent/application/managed_mode.go
@@ -67,7 +67,7 @@ func newManagedConfigManager(
 		return nil, errors.New(err, fmt.Sprintf("fail to read action store '%s'", paths.AgentActionStoreFile()))
 	}
 
-	actionQueue, err := queue.NewActionQueue(stateStore.Queue())
+	actionQueue, err := queue.NewActionQueue(stateStore.Queue(), stateStore)
 	if err != nil {
 		return nil, fmt.Errorf("unable to initialize action queue: %w", err)
 	}
@@ -170,7 +170,6 @@ func (m *managedConfigManager) Run(ctx context.Context) error {
 		actionAcker,
 		m.coord,
 		m.stateStore,
-		m.actionQueue,
 	)
 	if err != nil {
 		return err
@@ -281,7 +280,7 @@ func fleetServerRunning(state runtime.ComponentState) bool {
 }
 
 func newManagedActionDispatcher(m *managedConfigManager, canceller context.CancelFunc) (*dispatcher.ActionDispatcher, *handlers.PolicyChange, error) {
-	actionDispatcher, err := dispatcher.New(m.log, handlers.NewDefault(m.log))
+	actionDispatcher, err := dispatcher.New(m.log, handlers.NewDefault(m.log), m.actionQueue)
 	if err != nil {
 		return nil, nil, err
 	}
diff --git a/internal/pkg/queue/actionqueue.go b/internal/pkg/queue/actionqueue.go
index 671291639a2..0f3a2c20ffc 100644
--- a/internal/pkg/queue/actionqueue.go
+++ b/internal/pkg/queue/actionqueue.go
@@ -11,6 +11,12 @@ import (
 	"github.com/elastic/elastic-agent/internal/pkg/fleetapi"
 )
 
+// saver is the minimal interface needed for state storage.
+type saver interface {
+	SetQueue(a []fleetapi.Action)
+	Save() error
+}
+
 // item tracks an action in the action queue
 type item struct {
 	action   fleetapi.Action
@@ -18,23 +24,28 @@ type item struct {
 	index    int
 }
 
-// ActionQueue uses the standard library's container/heap to implement a priority queue
-// This queue should not be indexed directly, instead use the provided Add, DequeueActions, or Cancel methods to add or remove items
-// Actions() is indended to get the list of actions in the queue for serialization.
-type ActionQueue []*item
+// queue uses the standard library's container/heap to implement a priority queue.
+// This queue should not be used directly; instead, the exported ActionQueue should be used.
+type queue []*item
+
+// ActionQueue is a priority queue with the ability to persist to disk.
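+//
+// Editorial usage sketch (not part of this change; s is any saver
+// implementation, such as the agent state store):
+//
+//	q, err := NewActionQueue(savedActions, s)
+//	q.Add(action, start.Unix())  // priority is the action start time
+//	ready := q.DequeueActions()  // actions whose start time has passed
+//	if err := q.Save(); err != nil {
+//		// queue could not be persisted through s
+//	}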
+type ActionQueue struct { + q *queue + s saver +} // Len returns the length of the queue -func (q ActionQueue) Len() int { +func (q queue) Len() int { return len(q) } // Less will determine if item i's priority is less then item j's -func (q ActionQueue) Less(i, j int) bool { +func (q queue) Less(i, j int) bool { return q[i].priority < q[j].priority } // Swap will swap the items at index i and j -func (q ActionQueue) Swap(i, j int) { +func (q queue) Swap(i, j int) { q[i], q[j] = q[j], q[i] q[i].index = i q[j].index = j @@ -42,7 +53,7 @@ func (q ActionQueue) Swap(i, j int) { // Push will add x as an item to the queue // When using the queue, the Add method should be used instead. -func (q *ActionQueue) Push(x interface{}) { +func (q *queue) Push(x interface{}) { n := len(*q) e := x.(*item) //nolint:errcheck // should be an *item e.index = n @@ -51,7 +62,7 @@ func (q *ActionQueue) Push(x interface{}) { // Pop will return the last item from the queue // When using the queue, DequeueActions should be used instead -func (q *ActionQueue) Pop() interface{} { +func (q *queue) Pop() interface{} { old := *q n := len(old) e := old[n-1] @@ -61,10 +72,10 @@ func (q *ActionQueue) Pop() interface{} { return e } -// NewActionQueue creates a new ActionQueue initialized with the passed actions. +// newQueue creates a new priority queue using container/heap. // Will return an error if StartTime fails for any action. -func NewActionQueue(actions []fleetapi.Action) (*ActionQueue, error) { - q := make(ActionQueue, len(actions)) +func newQueue(actions []fleetapi.Action) (*queue, error) { + q := make(queue, len(actions)) for i, action := range actions { ts, err := action.StartTime() if err != nil { @@ -80,6 +91,18 @@ func NewActionQueue(actions []fleetapi.Action) (*ActionQueue, error) { return &q, nil } +// NewActionQueue creates a new queue with the passed actions using the persistor for state storage. +func NewActionQueue(actions []fleetapi.Action, s saver) (*ActionQueue, error) { + q, err := newQueue(actions) + if err != nil { + return nil, err + } + return &ActionQueue{ + q: q, + s: s, + }, nil +} + // Add will add an action to the queue with the associated priority. // The priority is meant to be the start-time of the action as a unix epoch time. // Complexity: O(log n) @@ -88,7 +111,7 @@ func (q *ActionQueue) Add(action fleetapi.Action, priority int64) { action: action, priority: priority, } - heap.Push(q, e) + heap.Push(q.q, e) } // DequeueActions will dequeue all actions that have a priority less then time.Now(). 
@@ -96,11 +119,11 @@ func (q *ActionQueue) Add(action fleetapi.Action, priority int64) { func (q *ActionQueue) DequeueActions() []fleetapi.Action { ts := time.Now().Unix() actions := make([]fleetapi.Action, 0) - for q.Len() != 0 { - if (*q)[0].priority > ts { + for q.q.Len() != 0 { + if (*q.q)[0].priority > ts { break } - item := heap.Pop(q).(*item) //nolint:errcheck // should be an *item + item := heap.Pop(q.q).(*item) //nolint:errcheck // should be an *item actions = append(actions, item.action) } return actions @@ -110,22 +133,28 @@ func (q *ActionQueue) DequeueActions() []fleetapi.Action { // Complexity: O(n*log n) func (q *ActionQueue) Cancel(actionID string) int { items := make([]*item, 0) - for _, item := range *q { + for _, item := range *q.q { if item.action.ID() == actionID { items = append(items, item) } } for _, item := range items { - heap.Remove(q, item.index) + heap.Remove(q.q, item.index) } return len(items) } // Actions returns all actions in the queue, item 0 is garunteed to be the min, the rest may not be in sorted order. func (q *ActionQueue) Actions() []fleetapi.Action { - actions := make([]fleetapi.Action, q.Len()) - for i, item := range *q { + actions := make([]fleetapi.Action, q.q.Len()) + for i, item := range *q.q { actions[i] = item.action } return actions } + +// Save persists the queue to disk. +func (q *ActionQueue) Save() error { + q.s.SetQueue(q.Actions()) + return q.s.Save() +} diff --git a/internal/pkg/queue/actionqueue_test.go b/internal/pkg/queue/actionqueue_test.go index 1c1e1959a9f..d951f855737 100644 --- a/internal/pkg/queue/actionqueue_test.go +++ b/internal/pkg/queue/actionqueue_test.go @@ -47,7 +47,20 @@ func (m *mockAction) Expiration() (time.Time, error) { return args.Get(0).(time.Time), args.Error(1) } -func TestNewActionQueue(t *testing.T) { +type mockPersistor struct { + mock.Mock +} + +func (m *mockPersistor) SetQueue(a []fleetapi.Action) { + m.Called(a) +} + +func (m *mockPersistor) Save() error { + args := m.Called() + return args.Error(0) +} + +func TestNewQueue(t *testing.T) { ts := time.Now() a1 := &mockAction{} a1.On("ID").Return("test-1") @@ -60,21 +73,21 @@ func TestNewActionQueue(t *testing.T) { a3.On("StartTime").Return(ts.Add(time.Minute), nil) t.Run("nil actions slice", func(t *testing.T) { - q, err := NewActionQueue(nil) + q, err := newQueue(nil) require.NoError(t, err) assert.NotNil(t, q) assert.Empty(t, q) }) t.Run("empty actions slice", func(t *testing.T) { - q, err := NewActionQueue([]fleetapi.Action{}) + q, err := newQueue([]fleetapi.Action{}) require.NoError(t, err) assert.NotNil(t, q) assert.Empty(t, q) }) t.Run("ordered actions list", func(t *testing.T) { - q, err := NewActionQueue([]fleetapi.Action{a1, a2, a3}) + q, err := newQueue([]fleetapi.Action{a1, a2, a3}) assert.NotNil(t, q) require.NoError(t, err) assert.Len(t, *q, 3) @@ -89,7 +102,7 @@ func TestNewActionQueue(t *testing.T) { }) t.Run("unordered actions list", func(t *testing.T) { - q, err := NewActionQueue([]fleetapi.Action{a3, a2, a1}) + q, err := newQueue([]fleetapi.Action{a3, a2, a1}) require.NoError(t, err) assert.NotNil(t, q) assert.Len(t, *q, 3) @@ -106,13 +119,13 @@ func TestNewActionQueue(t *testing.T) { t.Run("start time error", func(t *testing.T) { a := &mockAction{} a.On("StartTime").Return(time.Time{}, errors.New("oh no")) - q, err := NewActionQueue([]fleetapi.Action{a}) + q, err := newQueue([]fleetapi.Action{a}) assert.EqualError(t, err, "oh no") assert.Nil(t, q) }) } -func assertOrdered(t *testing.T, q *ActionQueue) { +func assertOrdered(t 
*testing.T, q *queue) { t.Helper() require.Len(t, *q, 3) i := heap.Pop(q).(*item) @@ -137,48 +150,56 @@ func Test_ActionQueue_Add(t *testing.T) { a3.On("ID").Return("test-3") t.Run("ascending order", func(t *testing.T) { - q := &ActionQueue{} - q.Add(a1, 1) - q.Add(a2, 2) - q.Add(a3, 3) - - assertOrdered(t, q) + aq := &ActionQueue{ + q: &queue{}, + } + aq.Add(a1, 1) + aq.Add(a2, 2) + aq.Add(a3, 3) + + assertOrdered(t, aq.q) }) t.Run("Add descending order", func(t *testing.T) { - q := &ActionQueue{} - q.Add(a3, 3) - q.Add(a2, 2) - q.Add(a1, 1) - - assertOrdered(t, q) + aq := &ActionQueue{ + q: &queue{}, + } + aq.Add(a3, 3) + aq.Add(a2, 2) + aq.Add(a1, 1) + + assertOrdered(t, aq.q) }) t.Run("mixed order", func(t *testing.T) { - q := &ActionQueue{} - q.Add(a1, 1) - q.Add(a3, 3) - q.Add(a2, 2) - - assertOrdered(t, q) + aq := &ActionQueue{ + q: &queue{}, + } + aq.Add(a1, 1) + aq.Add(a3, 3) + aq.Add(a2, 2) + + assertOrdered(t, aq.q) }) t.Run("two items have same priority", func(t *testing.T) { - q := &ActionQueue{} - q.Add(a1, 1) - q.Add(a2, 2) - q.Add(a3, 2) - - require.Len(t, *q, 3) - i := heap.Pop(q).(*item) + aq := &ActionQueue{ + q: &queue{}, + } + aq.Add(a1, 1) + aq.Add(a2, 2) + aq.Add(a3, 2) + + require.Len(t, *aq.q, 3) + i := heap.Pop(aq.q).(*item) assert.Equal(t, int64(1), i.priority) assert.Equal(t, "test-1", i.action.ID()) // next two items have same priority, however the ids may not match insertion order - i = heap.Pop(q).(*item) + i = heap.Pop(aq.q).(*item) assert.Equal(t, int64(2), i.priority) - i = heap.Pop(q).(*item) + i = heap.Pop(aq.q).(*item) assert.Equal(t, int64(2), i.priority) - assert.Empty(t, *q) + assert.Empty(t, *aq.q) }) } @@ -191,17 +212,19 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { a3.On("ID").Return("test-3") t.Run("empty queue", func(t *testing.T) { - q := &ActionQueue{} + aq := &ActionQueue{ + q: &queue{}, + } - actions := q.DequeueActions() + actions := aq.DequeueActions() assert.Empty(t, actions) - assert.Empty(t, *q) + assert.Empty(t, *aq.q) }) t.Run("one action from queue", func(t *testing.T) { ts := time.Now() - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: ts.Add(-1 * time.Minute).Unix(), index: 0, @@ -215,8 +238,9 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - actions := q.DequeueActions() + actions := aq.DequeueActions() require.Len(t, actions, 1) assert.Equal(t, "test-1", actions[0].ID()) @@ -234,7 +258,7 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { t.Run("two actions from queue", func(t *testing.T) { ts := time.Now() - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: ts.Add(-1 * time.Minute).Unix(), index: 0, @@ -248,8 +272,9 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - actions := q.DequeueActions() + actions := aq.DequeueActions() require.Len(t, actions, 2) assert.Equal(t, "test-2", actions[0].ID()) @@ -265,7 +290,7 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { t.Run("all actions from queue", func(t *testing.T) { ts := time.Now() - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: ts.Add(-1 * time.Minute).Unix(), index: 0, @@ -279,8 +304,9 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - actions := q.DequeueActions() + actions := aq.DequeueActions() require.Len(t, actions, 3) assert.Equal(t, "test-3", actions[0].ID()) @@ -292,7 
+318,7 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { t.Run("no actions from queue", func(t *testing.T) { ts := time.Now() - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: ts.Add(1 * time.Minute).Unix(), index: 0, @@ -306,8 +332,9 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - actions := q.DequeueActions() + actions := aq.DequeueActions() assert.Empty(t, actions) require.Len(t, *q, 3) @@ -333,15 +360,16 @@ func Test_ActionQueue_Cancel(t *testing.T) { a3.On("ID").Return("test-3") t.Run("empty queue", func(t *testing.T) { - q := &ActionQueue{} + q := &queue{} + aq := &ActionQueue{q, &mockPersistor{}} - n := q.Cancel("test-1") + n := aq.Cancel("test-1") assert.Zero(t, n) assert.Empty(t, *q) }) t.Run("one item cancelled", func(t *testing.T) { - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: 1, index: 0, @@ -355,8 +383,9 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - n := q.Cancel("test-1") + n := aq.Cancel("test-1") assert.Equal(t, 1, n) assert.Len(t, *q, 2) @@ -370,7 +399,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { }) t.Run("two items cancelled", func(t *testing.T) { - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: 1, index: 0, @@ -384,8 +413,9 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - n := q.Cancel("test-1") + n := aq.Cancel("test-1") assert.Equal(t, 2, n) assert.Len(t, *q, 1) @@ -396,7 +426,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { }) t.Run("all items cancelled", func(t *testing.T) { - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: 1, index: 0, @@ -410,14 +440,15 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - n := q.Cancel("test-1") + n := aq.Cancel("test-1") assert.Equal(t, 3, n) assert.Empty(t, *q) }) t.Run("no items cancelled", func(t *testing.T) { - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: 1, index: 0, @@ -431,8 +462,9 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - n := q.Cancel("test-0") + n := aq.Cancel("test-0") assert.Zero(t, n) assert.Len(t, *q, 3) @@ -451,8 +483,9 @@ func Test_ActionQueue_Cancel(t *testing.T) { func Test_ActionQueue_Actions(t *testing.T) { t.Run("empty queue", func(t *testing.T) { - q := &ActionQueue{} - actions := q.Actions() + q := &queue{} + aq := &ActionQueue{q, &mockPersistor{}} + actions := aq.Actions() assert.Len(t, actions, 0) }) @@ -463,7 +496,7 @@ func Test_ActionQueue_Actions(t *testing.T) { a2.On("ID").Return("test-2") a3 := &mockAction{} a3.On("ID").Return("test-3") - q := &ActionQueue{&item{ + q := &queue{&item{ action: a1, priority: 1, index: 0, @@ -477,8 +510,9 @@ func Test_ActionQueue_Actions(t *testing.T) { index: 2, }} heap.Init(q) + aq := &ActionQueue{q, &mockPersistor{}} - actions := q.Actions() + actions := aq.Actions() assert.Len(t, actions, 3) assert.Equal(t, "test-1", actions[0].ID()) }) From e91e3ce8889d199a2a2a0e4256cddd13c9c43183 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 28 Sep 2022 20:37:19 -0400 Subject: [PATCH 22/49] Fix [V2]: Elastic Agent Install is broken. 
(#1331) --- internal/pkg/agent/cmd/inspect.go | 65 +-------------------- internal/pkg/agent/install/uninstall.go | 7 +-- internal/pkg/agent/vars/vars.go | 78 +++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 68 deletions(-) create mode 100644 internal/pkg/agent/vars/vars.go diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index 32455a179c4..77e917c9c3c 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -12,7 +12,6 @@ import ( "time" "github.com/spf13/cobra" - "golang.org/x/sync/errgroup" "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent-libs/logp" @@ -21,9 +20,9 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" + "github.com/elastic/elastic-agent/internal/pkg/agent/vars" "github.com/elastic/elastic-agent/internal/pkg/capabilities" "github.com/elastic/elastic-agent/internal/pkg/cli" - "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" "github.com/elastic/elastic-agent/pkg/component" @@ -302,7 +301,7 @@ func getConfigWithVariables(ctx context.Context, l *logger.Logger, cfgPath strin } // Wait for the variables based on the timeout. - vars, err := waitForVariables(ctx, l, cfg, timeout) + vars, err := vars.WaitForVariables(ctx, l, cfg, timeout) if err != nil { return nil, fmt.Errorf("failed to gather variables: %w", err) } @@ -326,66 +325,6 @@ func getConfigWithVariables(ctx context.Context, l *logger.Logger, cfgPath strin return m, nil } -func waitForVariables(ctx context.Context, l *logger.Logger, cfg *config.Config, wait time.Duration) ([]*transpiler.Vars, error) { - var cancel context.CancelFunc - var vars []*transpiler.Vars - - composable, err := composable.New(l, cfg) - if err != nil { - return nil, fmt.Errorf("failed to create composable controller: %w", err) - } - - hasTimeout := false - if wait > time.Duration(0) { - hasTimeout = true - ctx, cancel = context.WithTimeout(ctx, wait) - } else { - ctx, cancel = context.WithCancel(ctx) - } - defer cancel() - - g, ctx := errgroup.WithContext(ctx) - g.Go(func() error { - var err error - for { - select { - case <-ctx.Done(): - if err == nil { - err = ctx.Err() - } - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - err = nil - } - return err - case cErr := <-composable.Errors(): - err = cErr - if err != nil { - cancel() - } - case cVars := <-composable.Watch(): - vars = cVars - if !hasTimeout { - cancel() - } - } - } - }) - - g.Go(func() error { - err := composable.Run(ctx) - if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { - err = nil - } - return err - }) - - err = g.Wait() - if err != nil { - return nil, err - } - return vars, nil -} - func printComponents(components []component.Component, streams *cli.IOStreams) error { topLevel := struct { Components []component.Component `yaml:"components"` diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index 61588f5de97..df5c11d747c 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -19,8 +19,8 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" 
"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" + "github.com/elastic/elastic-agent/internal/pkg/agent/vars" "github.com/elastic/elastic-agent/internal/pkg/capabilities" - "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" "github.com/elastic/elastic-agent/pkg/component" @@ -195,13 +195,10 @@ func applyDynamics(ctx context.Context, log *logger.Logger, cfg *config.Config) // apply dynamic inputs inputs, ok := transpiler.Lookup(ast, "inputs") if ok { - varsArray := make([]*transpiler.Vars, 0) - - ctrl, err := composable.New(log, cfg) + varsArray, err := vars.WaitForVariables(ctx, log, cfg, 0) if err != nil { return nil, err } - _ = ctrl.Run(ctx) renderedInputs, err := transpiler.RenderInputs(inputs, varsArray) if err != nil { diff --git a/internal/pkg/agent/vars/vars.go b/internal/pkg/agent/vars/vars.go new file mode 100644 index 00000000000..7f0aff1c329 --- /dev/null +++ b/internal/pkg/agent/vars/vars.go @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package vars + +import ( + "context" + "errors" + "fmt" + "time" + + "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" + "github.com/elastic/elastic-agent/internal/pkg/composable" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" + "golang.org/x/sync/errgroup" +) + +func WaitForVariables(ctx context.Context, l *logger.Logger, cfg *config.Config, wait time.Duration) ([]*transpiler.Vars, error) { + var cancel context.CancelFunc + var vars []*transpiler.Vars + + composable, err := composable.New(l, cfg) + if err != nil { + return nil, fmt.Errorf("failed to create composable controller: %w", err) + } + + hasTimeout := false + if wait > time.Duration(0) { + hasTimeout = true + ctx, cancel = context.WithTimeout(ctx, wait) + } else { + ctx, cancel = context.WithCancel(ctx) + } + defer cancel() + + g, ctx := errgroup.WithContext(ctx) + g.Go(func() error { + var err error + for { + select { + case <-ctx.Done(): + if err == nil { + err = ctx.Err() + } + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + err = nil + } + return err + case cErr := <-composable.Errors(): + err = cErr + if err != nil { + cancel() + } + case cVars := <-composable.Watch(): + vars = cVars + if !hasTimeout { + cancel() + } + } + } + }) + + g.Go(func() error { + err := composable.Run(ctx) + if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) { + err = nil + } + return err + }) + + err = g.Wait() + if err != nil { + return nil, err + } + return vars, nil +} From e6a038ed916b12a410325e1839e3b885aa166fa8 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 28 Sep 2022 20:38:09 -0400 Subject: [PATCH 23/49] Fix agent shutdown on SIGINT (#1258) * Fix agent shutdown on SIGINT * Update runtime_comm expected check-in handling to eliminate the lock in failure cases * Remove some buffered channels that are not longer blocking shutdown after the runtime comms fix commit * Fix the recursive lock on itself in the runtime loop, refactored code to make it cleaner * Fix the comment typo * Fixed managed_mode coordination with fleet gateway. Now the gateway errors reading loop waits until gateway exits. 
---
 NOTICE.txt                                    | 266 ------------------
 go.mod                                        |   2 +-
 go.sum                                        |  10 -
 .../pkg/agent/application/managed_mode.go     |  24 +-
 .../pkg/fleetapi/acker/retrier/retrier.go     |   2 +-
 internal/pkg/runner/runner.go                 |  85 ++++++
 internal/pkg/runner/runner_test.go            |  90 ++++++
 pkg/component/runtime/manager.go              |  18 +-
 pkg/component/runtime/runtime.go              |  61 ++--
 pkg/component/runtime/runtime_comm.go         |  26 +-
 10 files changed, 237 insertions(+), 347 deletions(-)
 create mode 100644 internal/pkg/runner/runner.go
 create mode 100644 internal/pkg/runner/runner_test.go

diff --git a/NOTICE.txt b/NOTICE.txt
index 6aef478cffe..3c55a5e0295 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -6545,36 +6545,6 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
---------------------------------------------------------------------------------
-Dependency : github.com/cenkalti/backoff
-Version: v2.2.1+incompatible
-Licence type (autodetected): MIT
--------------------------------------------------------------------------------
-
-Contents of probable licence file $GOMODCACHE/github.com/cenkalti/backoff@v2.2.1+incompatible/LICENSE:
-
-The MIT License (MIT)
-
-Copyright (c) 2014 Cenk Altı
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
-the Software, and to permit persons to whom the Software is furnished to do so,
-subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
-FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
-COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
-IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
-CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
-
 -------------------------------------------------------------------------------
 Dependency : github.com/cenkalti/backoff/v4
 Version: v4.1.1
@@ -6806,207 +6776,6 @@ Contents of probable licence file $GOMODCACHE/github.com/containerd/containerd@v
 limitations under the License.
 
 
---------------------------------------------------------------------------------
-Dependency : github.com/coreos/go-systemd
-Version: v0.0.0-20190321100706-95778dfbb74e
-Licence type (autodetected): Apache-2.0
--------------------------------------------------------------------------------
-
-Contents of probable licence file $GOMODCACHE/github.com/coreos/go-systemd@v0.0.0-20190321100706-95778dfbb74e/LICENSE:
-
-Apache License
-Version 2.0, January 2004
-http://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction, and
-distribution as defined by Sections 1 through 9 of this document.
- -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. - -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - -------------------------------------------------------------------------------- Dependency : github.com/cyphar/filepath-securejoin Version: v0.2.3 @@ -8347,41 +8116,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/godbus/dbus -Version: v0.0.0-20190422162347-ade71ed3457e -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/godbus/dbus@v0.0.0-20190422162347-ade71ed3457e/LICENSE: - -Copyright (c) 2013, Georg Reinke (), Google -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -1. Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright -notice, this list of conditions and the following disclaimer in the -documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -------------------------------------------------------------------------------- Dependency : github.com/godbus/dbus/v5 Version: v5.0.5 diff --git a/go.mod b/go.mod index 45a5e482760..0aa3bfaa8cd 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/elastic/elastic-agent -go 1.17 +go 1.18 require ( github.com/Microsoft/go-winio v0.5.2 diff --git a/go.sum b/go.sum index ae8ec309edf..bc17c5e307b 100644 --- a/go.sum +++ b/go.sum @@ -92,7 +92,6 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= -github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -175,7 +174,6 @@ github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e h1:YYUjy5BRwO5 github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e h1:Gbx+iVCXG/1m5WSnidDGuHgN+vbIwl+6fR092ANU+Y8= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= -github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.1 h1:G2HAfAmvm/GcKan2oOQpBXOd2tT2G57ZnZGWa1PxPBQ= @@ -229,7 +227,6 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= -github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -243,7 +240,6 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= 
github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= @@ -285,7 +281,6 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= -github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -313,7 +308,6 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20161114122254-48702e0da86b/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= @@ -395,7 +389,6 @@ github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-licenser v0.4.0 h1:jLq6A5SilDS/Iz1ABRkO6BHy91B9jBora8FwGRsDqUI= github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU= -github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= @@ -519,7 +512,6 @@ github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY9 github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= @@ -1401,7 +1393,6 @@ 
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= @@ -1560,7 +1551,6 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index cd477753a1f..cb72af2a700 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -27,6 +27,7 @@ import ( fleetclient "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" "github.com/elastic/elastic-agent/internal/pkg/queue" "github.com/elastic/elastic-agent/internal/pkg/remote" + "github.com/elastic/elastic-agent/internal/pkg/runner" "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -182,32 +183,25 @@ func (m *managedConfigManager) Run(ctx context.Context) error { } // Proxy errors from the gateway to our own channel. - go func() { + gatewayErrorsRunner := runner.Start(context.Background(), func(ctx context.Context) error { for { select { case <-ctx.Done(): - return + return nil case err := <-gateway.Errors(): m.errCh <- err } } - }() + }) // Run the gateway. 
- gatewayRun := make(chan bool) - gatewayErrCh := make(chan error) - defer func() { - gatewayCancel() - <-gatewayRun - }() - go func() { - err := gateway.Run(gatewayCtx) - close(gatewayRun) - gatewayErrCh <- err - }() + gatewayRunner := runner.Start(gatewayCtx, func(ctx context.Context) error { + defer gatewayErrorsRunner.Stop() + return gateway.Run(ctx) + }) <-ctx.Done() - return <-gatewayErrCh + return gatewayRunner.Err() } func (m *managedConfigManager) Errors() <-chan error { diff --git a/internal/pkg/fleetapi/acker/retrier/retrier.go b/internal/pkg/fleetapi/acker/retrier/retrier.go index 406d6570611..747fe93645d 100644 --- a/internal/pkg/fleetapi/acker/retrier/retrier.go +++ b/internal/pkg/fleetapi/acker/retrier/retrier.go @@ -98,7 +98,7 @@ func (r *Retrier) Run(ctx context.Context) { case <-r.kickCh: r.runRetries(ctx) case <-ctx.Done(): - r.log.Debug("ack retrier: exit on %v", ctx.Err()) + r.log.Debugf("ack retrier: exit on %v", ctx.Err()) return } } diff --git a/internal/pkg/runner/runner.go b/internal/pkg/runner/runner.go new file mode 100644 index 00000000000..63f0851550e --- /dev/null +++ b/internal/pkg/runner/runner.go @@ -0,0 +1,85 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runner + +import ( + "context" + "sync" + "time" +) + +type RunnerFunc func(context.Context) error + +type Runner struct { + fn RunnerFunc + cn context.CancelFunc + + mx sync.Mutex + done chan struct{} + err error +} + +func (r *Runner) Stop() { + r.mx.Lock() + if r.cn != nil { + r.cn() + r.cn = nil + } + r.mx.Unlock() +} + +func (r *Runner) Err() error { + r.mx.Lock() + err := r.err + r.mx.Unlock() + return err +} + +func (r *Runner) Done() <-chan struct{} { + return r.done +} + +func (r *Runner) DoneWithTimeout(to time.Duration) <-chan struct{} { + done := make(chan struct{}) + + t := time.NewTimer(to) + + go func() { + defer t.Stop() + + select { + case <-r.Done(): + case <-t.C: + r.setError(context.DeadlineExceeded) + } + close(done) + }() + + return done +} + +func Start(ctx context.Context, fn RunnerFunc) *Runner { + ctx, cn := context.WithCancel(ctx) + + r := &Runner{fn: fn, cn: cn, done: make(chan struct{})} + + go func() { + err := fn(ctx) + r.setError(err) + cn() + close(r.done) + }() + + return r +} + +func (r *Runner) setError(err error) { + r.mx.Lock() + // Only set the error if it was not set before. Capturing the first error. + if r.err == nil { + r.err = err + } + r.mx.Unlock() +} diff --git a/internal/pkg/runner/runner_test.go b/internal/pkg/runner/runner_test.go new file mode 100644 index 00000000000..77660bb3eed --- /dev/null +++ b/internal/pkg/runner/runner_test.go @@ -0,0 +1,90 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package runner + +import ( + "context" + "errors" + "testing" + "time" +) + +func TestRunnerStartStop(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + runner := Start(ctx, func(ctx context.Context) error { + <-ctx.Done() + return nil + }) + + go func() { + runner.Stop() + }() + + <-runner.Done() +} + +func TestRunnerStartCancel(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + runner := Start(ctx, func(ctx context.Context) error { + <-ctx.Done() + return nil + }) + + go func() { + cn() + }() + + <-runner.Done() +} + +func TestRunnerDoneWithTimeout(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + runner := Start(ctx, func(ctx context.Context) error { + <-ctx.Done() + return nil + }) + + go func() { + runner.Stop() + }() + + // Should be done much sooner + <-runner.DoneWithTimeout(time.Second) + + // Should have no errors + if runner.Err() != nil { + t.Fatal(runner.Err()) + } +} + +func TestRunnerDoneTimedOut(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + runner := Start(ctx, func(ctx context.Context) error { + time.Sleep(time.Second) + <-ctx.Done() + return nil + }) + + go func() { + runner.Stop() + }() + + // Should be done much sooner + <-runner.DoneWithTimeout(500 * time.Millisecond) + + // Should have no errors + err := runner.Err() + if !errors.Is(err, context.DeadlineExceeded) { + t.Fatalf("want: %v, got: %v", context.DeadlineExceeded, err) + } +} diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index 573bb1653da..7fd240431d0 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -76,11 +76,10 @@ type Manager struct { mx sync.RWMutex current map[string]*componentRuntimeState - subMx sync.RWMutex - subscriptions map[string][]*Subscription - subAllMx sync.RWMutex - subscribeAll []*SubscriptionAll - subscribeAllInit chan *SubscriptionAll + subMx sync.RWMutex + subscriptions map[string][]*Subscription + subAllMx sync.RWMutex + subscribeAll []*SubscriptionAll errCh chan error @@ -202,6 +201,7 @@ func (m *Manager) WaitForReady(ctx context.Context) error { ServerName: name, Certificates: []tls.Certificate{cert}, RootCAs: caCertPool, + MinVersion: tls.VersionTLS12, }) m.waitMx.Lock() @@ -565,7 +565,7 @@ func (m *Manager) update(components []component.Component, teardown bool) error continue } // component was removed (time to clean it up) - existing.stop(teardown) + _ = existing.stop(teardown) } return nil } @@ -589,7 +589,8 @@ func (m *Manager) shutdown() { } } -func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) { +// stateChanged notifies of the state change and returns true if the state is final (stopped) +func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentState) (exit bool) { m.subAllMx.RLock() for _, sub := range m.subscribeAll { select { @@ -621,8 +622,9 @@ func (m *Manager) stateChanged(state *componentRuntimeState, latest ComponentSta delete(m.current, state.currComp.ID) m.mx.Unlock() - state.destroy() + exit = true } + return exit } func (m *Manager) getCertificate(chi *tls.ClientHelloInfo) (*tls.Certificate, error) { diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index aae913efac4..43731645b5f 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" 
"github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/elastic/elastic-agent-libs/atomic" + "github.com/elastic/elastic-agent/internal/pkg/runner" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -77,12 +78,6 @@ type componentRuntimeState struct { latestMx sync.RWMutex latestState ComponentState - watchChan chan bool - watchCanceller context.CancelFunc - - runChan chan bool - runCanceller context.CancelFunc - actionsMx sync.Mutex actions map[string]func(*proto.ActionResponse) } @@ -97,8 +92,6 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. return nil, err } - watchChan := make(chan bool) - runChan := make(chan bool) state := &componentRuntimeState{ manager: m, logger: logger, @@ -110,25 +103,33 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. Message: "Starting", Units: nil, }, - watchChan: watchChan, - runChan: runChan, - actions: make(map[string]func(response *proto.ActionResponse)), + actions: make(map[string]func(response *proto.ActionResponse)), } + // start the go-routine that operates the runtime for the component + runtimeRunner := runner.Start(context.Background(), func(ctx context.Context) error { + defer comm.destroy() + _ = runtime.Run(ctx, comm) + return nil + }) + // start the go-routine that watches for updates from the component - watchCtx, watchCanceller := context.WithCancel(context.Background()) - state.watchCanceller = watchCanceller - go func() { - defer close(watchChan) + runner.Start(context.Background(), func(ctx context.Context) error { for { select { - case <-watchCtx.Done(): - return + case <-ctx.Done(): + runtimeRunner.Stop() + case <-runtimeRunner.Done(): + // Exit from the watcher loop only when the runner is done + // This is the same behaviour as before this change, just refactored and cleaned up + return nil case s := <-runtime.Watch(): state.latestMx.Lock() state.latestState = s state.latestMx.Unlock() - state.manager.stateChanged(state, s) + if state.manager.stateChanged(state, s) { + runtimeRunner.Stop() + } case ar := <-comm.actionsResponse: state.actionsMx.Lock() callback, ok := state.actions[ar.Id] @@ -141,16 +142,7 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component. 
} } } - }() - - // start the go-routine that operates the runtime for the component - runCtx, runCanceller := context.WithCancel(context.Background()) - state.runCanceller = runCanceller - go func() { - defer close(runChan) - defer comm.destroy() - _ = runtime.Run(runCtx, comm) - }() + }) return state, nil } @@ -167,19 +159,6 @@ func (s *componentRuntimeState) stop(teardown bool) error { return s.runtime.Stop() } -func (s *componentRuntimeState) destroy() { - if s.runCanceller != nil { - s.runCanceller() - s.runCanceller = nil - <-s.runChan - } - if s.watchCanceller != nil { - s.watchCanceller() - s.watchCanceller = nil - <-s.watchChan - } -} - func (s *componentRuntimeState) performAction(ctx context.Context, req *proto.ActionRequest) (*proto.ActionResponse, error) { ch := make(chan *proto.ActionResponse) callback := func(response *proto.ActionResponse) { diff --git a/pkg/component/runtime/runtime_comm.go b/pkg/component/runtime/runtime_comm.go index 4e9b4c23598..2bc2e297179 100644 --- a/pkg/component/runtime/runtime_comm.go +++ b/pkg/component/runtime/runtime_comm.go @@ -48,10 +48,13 @@ type runtimeComm struct { token string cert *authority.Pair - checkinConn bool - checkinDone chan bool - checkinLock sync.RWMutex - checkinExpected chan *proto.CheckinExpected + checkinConn bool + checkinDone chan bool + checkinLock sync.RWMutex + + checkinExpectedLock sync.Mutex + checkinExpected chan *proto.CheckinExpected + checkinObserved chan *proto.CheckinObserved actionsConn bool @@ -83,7 +86,7 @@ func newRuntimeComm(logger *logger.Logger, listenAddr string, ca *authority.Cert token: token.String(), cert: pair, checkinConn: true, - checkinExpected: make(chan *proto.CheckinExpected), + checkinExpected: make(chan *proto.CheckinExpected, 1), // size of 1 channel to keep the latest expected checkin state checkinObserved: make(chan *proto.CheckinObserved), actionsConn: true, actionsRequest: make(chan *proto.ActionRequest), @@ -136,7 +139,20 @@ func (c *runtimeComm) CheckinExpected(expected *proto.CheckinExpected) { } else { expected.AgentInfo = nil } + + // Lock to avoid race if this function is called from the different go routines + c.checkinExpectedLock.Lock() + + // Empty the channel + select { + case <-c.checkinExpected: + default: + } + + // Put the new expected state in c.checkinExpected <- expected + + c.checkinExpectedLock.Unlock() } func (c *runtimeComm) CheckinObserved() <-chan *proto.CheckinObserved { From 9d1cea3385b749e9642ae08584ab6fecca0cf6cd Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 29 Sep 2022 09:50:05 -0400 Subject: [PATCH 24/49] [v2] Re-enable diagnostics for Elastic Agent and all components (#1140) * Add diagnostics back to v2. 
* Update pkg/component/runtime/manager.go Co-authored-by: Anderson Queiroz Co-authored-by: Anderson Queiroz --- control.proto | 128 +-- .../application/coordinator/coordinator.go | 170 ++- internal/pkg/agent/cmd/diagnostics.go | 679 ++---------- internal/pkg/agent/cmd/diagnostics_test.go | 216 ---- internal/pkg/agent/cmd/run.go | 58 +- internal/pkg/agent/control/client/client.go | 221 ++-- internal/pkg/agent/control/control_test.go | 2 +- .../pkg/agent/control/cproto/control.pb.go | 986 +++++++++--------- .../agent/control/cproto/control_grpc.pb.go | 104 +- internal/pkg/agent/control/server/server.go | 451 ++------ internal/pkg/agent/transpiler/vars.go | 6 + internal/pkg/basecmd/version/cmd_test.go | 4 +- internal/pkg/diagnostics/diagnostics.go | 102 ++ pkg/component/load.go | 11 + pkg/component/runtime/manager.go | 123 ++- pkg/component/runtime/state.go | 20 +- pkg/component/spec.go | 6 + specs/endpoint-security.spec.yml | 54 +- 18 files changed, 1317 insertions(+), 2024 deletions(-) delete mode 100644 internal/pkg/agent/cmd/diagnostics_test.go create mode 100644 internal/pkg/diagnostics/diagnostics.go diff --git a/control.proto b/control.proto index a1a7a3f8b82..4bcef0ea3ed 100644 --- a/control.proto +++ b/control.proto @@ -8,6 +8,7 @@ package cproto; option cc_enable_arenas = true; option go_package = "internal/pkg/agent/control/cproto"; +import "google/protobuf/timestamp.proto"; // State codes for the current state. enum State { @@ -139,77 +140,89 @@ message ComponentState { ComponentVersionInfo version_info = 6; } -// Current metadata for a running process. -message ProcMeta { - string process = 1; - string name = 2; - string hostname = 3; - string id = 4; - string ephemeral_id = 5; - string version = 6; - string build_commit = 7; - string build_time = 8; - string username = 9; - string user_id = 10; - string user_gid = 11; - string architecture = 12; - string route_key = 13; - bool elastic_licensed = 14; - string error = 15; +message StateAgentInfo { + // Current ID of the Agent. + string id = 1; + // Current running version. + string version = 2; + // Current running commit. + string commit = 3; + // Current running build time. + string buildTime = 4; + // Current running version is a snapshot. + bool snapshot = 5; } // StateResponse is the current state of Elastic Agent. message StateResponse { + // Overall information of Elastic Agent. + StateAgentInfo info = 1; // Overall state of Elastic Agent. - State state = 1; + State state = 2; // Overall status message of Elastic Agent. - string message = 2; + string message = 3; // Status of each component in Elastic Agent. - repeated ComponentState components = 3; + repeated ComponentState components = 4; } -// ProcMetaResponse is the current running version infomation for all processes. -message ProcMetaResponse { - repeated ProcMeta procs = 1; +// DiagnosticFileResult is a file result from a diagnostic result. +message DiagnosticFileResult { + // Human readable name of the diagnostic result content. + string name = 1; + // Filename to use to store the diagnostic to the disk. + string filename = 2; + // Human readable description of the information this diagnostic provides. + string description = 3; + // Content-Type of the resulting content. + string content_type = 4; + // Actual file content. + bytes content = 5; + // Timestamp the content was generated at. + google.protobuf.Timestamp generated = 6; } -// PprofRequest is a request for pprof data from and http/pprof endpoint. 
-message PprofRequest { - // The profiles that are requested - repeated PprofOption pprofType = 1; - // A string representing a time.Duration to apply to trace, and profile options. - string traceDuration = 2; - // The application that will be profiled, if empty all applications are profiled. - string appName = 3; - // The route key to match for profiling, if empty all are profiled. - string routeKey = 4; +// DiagnosticAgentRequest is request to gather diagnostic information about the Elastic Agent. +message DiagnosticAgentRequest { } -// PprofResult is the result of a pprof request for a given application/route key. -message PprofResult { - string appName = 1; - string routeKey = 2; - PprofOption pprofType = 3; - bytes result = 4; - string error = 5; +// DiagnosticAgentResponse is response to gathered diagnostic information about the Elastic Agent. +message DiagnosticAgentResponse { + // Diagnostic results for the agent. + repeated DiagnosticFileResult results = 1; } -// PprofResponse is a wrapper to return all pprof responses. -message PprofResponse { - repeated PprofResult results = 1; +// DiagnosticUnitRequest specifies a specific unit to gather diagnostics from. +message DiagnosticUnitRequest { + // Type of unit. + UnitType unit_type = 2; + // ID of the unit. + string unit_id = 3; } -// MetricsResponse is the result of a request for the metrics buffer endpoint for a application/route key -message MetricsResponse { - string appName = 1; - string routeKey = 2; - bytes result = 3; - string error = 4; +// DiagnosticUnitsRequest allows a diagnostic request to specify the units to target. +message DiagnosticUnitsRequest { + // Specific units to target. (If no units are given then a result for all units is provided). + repeated DiagnosticUnitRequest units = 1; } -// ProcMetricsResponse is a wrapper to return all metrics buffer responses -message ProcMetricsResponse { - repeated MetricsResponse result = 1; +// DiagnosticUnitResponse is diagnostic information about a specific unit. +message DiagnosticUnitResponse { + // ID of the component. + string component_id = 1; + // Type of unit. + UnitType unit_type = 2; + // ID of the unit. + string unit_id = 3; + // Error message for the failure fetching diagnostic information for this unit. + string error = 4; + // Diagnostic results for the unit. + repeated DiagnosticFileResult results = 5; +} + +// DiagnosticUnitsResponse is response to gathered units diagnostic information. +message DiagnosticUnitsResponse { + // Diagnostics results per unit. + repeated DiagnosticUnitResponse units = 2; } service ElasticAgentControl { @@ -225,12 +238,9 @@ service ElasticAgentControl { // Upgrade starts the upgrade process of Elastic Agent. rpc Upgrade(UpgradeRequest) returns (UpgradeResponse); - // Gather all running process metadata. - rpc ProcMeta(Empty) returns (ProcMetaResponse); - - // Gather requested pprof data from specified applications. - rpc Pprof(PprofRequest) returns (PprofResponse); + // Gather diagnostic information for the running Elastic Agent. + rpc DiagnosticAgent(DiagnosticAgentRequest) returns (DiagnosticAgentResponse); - // Gather all running process metrics. - rpc ProcMetrics(Empty) returns (ProcMetricsResponse); + // Gather diagnostic information for the running units. 
+ rpc DiagnosticUnits(DiagnosticUnitsRequest) returns (DiagnosticUnitsResponse); } diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index 30fcdfcce81..e49198da65f 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -9,6 +9,10 @@ import ( "errors" "fmt" + "gopkg.in/yaml.v2" + + "github.com/elastic/elastic-agent/internal/pkg/diagnostics" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "go.elastic.co/apm" @@ -75,6 +79,10 @@ type RuntimeManager interface { // SubscribeAll provides an interface to watch for changes in all components. SubscribeAll(context.Context) *runtime.SubscriptionAll + + // PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then + // it performs diagnostics for all current units. + PerformDiagnostics(context.Context, ...component.Unit) []runtime.ComponentUnitDiagnostic } // ConfigChange provides an interface for receiving a new configuration. @@ -117,9 +125,9 @@ type ComponentsModifier func(comps []component.Component) ([]component.Component // State provides the current state of the coordinator along with all the current states of components and units. type State struct { - State agentclient.State - Message string - Components []runtime.ComponentComponentState + State agentclient.State `yaml:"state"` + Message string `yaml:"message"` + Components []runtime.ComponentComponentState `yaml:"components"` } // StateFetcher provides an interface to fetch the current state of the coordinator. @@ -252,6 +260,12 @@ func (c *Coordinator) PerformAction(ctx context.Context, unit component.Unit, na return c.runtimeMgr.PerformAction(ctx, unit, name, params) } +// PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then +// it performs diagnostics for all current units. +func (c *Coordinator) PerformDiagnostics(ctx context.Context, units ...component.Unit) []runtime.ComponentUnitDiagnostic { + return c.runtimeMgr.PerformDiagnostics(ctx, units...) +} + // Run runs the coordinator. // // The RuntimeManager, ConfigManager and VarsManager that is passed into NewCoordinator are also ran and lifecycle controlled by the Run. @@ -301,6 +315,119 @@ func (c *Coordinator) Run(ctx context.Context) error { } } +// DiagnosticHooks returns diagnostic hooks that can be connected to the control server to provide diagnostic +// information about the state of the Elastic Agent. 
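+// Each hook returns the raw content of one file in the diagnostics archive;
+// hook failures are reported inline as the file body (an "error: ..." string)
+// rather than failing the whole diagnostics request.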
+func (c *Coordinator) DiagnosticHooks() diagnostics.Hooks { + return diagnostics.Hooks{ + { + Name: "pre-config", + Filename: "pre-config.yaml", + Description: "current pre-configuration of the running Elastic Agent before variable substitution", + ContentType: "application/yaml", + Hook: func(_ context.Context) []byte { + if c.state.ast == nil { + return []byte("error: failed no configuration by the coordinator") + } + cfg, err := c.state.ast.Map() + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + o, err := yaml.Marshal(cfg) + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + return o + }, + }, + { + Name: "variables", + Filename: "variables.yaml", + Description: "current variable contexts of the running Elastic Agent", + ContentType: "application/yaml", + Hook: func(_ context.Context) []byte { + if c.state.vars == nil { + return []byte("error: failed no variables by the coordinator") + } + vars := make([]map[string]interface{}, 0, len(c.state.vars)) + for _, v := range c.state.vars { + m, err := v.Map() + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + vars = append(vars, m) + } + o, err := yaml.Marshal(struct { + Variables []map[string]interface{} `yaml:"variables"` + }{ + Variables: vars, + }) + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + return o + }, + }, + { + Name: "computed-config", + Filename: "computed-config.yaml", + Description: "current computed configuration of the running Elastic Agent after variable substitution", + ContentType: "application/yaml", + Hook: func(_ context.Context) []byte { + if c.state.ast == nil || c.state.vars == nil { + return []byte("error: failed no configuration or variables received by the coordinator") + } + cfg, _, err := c.compute() + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + o, err := yaml.Marshal(cfg) + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + return o + }, + }, + { + Name: "components", + Filename: "components.yaml", + Description: "current expected components model of the running Elastic Agent", + ContentType: "application/yaml", + Hook: func(_ context.Context) []byte { + if c.state.ast == nil || c.state.vars == nil { + return []byte("error: failed no configuration or variables received by the coordinator") + } + _, comps, err := c.compute() + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + o, err := yaml.Marshal(struct { + Components []component.Component `yaml:"components"` + }{ + Components: comps, + }) + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + return o + }, + }, + { + Name: "state", + Filename: "state.yaml", + Description: "current state of running components by the Elastic Agent", + ContentType: "application/yaml", + Hook: func(_ context.Context) []byte { + s := c.State() + o, err := yaml.Marshal(s) + if err != nil { + return []byte(fmt.Sprintf("error: %q", err)) + } + return o + }, + }, + } +} + // runner performs the actual work of running all the managers // // if one of the managers fails the others are also stopped and then the whole runner returns @@ -469,44 +596,53 @@ func (c *Coordinator) process(ctx context.Context) (err error) { span.End() }() + _, comps, err := c.compute() + if err != nil { + return err + } + + c.logger.Info("Updating running component model") + c.logger.With("components", comps).Debug("Updating running component model") + err = c.runtimeMgr.Update(comps) + if err != nil { + return err + } + c.state.state = 
agentclient.Healthy + c.state.message = "Running" + return nil +} + +func (c *Coordinator) compute() (map[string]interface{}, []component.Component, error) { ast := c.state.ast.Clone() inputs, ok := transpiler.Lookup(ast, "inputs") if ok { renderedInputs, err := transpiler.RenderInputs(inputs, c.state.vars) if err != nil { - return fmt.Errorf("rendering inputs failed: %w", err) + return nil, nil, fmt.Errorf("rendering inputs failed: %w", err) } err = transpiler.Insert(ast, renderedInputs, "inputs") if err != nil { - return fmt.Errorf("inserting rendered inputs failed: %w", err) + return nil, nil, fmt.Errorf("inserting rendered inputs failed: %w", err) } } cfg, err := ast.Map() if err != nil { - return fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) + return nil, nil, fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err) } comps, err := c.specs.ToComponents(cfg) if err != nil { - return fmt.Errorf("failed to render components: %w", err) + return nil, nil, fmt.Errorf("failed to render components: %w", err) } for _, modifier := range c.modifiers { comps, err = modifier(comps) if err != nil { - return fmt.Errorf("failed to modify components: %w", err) + return nil, nil, fmt.Errorf("failed to modify components: %w", err) } } - c.logger.Info("Updating running component model") - c.logger.With("components", comps).Debug("Updating running component model") - err = c.runtimeMgr.Update(comps) - if err != nil { - return err - } - c.state.state = agentclient.Healthy - c.state.message = "Running" - return nil + return cfg, comps, nil } type coordinatorState struct { diff --git a/internal/pkg/agent/cmd/diagnostics.go b/internal/pkg/agent/cmd/diagnostics.go index f267c2df162..9fab842375e 100644 --- a/internal/pkg/agent/cmd/diagnostics.go +++ b/internal/pkg/agent/cmd/diagnostics.go @@ -7,7 +7,6 @@ package cmd import ( "archive/zip" "context" - "encoding/json" stderrors "errors" "fmt" "io" @@ -15,240 +14,67 @@ import ( "os" "path/filepath" "strings" - "text/tabwriter" "time" "github.com/hashicorp/go-multierror" "github.com/spf13/cobra" - "gopkg.in/yaml.v2" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/agent/configuration" "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" - "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/cli" - "github.com/elastic/elastic-agent/internal/pkg/config/operations" + "github.com/elastic/elastic-agent/pkg/component" ) -const ( - outputTypeHuman = "human" - outputTypeJSON = "json" - outputTypeYAML = "yaml" -) - -var diagOutputs = map[string]outputter{ - outputTypeHuman: humanDiagnosticsOutput, - outputTypeJSON: jsonOutput, - outputTypeYAML: yamlOutput, -} - -// DiagnosticsInfo a struct to track all information related to diagnostics for the agent. -type DiagnosticsInfo struct { - ProcMeta []client.ProcMeta - AgentInfo AgentInfo -} - -// AgentInfo contains all information about the running Agent. -type AgentInfo struct { - ID string - Version string - Commit string - BuildTime time.Time - Snapshot bool -} - -// AgentConfig tracks all configuration that the agent uses, local files, rendered policies, beat inputs etc. 
-type AgentConfig struct { - ConfigLocal *configuration.Configuration - ConfigRendered map[string]interface{} - AppConfig map[string]interface{} // map of processName_rk:config -} - -func newDiagnosticsCommand(s []string, streams *cli.IOStreams) *cobra.Command { +func newDiagnosticsCommand(_ []string, streams *cli.IOStreams) *cobra.Command { cmd := &cobra.Command{ Use: "diagnostics", - Short: "Gather diagnostics information from the elastic-agent and running processes.", - Long: "Gather diagnostics information from the elastic-agent and running processes.", + Short: "Gather diagnostics information from the elastic-agent and write it to a zip archive.", + Long: "Gather diagnostics information from the elastic-agent and write it to a zip archive.", Run: func(c *cobra.Command, args []string) { - if err := diagnosticCmd(streams, c, args); err != nil { + if err := diagnosticCmd(streams, c); err != nil { fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage()) os.Exit(1) } }, } - cmd.Flags().String("output", "human", "Output the diagnostics information in either human, json, or yaml (default: human)") - cmd.AddCommand(newDiagnosticsCollectCommandWithArgs(s, streams)) - cmd.AddCommand(newDiagnosticsPprofCommandWithArgs(s, streams)) - - return cmd -} - -func newDiagnosticsCollectCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { - cmd := &cobra.Command{ - Use: "collect", - Short: "Collect diagnostics information from the elastic-agent and write it to a zip archive.", - Long: "Collect diagnostics information from the elastic-agent and write it to a zip archive.\nNote that any credentials will appear in plain text.", - Args: cobra.MaximumNArgs(3), - RunE: func(c *cobra.Command, args []string) error { - file, _ := c.Flags().GetString("file") - - if file == "" { - ts := time.Now().UTC() - file = "elastic-agent-diagnostics-" + ts.Format("2006-01-02T15-04-05Z07-00") + ".zip" // RFC3339 format that replaces : with -, so it will work on Windows - } - - output, _ := c.Flags().GetString("output") - switch output { - case outputTypeYAML: - case outputTypeJSON: - default: - return fmt.Errorf("unsupported output: %s", output) - } - - pprof, _ := c.Flags().GetBool("pprof") - d, _ := c.Flags().GetDuration("pprof-duration") - // get the command timeout value only if one is set explicitly. - // otherwise a value of 30s + pprof-duration will be used. - var timeout time.Duration - if c.Flags().Changed("timeout") { - timeout, _ = c.Flags().GetDuration("timeout") - } - - return diagnosticsCollectCmd(streams, file, output, pprof, d, timeout) - }, - } - cmd.Flags().StringP("file", "f", "", "name of the output diagnostics zip archive") - cmd.Flags().String("output", "yaml", "Output the collected information in either json, or yaml (default: yaml)") // replace output flag with different options - cmd.Flags().Bool("pprof", false, "Collect all pprof data from all running applications.") - cmd.Flags().Duration("pprof-duration", time.Second*30, "The duration to collect trace and profiling data from the debug/pprof endpoints. (default: 30s)") - cmd.Flags().Duration("timeout", time.Second*30, "The timeout for the diagnostics collect command, will be either 30s or 30s+pprof-duration by default. 
Should be longer then pprof-duration when pprof is enabled as the command needs time to process/archive the response.") return cmd } -func newDiagnosticsPprofCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command { - cmd := &cobra.Command{ - Use: "pprof", - Short: "Collect pprof information from a running process.", - Long: "Collect pprof information from the elastic-agent or one of its processes and write to stdout or a file.\nBy default it will gather a 30s profile of the elastic-agent and output on stdout.", - Args: cobra.MaximumNArgs(5), - RunE: func(c *cobra.Command, args []string) error { - file, _ := c.Flags().GetString("file") - pprofType, _ := c.Flags().GetString("pprof-type") - d, _ := c.Flags().GetDuration("pprof-duration") - // get the command timeout value only if one is set explicitly. - // otherwise a value of 30s + pprof-duration will be used. - var timeout time.Duration - if c.Flags().Changed("timeout") { - timeout, _ = c.Flags().GetDuration("timeout") - } - - pprofApp, _ := c.Flags().GetString("pprof-application") - pprofRK, _ := c.Flags().GetString("pprof-route-key") - - return diagnosticsPprofCmd(streams, d, timeout, file, pprofType, pprofApp, pprofRK) - }, +func diagnosticCmd(streams *cli.IOStreams, cmd *cobra.Command) error { + fileName, _ := cmd.Flags().GetString("file") + if fileName == "" { + ts := time.Now().UTC() + fileName = "elastic-agent-diagnostics-" + ts.Format("2006-01-02T15-04-05Z07-00") + ".zip" // RFC3339 format that replaces : with -, so it will work on Windows } - cmd.Flags().StringP("file", "f", "", "name of the output file, stdout if unspecified.") - cmd.Flags().String("pprof-type", "profile", "Collect all pprof data from all running applications. Select one of [allocs, block, cmdline, goroutine, heap, mutex, profile, threadcreate, trace]") - cmd.Flags().Duration("pprof-duration", time.Second*30, "The duration to collect trace and profiling data from the debug/pprof endpoints. (default: 30s)") - cmd.Flags().Duration("timeout", time.Second*60, "The timeout for the pprof collect command, defaults to 30s+pprof-duration by default. 
Should be longer then pprof-duration as the command needs time to process the response.") - cmd.Flags().String("pprof-application", "elastic-agent", "Application name to collect pprof data from.") - cmd.Flags().String("pprof-route-key", "default", "Route key to collect pprof data from.") - - return cmd -} - -func diagnosticCmd(streams *cli.IOStreams, cmd *cobra.Command, _ []string) error { err := tryContainerLoadPaths() if err != nil { return err } - output, _ := cmd.Flags().GetString("output") - outputFunc, ok := diagOutputs[output] - if !ok { - return fmt.Errorf("unsupported output: %s", output) - } - ctx := handleSignal(context.Background()) - innerCtx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - diag, err := getDiagnostics(innerCtx) - if errors.Is(err, context.DeadlineExceeded) { - return errors.New("timed out after 30 seconds trying to connect to Elastic Agent daemon") - } else if errors.Is(err, context.Canceled) { - return nil - } else if err != nil { - return fmt.Errorf("failed to communicate with Elastic Agent daemon: %w", err) - } - - return outputFunc(streams.Out, diag) -} -func diagnosticsCollectCmd(streams *cli.IOStreams, fileName, outputFormat string, pprof bool, pprofDur, cmdTimeout time.Duration) error { - err := tryContainerLoadPaths() + daemon := client.New() + err = daemon.Connect(ctx) if err != nil { - return err - } - - ctx := handleSignal(context.Background()) - // set command timeout to 30s or 30s+pprofDur if no timeout is specified - if cmdTimeout == time.Duration(0) { - cmdTimeout = time.Second * 30 - if pprof { - cmdTimeout += pprofDur - } - - } - innerCtx, cancel := context.WithTimeout(ctx, cmdTimeout) - defer cancel() - - errs := make([]error, 0) - diag, err := getDiagnostics(innerCtx) - if errors.Is(err, context.DeadlineExceeded) { - return errors.New("timed out after 30 seconds trying to connect to Elastic Agent daemon") - } else if errors.Is(err, context.Canceled) { - return nil - } else if err != nil { - errs = append(errs, fmt.Errorf("unable to gather diagnostics data: %w", err)) - fmt.Fprintf(streams.Err, "Failed to gather diagnostics data from elastic-agent: %v\n", err) + return fmt.Errorf("failed to connect to daemon: %w", err) } + defer daemon.Disconnect() - metrics, err := gatherMetrics(innerCtx) + agentDiag, err := daemon.DiagnosticAgent(ctx) if err != nil { - if errors.Is(err, context.DeadlineExceeded) { - return fmt.Errorf("timed out after %s trying to connect to Elastic Agent daemon", cmdTimeout) - } - if errors.Is(err, context.Canceled) { - return nil - } - errs = append(errs, fmt.Errorf("unable to gather metrics data: %w", err)) - fmt.Fprintf(streams.Err, "Failed to gather metrics data from elastic-agent: %v\n", err) + return fmt.Errorf("failed to fetch agent diagnostics: %w", err) } - cfg, err := gatherConfig() + unitDiags, err := daemon.DiagnosticUnits(ctx) if err != nil { - errs = append(errs, fmt.Errorf("unable to gather config data: %w", err)) - fmt.Fprintf(streams.Err, "Failed to gather config data from elastic-agent: %v\n", err) + return fmt.Errorf("failed to fetch component/unit diagnostics: %w", err) } - var pprofData map[string][]client.ProcPProf - if pprof { - pprofData, err = getAllPprof(innerCtx, pprofDur) - if err != nil { - errs = append(errs, fmt.Errorf("unable to gather pprof data: %w", err)) - fmt.Fprintf(streams.Err, "Failed to gather pprof data from elastic-agent: %v\n", err) - } - } - - err = createZip(fileName, outputFormat, diag, cfg, pprofData, metrics, errs) + err = createZip(fileName, 
agentDiag, unitDiags) if err != nil { return fmt.Errorf("unable to create archive %q: %w", fileName, err) } @@ -257,278 +83,74 @@ func diagnosticsCollectCmd(streams *cli.IOStreams, fileName, outputFormat string return nil } -func diagnosticsPprofCmd(streams *cli.IOStreams, dur, cmdTimeout time.Duration, outFile, pType, appName, rk string) error { - pt, ok := cproto.PprofOption_value[strings.ToUpper(pType)] - if !ok { - return fmt.Errorf("unknown pprof-type %q, select one of [allocs, block, cmdline, goroutine, heap, mutex, profile, threadcreate, trace]", pType) - } - - // the elastic-agent application does not have a route key - if appName == "elastic-agent" { - rk = "" - } - - ctx := handleSignal(context.Background()) - // set cmdTimeout to 30s+dur if not set. - if cmdTimeout == time.Duration(0) { - cmdTimeout = time.Second*30 + dur - } - innerCtx, cancel := context.WithTimeout(ctx, cmdTimeout) - defer cancel() - - daemon := client.New() - err := daemon.Connect(ctx) - if err != nil { - return err - } - - pprofData, err := daemon.Pprof(innerCtx, dur, []cproto.PprofOption{cproto.PprofOption(pt)}, appName, rk) - if err != nil { - return err - } - - // validate response - pArr, ok := pprofData[cproto.PprofOption_name[pt]] - if !ok { - return fmt.Errorf("route key %q not found in response data (map length: %d)", rk, len(pprofData)) - } - if len(pArr) != 1 { - return fmt.Errorf("pprof type length 1 expected, received %d", len(pArr)) - } - res := pArr[0] - - if res.Error != "" { - return fmt.Errorf(res.Error) - } - - // handle result - if outFile != "" { - f, err := os.Create(outFile) - if err != nil { - return err - } - defer f.Close() - _, err = f.Write(res.Result) - if err != nil { - return err - } - fmt.Fprintf(streams.Out, "pprof data written to %s\n", outFile) - return nil - } - _, err = streams.Out.Write(res.Result) - return err -} - -func getDiagnostics(ctx context.Context) (DiagnosticsInfo, error) { - daemon := client.New() - diag := DiagnosticsInfo{} - err := daemon.Connect(ctx) - if err != nil { - return DiagnosticsInfo{}, err - } - defer daemon.Disconnect() - - bv, err := daemon.ProcMeta(ctx) - if err != nil { - return DiagnosticsInfo{}, err - } - diag.ProcMeta = bv - - version, err := daemon.Version(ctx) - if err != nil { - return diag, err - } - diag.AgentInfo = AgentInfo{ - Version: version.Version, - Commit: version.Commit, - BuildTime: version.BuildTime, - Snapshot: version.Snapshot, - } - - agentInfo, err := info.NewAgentInfo(false) - if err != nil { - return diag, err - } - diag.AgentInfo.ID = agentInfo.AgentID() - - return diag, nil -} - -func gatherMetrics(ctx context.Context) (*cproto.ProcMetricsResponse, error) { - daemon := client.New() - err := daemon.Connect(ctx) - if err != nil { - return nil, err - } - defer daemon.Disconnect() - - return daemon.ProcMetrics(ctx) -} - -func humanDiagnosticsOutput(w io.Writer, obj interface{}) error { - diag, ok := obj.(DiagnosticsInfo) - if !ok { - return fmt.Errorf("unable to cast %T as DiagnosticsInfo", obj) - } - return outputDiagnostics(w, diag) -} - -func outputDiagnostics(w io.Writer, d DiagnosticsInfo) error { - tw := tabwriter.NewWriter(w, 4, 1, 2, ' ', 0) - fmt.Fprintf(tw, "elastic-agent\tid: %s\tversion: %s\n", d.AgentInfo.ID, d.AgentInfo.Version) - fmt.Fprintf(tw, "\tbuild_commit: %s\tbuild_time: %s\tsnapshot_build: %v\n", d.AgentInfo.Commit, d.AgentInfo.BuildTime, d.AgentInfo.Snapshot) - if len(d.ProcMeta) == 0 { - fmt.Fprintf(tw, "Applications: (none)\n") - } else { - fmt.Fprintf(tw, "Applications:\n") - for _, app := range 
d.ProcMeta {
-			fmt.Fprintf(tw, "  *\tname: %s\troute_key: %s\n", app.Name, app.RouteKey)
-			if app.Error != "" {
-				fmt.Fprintf(tw, "\terror: %s\n", app.Error)
-			} else {
-				fmt.Fprintf(tw, "\tprocess: %s\tid: %s\tephemeral_id: %s\telastic_license: %v\n", app.Process, app.ID, app.EphemeralID, app.ElasticLicensed)
-				fmt.Fprintf(tw, "\tversion: %s\tcommit: %s\tbuild_time: %s\tbinary_arch: %v\n", app.Version, app.BuildCommit, app.BuildTime, app.BinaryArchitecture)
-				fmt.Fprintf(tw, "\thostname: %s\tusername: %s\tuser_id: %s\tuser_gid: %s\n", app.Hostname, app.Username, app.UserID, app.UserGID)
-			}
-
-		}
-	}
-	tw.Flush()
-	return nil
-}
-
-func gatherConfig() (AgentConfig, error) {
-	log, err := newErrorLogger()
-	if err != nil {
-		return AgentConfig{}, err
-	}
-
-	cfg := AgentConfig{}
-	localCFG, err := loadConfig(nil)
-	if err != nil {
-		return cfg, err
-	}
-	cfg.ConfigLocal = localCFG
-
-	renderedCFG, err := operations.LoadFullAgentConfig(log, paths.ConfigFile(), true)
-	if err != nil {
-		return cfg, err
-	}
-
-	agentInfo, err := info.NewAgentInfo(false)
-	if err != nil {
-		return cfg, err
-	}
-
-	if cfg.ConfigLocal.Fleet.Info.ID == "" {
-		cfg.ConfigLocal.Fleet.Info.ID = agentInfo.AgentID()
-	}
-
-	// Must force *config.Config to map[string]interface{} in order to write to a file.
-	mapCFG, err := renderedCFG.ToMapStr()
-	if err != nil {
-		return cfg, err
-	}
-	cfg.ConfigRendered = mapCFG
-
-	// TODO(blakerouse): Fix diagnostic command for Elastic Agent v2
-	/*
-		// Gather vars to render process config
-		isStandalone, err := isStandalone(renderedCFG)
-		if err != nil {
-			return AgentConfig{}, err
-		}
-
-		// Get process config - uses same approach as inspect output command.
-		// Does not contact server process to request configs.
-		pMap, err := getProgramsFromConfig(log, agentInfo, renderedCFG, isStandalone)
-		if err != nil {
-			return AgentConfig{}, err
-		}
-		cfg.AppConfig = make(map[string]interface{}, 0)
-		for rk, programs := range pMap {
-			for _, p := range programs {
-				cfg.AppConfig[p.Identifier()+"_"+rk] = p.Configuration()
-			}
-		}
-	*/
-
-	return cfg, nil
-}
-
 // createZip creates a zip archive with the passed fileName.
 //
-// The passed DiagnosticsInfo and AgentConfig data is written in the specified output format.
+// The passed agent diagnostics are written at the top level of the archive and
+// the unit diagnostics are written per component under components/.
 // Any local log files are collected and copied into the archive.
-func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentConfig, pprof map[string][]client.ProcPProf, metrics *cproto.ProcMetricsResponse, errs []error) error {
+func createZip(fileName string, agentDiag []client.DiagnosticFileResult, unitDiags []client.DiagnosticUnitResult) error {
 	f, err := os.Create(fileName)
 	if err != nil {
 		return err
 	}
 	zw := zip.NewWriter(f)
-	if len(errs) > 0 {
-		zf, err := zw.Create("errors.txt")
+	// write all Elastic Agent diagnostics at the top level
+	for _, ad := range agentDiag {
+		zf, err := zw.Create(ad.Filename)
 		if err != nil {
 			return closeHandlers(err, zw, f)
 		}
-		for i, e := range errs {
-			fmt.Fprintf(zf, "Error %d: %v\n", i+1, e)
-		}
-	}
-
-	_, err = zw.Create("meta/")
-	if err != nil {
-		return closeHandlers(err, zw, f)
-	}
-
-	zf, err := zw.Create("meta/elastic-agent-version." + outputFormat)
-	if err != nil {
-		return closeHandlers(err, zw, f)
-	}
-
-	if err := writeFile(zf, outputFormat, diag.AgentInfo); err != nil {
-		return closeHandlers(err, zw, f)
-	}
-
-	for _, m := range diag.ProcMeta {
-		zf, err = zw.Create("meta/" + m.Name + "-" + m.RouteKey + "." + outputFormat)
+		_, err = zf.Write(ad.Content)
 		if err != nil {
 			return closeHandlers(err, zw, f)
 		}
-
-		if err := writeFile(zf, outputFormat, m); err != nil {
-			return closeHandlers(err, zw, f)
-		}
 	}
 
-	_, err = zw.Create("config/")
-	if err != nil {
-		return closeHandlers(err, zw, f)
+	// structure each unit into its own component directory
+	compDirs := make(map[string][]client.DiagnosticUnitResult)
+	for _, ud := range unitDiags {
+		compDir := strings.ReplaceAll(ud.ComponentID, "/", "-")
+		compDirs[compDir] = append(compDirs[compDir], ud)
 	}
 
-	zf, err = zw.Create("config/elastic-agent-local." + outputFormat)
+	// write each unit's diagnostics into its own directory
+	// layout becomes components/<component-id>/<unit-id>/<filename>
+	_, err = zw.Create("components/")
 	if err != nil {
 		return closeHandlers(err, zw, f)
 	}
-	if err := writeFile(zf, outputFormat, cfg.ConfigLocal); err != nil {
-		return closeHandlers(err, zw, f)
-	}
-
-	zf, err = zw.Create("config/elastic-agent-policy." + outputFormat)
-	if err != nil {
-		return closeHandlers(err, zw, f)
-	}
-	if err := writeFile(zf, outputFormat, cfg.ConfigRendered); err != nil {
-		return closeHandlers(err, zw, f)
-	}
-	for name, appCfg := range cfg.AppConfig {
-		zf, err := zw.Create("config/" + name + "." + outputFormat)
+	for dirName, units := range compDirs {
+		_, err = zw.Create(fmt.Sprintf("components/%s/", dirName))
 		if err != nil {
 			return closeHandlers(err, zw, f)
 		}
-		if err := writeFile(zf, outputFormat, appCfg); err != nil {
-			return closeHandlers(err, zw, f)
+		for _, ud := range units {
+			unitDir := strings.ReplaceAll(strings.TrimPrefix(ud.UnitID, ud.ComponentID+"-"), "/", "-")
+			_, err = zw.Create(fmt.Sprintf("components/%s/%s/", dirName, unitDir))
+			if err != nil {
+				return closeHandlers(err, zw, f)
+			}
+			if ud.Err != nil {
+				w, err := zw.Create(fmt.Sprintf("components/%s/%s/error.txt", dirName, unitDir))
+				if err != nil {
+					return closeHandlers(err, zw, f)
+				}
+				_, err = w.Write([]byte(fmt.Sprintf("%s\n", ud.Err)))
+				if err != nil {
+					return closeHandlers(err, zw, f)
+				}
+				continue
+			}
+			for _, fr := range ud.Results {
+				w, err := zw.Create(fmt.Sprintf("components/%s/%s/%s", dirName, unitDir, fr.Name))
+				if err != nil {
+					return closeHandlers(err, zw, f)
+				}
+				_, err = w.Write(fr.Content)
+				if err != nil {
+					return closeHandlers(err, zw, f)
+				}
+			}
 		}
 	}
 
@@ -536,20 +158,6 @@ func createZip(fileName, outputFormat string, diag DiagnosticsInfo, cfg AgentCon
 		return closeHandlers(err, zw, f)
 	}
 
-	if pprof != nil {
-		err := zipProfs(zw, pprof)
-		if err != nil {
-			return closeHandlers(err, zw, f)
-		}
-	}
-
-	if metrics != nil && len(metrics.Result) > 0 {
-		err := zipMetrics(zw, metrics)
-		if err != nil {
-			return closeHandlers(err, zw, f)
-		}
-	}
-
 	return closeHandlers(nil, zw, f)
 }
 
@@ -560,10 +168,9 @@ func zipLogs(zw *zip.Writer) error {
 		return err
 	}
 
-	// TODO(blakerouse): Fix diagnostics for v2
-	//if err := collectEndpointSecurityLogs(zw, program.SupportedMap); err != nil {
-	//	return fmt.Errorf("failed to collect endpoint-security logs: %w", err)
-	//}
+	if err := collectServiceComponentsLogs(zw); err != nil {
+		return fmt.Errorf("failed to collect service component logs: %w", err)
+	}
 
 	// using Data() + "/logs", for some reason default paths/Logs() is the home dir...
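+	// the agent's own log files are walked from that directory below, while the
+	// logs of service components (for example Endpoint) are gathered separately
+	// by collectServiceComponentsLogs above and stored under services/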
logPath := filepath.Join(paths.Home(), "logs") + string(filepath.Separator) @@ -594,41 +201,48 @@ func zipLogs(zw *zip.Writer) error { }) } -/* -func collectEndpointSecurityLogs(zw *zip.Writer, specs map[string]program.Spec) error { - spec, ok := specs["endpoint-security"] - if !ok { - return nil +func collectServiceComponentsLogs(zw *zip.Writer) error { + platform, err := component.LoadPlatformDetail() + if err != nil { + return fmt.Errorf("failed to gather system information: %w", err) } - - logs, ok := spec.LogPaths[runtime.GOOS] - if !ok { - return nil + specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) + if err != nil { + return fmt.Errorf("failed to detect inputs and outputs: %w", err) } + for _, spec := range specs.ServiceSpecs() { + if spec.Spec.Service.Log == nil || spec.Spec.Service.Log.Path == "" { + // no log path set in specification + continue + } - logPath := filepath.Dir(logs) + string(filepath.Separator) - return filepath.WalkDir(logPath, func(path string, d fs.DirEntry, fErr error) error { - if fErr != nil { - if stderrors.Is(fErr, fs.ErrNotExist) { - return nil + logPath := filepath.Dir(spec.Spec.Service.Log.Path) + string(filepath.Separator) + err = filepath.WalkDir(logPath, func(path string, d fs.DirEntry, fErr error) error { + if fErr != nil { + if stderrors.Is(fErr, fs.ErrNotExist) { + return nil + } + + return fmt.Errorf("unable to walk log directory %q for service input %s: %w", logPath, spec.InputType, fErr) } - return fmt.Errorf("unable to walk log dir: %w", fErr) - } + name := filepath.ToSlash(strings.TrimPrefix(path, logPath)) + if name == "" { + return nil + } - name := filepath.ToSlash(strings.TrimPrefix(path, logPath)) - if name == "" { - return nil - } + if d.IsDir() { + return nil + } - if d.IsDir() { - return nil + return saveLogs("services/"+name, path, zw) + }) + if err != nil { + return err } - - return saveLogs(name, path, zw) - }) + } + return nil } -*/ func saveLogs(name string, logPath string, zw *zip.Writer) error { lf, err := os.Open(logPath) @@ -647,18 +261,6 @@ func saveLogs(name string, logPath string, zw *zip.Writer) error { return lf.Close() } -// writeFile writes json or yaml data from the interface to the writer. 
-func writeFile(w io.Writer, outputFormat string, v interface{}) error { - if outputFormat == "json" { - je := json.NewEncoder(w) - je.SetIndent("", " ") - return je.Encode(v) - } - ye := yaml.NewEncoder(w) - err := ye.Encode(v) - return closeHandlers(err, ye) -} - // closeHandlers will close all passed closers attaching any errors to the passed err and returning the result func closeHandlers(err error, closers ...io.Closer) error { var mErr *multierror.Error @@ -670,90 +272,3 @@ func closeHandlers(err error, closers ...io.Closer) error { } return mErr.ErrorOrNil() } - -func getAllPprof(ctx context.Context, d time.Duration) (map[string][]client.ProcPProf, error) { - daemon := client.New() - err := daemon.Connect(ctx) - if err != nil { - return nil, err - } - pprofTypes := []cproto.PprofOption{ - cproto.PprofOption_ALLOCS, - cproto.PprofOption_BLOCK, - cproto.PprofOption_CMDLINE, - cproto.PprofOption_GOROUTINE, - cproto.PprofOption_HEAP, - cproto.PprofOption_MUTEX, - cproto.PprofOption_PROFILE, - cproto.PprofOption_THREADCREATE, - cproto.PprofOption_TRACE, - } - return daemon.Pprof(ctx, d, pprofTypes, "", "") -} - -func zipProfs(zw *zip.Writer, pprof map[string][]client.ProcPProf) error { - _, err := zw.Create("pprof/") - if err != nil { - return err - } - - for pType, profs := range pprof { - _, err := zw.Create("pprof/" + pType + "/") - if err != nil { - return err - } - for _, p := range profs { - if p.Error != "" { - zf, err := zw.Create("pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + "_error.txt") - if err != nil { - return err - } - _, err = zf.Write([]byte(p.Error)) - if err != nil { - return err - } - continue - } - zf, err := zw.Create("pprof/" + pType + "/" + p.Name + "_" + p.RouteKey + ".pprof") - if err != nil { - return err - } - _, err = zf.Write(p.Result) - if err != nil { - return err - } - } - } - return nil -} - -func zipMetrics(zw *zip.Writer, metrics *cproto.ProcMetricsResponse) error { - //nolint:staticcheck,wastedassign // false positive - zf, err := zw.Create("metrics/") - if err != nil { - return err - } - - for _, m := range metrics.Result { - if m.Error != "" { - zf, err = zw.Create("metrics/" + m.AppName + "_" + m.RouteKey + "_error.txt") - if err != nil { - return err - } - _, err = zf.Write([]byte(m.Error)) - if err != nil { - return err - } - continue - } - zf, err = zw.Create("metrics/" + m.AppName + "_" + m.RouteKey + ".json") - if err != nil { - return err - } - _, err = zf.Write(m.Result) - if err != nil { - return err - } - } - return nil -} diff --git a/internal/pkg/agent/cmd/diagnostics_test.go b/internal/pkg/agent/cmd/diagnostics_test.go deleted file mode 100644 index 99d98ef78de..00000000000 --- a/internal/pkg/agent/cmd/diagnostics_test.go +++ /dev/null @@ -1,216 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package cmd - -/* - -import ( - "archive/zip" - "bytes" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - "time" - - "github.com/elastic/elastic-agent-libs/transport/tlscommon" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" - "github.com/elastic/elastic-agent/internal/pkg/agent/program" -) - -var testDiagnostics = DiagnosticsInfo{ - AgentInfo: AgentInfo{ - ID: "test-id", - Version: "test-version", - Commit: "test-commit", - BuildTime: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - Snapshot: false, - }, - ProcMeta: []client.ProcMeta{{ - Process: "filebeat", - Name: "filebeat", - Hostname: "test-host", - ID: "filebeat-id", - EphemeralID: "filebeat-ephemeral-id", - Version: "filebeat-version", - BuildCommit: "filebeat-commit", - BuildTime: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - Username: "test-user", - UserID: "1000", - UserGID: "1000", - BinaryArchitecture: "test-architecture", - RouteKey: "test", - ElasticLicensed: true, - }, { - Process: "filebeat", - Name: "filebeat_monitoring", - Hostname: "test-host", - ID: "filebeat_monitoring-id", - EphemeralID: "filebeat_monitoring-ephemeral-id", - Version: "filebeat_monitoring-version", - BuildCommit: "filebeat_monitoring-commit", - BuildTime: time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC), - Username: "test-user", - UserID: "1000", - UserGID: "1000", - BinaryArchitecture: "test-architecture", - RouteKey: "test", - ElasticLicensed: true, - }, { - Name: "metricbeat", - RouteKey: "test", - Error: "failed to get metricbeat data", - }}, -} - -func Example_humanDiagnosticsOutput() { - _ = humanDiagnosticsOutput(os.Stdout, testDiagnostics) - // Output: - // elastic-agent id: test-id version: test-version - // build_commit: test-commit build_time: 2021-01-01 00:00:00 +0000 UTC snapshot_build: false - // Applications: - // * name: filebeat route_key: test - // process: filebeat id: filebeat-id ephemeral_id: filebeat-ephemeral-id elastic_license: true - // version: filebeat-version commit: filebeat-commit build_time: 2021-01-01 00:00:00 +0000 UTC binary_arch: test-architecture - // hostname: test-host username: test-user user_id: 1000 user_gid: 1000 - // * name: filebeat_monitoring route_key: test - // process: filebeat id: filebeat_monitoring-id ephemeral_id: filebeat_monitoring-ephemeral-id elastic_license: true - // version: filebeat_monitoring-version commit: filebeat_monitoring-commit build_time: 2021-01-01 00:00:00 +0000 UTC binary_arch: test-architecture - // hostname: test-host username: test-user user_id: 1000 user_gid: 1000 - // * name: metricbeat route_key: test - // error: failed to get metricbeat data -} - -func Test_collectEndpointSecurityLogs(t *testing.T) { - root := filepath.Join("testdata", "diagnostics", "endpoint-security", "logs") - - specs := program.SupportedMap - specs["endpoint-security"].LogPaths[runtime.GOOS] = - filepath.Join(root, "endpoint-*.log") - - buff := bytes.Buffer{} - - zw := zip.NewWriter(&buff) - err := collectEndpointSecurityLogs(zw, specs) - assert.NoError(t, err) - - err = zw.Close() - require.NoError(t, err) - - zr, err := zip.NewReader( - bytes.NewReader(buff.Bytes()), int64(len(buff.Bytes()))) - require.NoError(t, err) - - assert.NotEmpty(t, zr.File, "zip file shouldn't be empty") - for _, f := range zr.File { - split := strings.Split(f.Name, "/") - name := split[len(split)-1] - - wantf, err := os.Open(filepath.Join(root, name)) - require.NoError(t, err) - want, err := 
io.ReadAll(wantf) - require.NoError(t, err) - - r, err := f.Open() - require.NoError(t, err) - got, err := io.ReadAll(r) - require.NoError(t, err) - - assert.Equal(t, got, want) - } -} - -func Test_collectEndpointSecurityLogs_noEndpointSecurity(t *testing.T) { - root := filepath.Join("doesNotExist") - - specs := program.SupportedMap - specs["endpoint-security"].LogPaths["linux"] = - filepath.Join(root, "endpoint-*.log") - - buff := bytes.Buffer{} - - zw := zip.NewWriter(&buff) - err := collectEndpointSecurityLogs(zw, specs) - assert.NoError(t, err, "collectEndpointSecurityLogs should not return an error") -} - -func Test_redact(t *testing.T) { - tests := []struct { - name string - arg interface{} - wantRedacted []string - wantErr assert.ErrorAssertionFunc - }{ - { - name: "tlscommon.Config", - arg: tlscommon.Config{ - Enabled: nil, - VerificationMode: 0, - Versions: nil, - CipherSuites: nil, - CAs: []string{"ca1", "ca2"}, - Certificate: tlscommon.CertificateConfig{ - Certificate: "Certificate", - Key: "Key", - Passphrase: "Passphrase", - }, - CurveTypes: nil, - Renegotiation: 0, - CASha256: nil, - CATrustedFingerprint: "", - }, - wantRedacted: []string{ - "certificate", "key", "key_passphrase", "certificate_authorities"}, - }, - { - name: "some map", - arg: map[string]interface{}{ - "s": "sss", - "some_key": "hey, a key!", - "a_password": "changeme", - "my_token": "a_token", - "nested": map[string]string{ - "4242": "4242", - "4242key": "4242key", - "4242password": "4242password", - "4242certificate": "4242certificate", - }, - }, - wantRedacted: []string{ - "some_key", "a_password", "my_token", "4242key", "4242password", "4242certificate"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := redact(tt.arg) - require.NoError(t, err) - - for k, v := range got { - if contains(tt.wantRedacted, k) { - assert.Equal(t, v, REDACTED) - } else { - assert.NotEqual(t, v, REDACTED) - } - } - }) - } -} - -func contains(list []string, val string) bool { - for _, k := range list { - if val == k { - return true - } - } - - return false -} -*/ diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index 9ad4827e166..ad8690d90ee 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -34,6 +34,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/cli" "github.com/elastic/elastic-agent/internal/pkg/config" monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" + "github.com/elastic/elastic-agent/internal/pkg/diagnostics" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -158,32 +159,24 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { logger.Info("APM instrumentation disabled") } - app, err := application.New(logger, agentInfo, rex, tracer, modifiers...) + coord, err := application.New(logger, agentInfo, rex, tracer, modifiers...) if err != nil { return err } - control := server.New(logger.Named("control"), cfg.Settings.MonitoringConfig, app, tracer) + diagHooks := diagnostics.GlobalHooks() + diagHooks = append(diagHooks, coord.DiagnosticHooks()...) 
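+	// these hooks feed the diagnostics support of the control server:
+	// diagnostics.GlobalHooks() supplies the agent-level information and the
+	// coordinator contributes its own hooks on top of that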
+ control := server.New(logger.Named("control"), agentInfo, coord, tracer, diagHooks) // start the control listener if err := control.Start(); err != nil { return err } defer control.Stop() - /* - serverStopFn, err := setupMetrics(agentInfo, logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, app, tracer) - if err != nil { - return err - } - defer func() { - _ = serverStopFn() - }() - */ - appDone := make(chan bool) appErr := make(chan error) go func() { - err := app.Run(ctx) + err := coord.Run(ctx) close(appDone) appErr <- err }() @@ -330,45 +323,6 @@ func defaultLogLevel(cfg *configuration.Configuration) string { return defaultLogLevel } -/* -func setupMetrics( - _ *info.AgentInfo, - logger *logger.Logger, - operatingSystem string, - cfg *monitoringCfg.MonitoringConfig, - app application.Application, - tracer *apm.Tracer, -) (func() error, error) { - if err := report.SetupMetrics(logger, agentName, version.GetDefaultVersion()); err != nil { - return nil, err - } - - // start server for stats - endpointConfig := api.Config{ - Enabled: true, - Host: beats.AgentMonitoringEndpoint(operatingSystem, cfg.HTTP), - } - - bufferEnabled := cfg.HTTP.Buffer != nil && cfg.HTTP.Buffer.Enabled - s, err := monitoringServer.New(logger, endpointConfig, monitoring.GetNamespace, app.Routes, isProcessStatsEnabled(cfg.HTTP), bufferEnabled, tracer) - if err != nil { - return nil, errors.New(err, "could not start the HTTP server for the API") - } - s.Start() - - if cfg.Pprof != nil && cfg.Pprof.Enabled { - s.AttachPprof() - } - - // return server stopper - return s.Stop, nil -} - -func isProcessStatsEnabled(cfg *monitoringCfg.MonitoringHTTPConfig) bool { - return cfg != nil && cfg.Enabled -} -*/ - func tryDelayEnroll(ctx context.Context, logger *logger.Logger, cfg *configuration.Configuration, override cfgOverrider) (*configuration.Configuration, error) { enrollPath := paths.AgentEnrollFile() if _, err := os.Stat(enrollPath); err != nil { diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/client/client.go index 634cc25a5af..d876ba4aca1 100644 --- a/internal/pkg/agent/control/client/client.go +++ b/internal/pkg/agent/control/client/client.go @@ -7,6 +7,7 @@ package client import ( "context" "encoding/json" + "errors" "fmt" "sync" "time" @@ -51,10 +52,19 @@ const ( // Version is the current running version of the daemon. type Version struct { - Version string - Commit string - BuildTime time.Time - Snapshot bool + Version string `json:"version" yaml:"version"` + Commit string `json:"commit" yaml:"commit"` + BuildTime time.Time `json:"build_time" yaml:"build_time"` + Snapshot bool `json:"snapshot" yaml:"snapshot"` +} + +type ComponentVersionInfo struct { + // Name of the component. + Name string `json:"name" yaml:"name"` + // Version of the component. + Version string `json:"version" yaml:"version"` + // Extra meta information about the version. + Meta map[string]string `json:"meta,omitempty" yaml:"meta,omitempty"` } // ComponentUnitState is a state of a unit running inside a component. @@ -68,45 +78,54 @@ type ComponentUnitState struct { // ComponentState is a state of a component managed by the Elastic Agent. 
 type ComponentState struct {
-	ID      string               `json:"id" yaml:"id"`
-	Name    string               `json:"name" yaml:"name"`
-	State   State                `json:"state" yaml:"state"`
-	Message string               `json:"message" yaml:"message"`
-	Units   []ComponentUnitState `json:"units" yaml:"units"`
+	ID          string               `json:"id" yaml:"id"`
+	Name        string               `json:"name" yaml:"name"`
+	State       State                `json:"state" yaml:"state"`
+	Message     string               `json:"message" yaml:"message"`
+	Units       []ComponentUnitState `json:"units" yaml:"units"`
+	VersionInfo ComponentVersionInfo `json:"version_info" yaml:"version_info"`
+}
+
+// AgentStateInfo is the overall information about the Elastic Agent.
+type AgentStateInfo struct {
+	ID        string `json:"id" yaml:"id"`
+	Version   string `json:"version" yaml:"version"`
+	Commit    string `json:"commit" yaml:"commit"`
+	BuildTime string `json:"build_time" yaml:"build_time"`
+	Snapshot  bool   `json:"snapshot" yaml:"snapshot"`
 }
 
 // AgentState is the current state of the Elastic Agent.
 type AgentState struct {
+	Info       AgentStateInfo   `json:"info" yaml:"info"`
 	State      State            `json:"state" yaml:"state"`
 	Message    string           `json:"message" yaml:"message"`
 	Components []ComponentState `json:"components" yaml:"components"`
 }
 
-// ProcMeta is the running version and ID information for a running process.
-type ProcMeta struct {
-	Process            string
-	Name               string
-	Hostname           string
-	ID                 string
-	EphemeralID        string
-	Version            string
-	BuildCommit        string
-	BuildTime          time.Time
-	Username           string
-	UserID             string
-	UserGID            string
-	BinaryArchitecture string
-	RouteKey           string
-	ElasticLicensed    bool
-	Error              string
+// DiagnosticFileResult is a single file of diagnostic information.
+type DiagnosticFileResult struct {
+	Name        string
+	Filename    string
+	Description string
+	ContentType string
+	Content     []byte
+	Generated   time.Time
 }
 
-// ProcPProf returns pprof data for a process.
-type ProcPProf struct {
-	Name     string
-	RouteKey string
-	Result   []byte
-	Error    string
+// DiagnosticUnitRequest allows a specific unit to be targeted for diagnostics.
+type DiagnosticUnitRequest struct {
+	UnitID   string
+	UnitType UnitType
+}
+
+// DiagnosticUnitResult is a set of results for a unit.
+type DiagnosticUnitResult struct {
+	ComponentID string
+	UnitID      string
+	UnitType    UnitType
+	Err         error
+	Results     []DiagnosticFileResult
 }
 
 // Client communicates to Elastic Agent through the control protocol.
@@ -123,12 +142,10 @@ type Client interface {
 	Restart(ctx context.Context) error
 	// Upgrade triggers upgrade of the current running daemon.
 	Upgrade(ctx context.Context, version string, sourceURI string) (string, error)
-	// ProcMeta gathers running process meta-data.
-	ProcMeta(ctx context.Context) ([]ProcMeta, error)
-	// Pprof gathers data from the /debug/pprof/ endpoints specified.
-	Pprof(ctx context.Context, d time.Duration, pprofTypes []cproto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error)
-	// ProcMetrics gathers /buffer data and from the agent and each running process and returns the result.
-	ProcMetrics(ctx context.Context) (*cproto.ProcMetricsResponse, error)
+	// DiagnosticAgent gathers diagnostics information for the running Elastic Agent.
+	DiagnosticAgent(ctx context.Context) ([]DiagnosticFileResult, error)
+	// DiagnosticUnits gathers diagnostics information from specific units (or all if none are provided).
+	DiagnosticUnits(ctx context.Context, units ...DiagnosticUnitRequest) ([]DiagnosticUnitResult, error)
 }
 
 // client manages the state and communication to the Elastic Agent.
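// A minimal usage sketch of the two new methods on Client (illustrative only;
// the helper name dumpUnitDiagnostics is not part of this API, and ctx and the
// fmt import are assumed from the caller):
//
//	func dumpUnitDiagnostics(ctx context.Context) error {
//		c := client.New()
//		if err := c.Connect(ctx); err != nil {
//			return err
//		}
//		defer c.Disconnect()
//		// no DiagnosticUnitRequest arguments: results for all units
//		units, err := c.DiagnosticUnits(ctx)
//		if err != nil {
//			return err
//		}
//		for _, u := range units {
//			if u.Err != nil {
//				fmt.Printf("%s/%s: %v\n", u.ComponentID, u.UnitID, u.Err)
//				continue
//			}
//			for _, f := range u.Results {
//				fmt.Printf("%s/%s: %s (%d bytes)\n", u.ComponentID, u.UnitID, f.Filename, len(f.Content))
//			}
//		}
//		return nil
//	}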
@@ -190,6 +207,13 @@ func (c *client) State(ctx context.Context) (*AgentState, error) {
 		return nil, err
 	}
 	s := &AgentState{
+		Info: AgentStateInfo{
+			ID:        res.Info.Id,
+			Version:   res.Info.Version,
+			Commit:    res.Info.Commit,
+			BuildTime: res.Info.BuildTime,
+			Snapshot:  res.Info.Snapshot,
+		},
 		State:      res.State,
 		Message:    res.Message,
 		Components: make([]ComponentState, 0, len(res.Components)),
@@ -212,13 +236,21 @@ func (c *client) State(ctx context.Context) (*AgentState, error) {
 				Payload:  payload,
 			})
 		}
-		s.Components = append(s.Components, ComponentState{
+		cs := ComponentState{
 			ID:      comp.Id,
 			Name:    comp.Name,
 			State:   comp.State,
 			Message: comp.Message,
 			Units:   units,
-		})
+		}
+		if comp.VersionInfo != nil {
+			cs.VersionInfo = ComponentVersionInfo{
+				Name:    comp.VersionInfo.Name,
+				Version: comp.VersionInfo.Version,
+				Meta:    comp.VersionInfo.Meta,
+			}
+		}
+		s.Components = append(s.Components, cs)
 	}
 	return s, nil
 }
@@ -250,75 +282,66 @@ func (c *client) Upgrade(ctx context.Context, version string, sourceURI string)
 	return res.Version, nil
 }
 
-// ProcMeta gathers running beat metadata.
-func (c *client) ProcMeta(ctx context.Context) ([]ProcMeta, error) {
-	resp, err := c.client.ProcMeta(ctx, &cproto.Empty{})
+// DiagnosticAgent gathers diagnostics information for the running Elastic Agent.
+func (c *client) DiagnosticAgent(ctx context.Context) ([]DiagnosticFileResult, error) {
+	resp, err := c.client.DiagnosticAgent(ctx, &cproto.DiagnosticAgentRequest{})
 	if err != nil {
 		return nil, err
 	}
-	procMeta := []ProcMeta{}
-	for _, proc := range resp.Procs {
-		meta := ProcMeta{
-			Process:            proc.Process,
-			Name:               proc.Name,
-			Hostname:           proc.Hostname,
-			ID:                 proc.Id,
-			EphemeralID:        proc.EphemeralId,
-			Version:            proc.Version,
-			BuildCommit:        proc.BuildCommit,
-			Username:           proc.Username,
-			UserID:             proc.UserId,
-			UserGID:            proc.UserGid,
-			BinaryArchitecture: proc.Architecture,
-			RouteKey:           proc.RouteKey,
-			ElasticLicensed:    proc.ElasticLicensed,
-			Error:              proc.Error,
-		}
-		if proc.BuildTime != "" {
-			ts, err := time.Parse(time.RFC3339, proc.BuildTime)
-			if err != nil {
-				if meta.Error != "" {
-					meta.Error += ", " + err.Error()
-				} else {
-					meta.Error = err.Error()
-				}
-			} else {
-				meta.BuildTime = ts
-			}
-		}
-		procMeta = append(procMeta, meta)
+	files := make([]DiagnosticFileResult, 0, len(resp.Results))
+	for _, f := range resp.Results {
+		files = append(files, DiagnosticFileResult{
+			Name:        f.Name,
+			Filename:    f.Filename,
+			Description: f.Description,
+			ContentType: f.ContentType,
+			Content:     f.Content,
+			Generated:   f.Generated.AsTime(),
+		})
 	}
-	return procMeta, nil
+	return files, nil
 }
 
-// Pprof gathers /debug/pprof data and returns a map of pprof-type: ProcPProf data
-func (c *client) Pprof(ctx context.Context, d time.Duration, pprofTypes []cproto.PprofOption, appName, routeKey string) (map[string][]ProcPProf, error) {
-	resp, err := c.client.Pprof(ctx, &cproto.PprofRequest{
-		PprofType:     pprofTypes,
-		TraceDuration: d.String(),
-		AppName:       appName,
-		RouteKey:      routeKey,
-	})
+// DiagnosticUnits gathers diagnostics information from specific units (or all if none are provided).
+func (c *client) DiagnosticUnits(ctx context.Context, units ...DiagnosticUnitRequest) ([]DiagnosticUnitResult, error) { + reqs := make([]*cproto.DiagnosticUnitRequest, 0, len(units)) + for _, u := range units { + reqs = append(reqs, &cproto.DiagnosticUnitRequest{ + UnitType: u.UnitType, + UnitId: u.UnitID, + }) + } + + resp, err := c.client.DiagnosticUnits(ctx, &cproto.DiagnosticUnitsRequest{Units: reqs}) if err != nil { return nil, err } - res := map[string][]ProcPProf{} - for _, pType := range pprofTypes { - res[pType.String()] = make([]ProcPProf, 0) - } - for _, r := range resp.Results { - res[r.PprofType.String()] = append(res[r.PprofType.String()], ProcPProf{ - Name: r.AppName, - RouteKey: r.RouteKey, - Result: r.Result, - Error: r.Error, + + results := make([]DiagnosticUnitResult, 0, len(resp.Units)) + for _, u := range resp.Units { + files := make([]DiagnosticFileResult, 0, len(u.Results)) + for _, f := range u.Results { + files = append(files, DiagnosticFileResult{ + Name: f.Name, + Filename: f.Filename, + Description: f.Description, + ContentType: f.ContentType, + Content: f.Content, + Generated: f.Generated.AsTime(), + }) + } + var err error + if u.Error != "" { + err = errors.New(u.Error) + } + results = append(results, DiagnosticUnitResult{ + ComponentID: u.ComponentId, + UnitID: u.UnitId, + UnitType: u.UnitType, + Err: err, + Results: files, }) } - return res, nil -} - -// ProcMetrics gathers /buffer data and from the agent and each running process and returns the result. -func (c *client) ProcMetrics(ctx context.Context) (*cproto.ProcMetricsResponse, error) { - return c.client.ProcMetrics(ctx, &cproto.Empty{}) + return results, nil } diff --git a/internal/pkg/agent/control/control_test.go b/internal/pkg/agent/control/control_test.go index c189ab4534d..3937b374a36 100644 --- a/internal/pkg/agent/control/control_test.go +++ b/internal/pkg/agent/control/control_test.go @@ -21,7 +21,7 @@ import ( ) func TestServerClient_Version(t *testing.T) { - srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer) + srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil) err := srv.Start() require.NoError(t, err) defer srv.Stop() diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go index 7ada35a4fe0..ed681fd38ef 100644 --- a/internal/pkg/agent/control/cproto/control.pb.go +++ b/internal/pkg/agent/control/cproto/control.pb.go @@ -4,8 +4,8 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: -// protoc-gen-go v1.28.0 -// protoc v3.19.4 +// protoc-gen-go v1.28.1 +// protoc v3.21.5 // source: control.proto package cproto @@ -16,6 +16,7 @@ import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" ) const ( @@ -806,31 +807,25 @@ func (x *ComponentState) GetVersionInfo() *ComponentVersionInfo { return nil } -// Current metadata for a running process. 
-type ProcMeta struct { +type StateAgentInfo struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Process string `protobuf:"bytes,1,opt,name=process,proto3" json:"process,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` - Hostname string `protobuf:"bytes,3,opt,name=hostname,proto3" json:"hostname,omitempty"` - Id string `protobuf:"bytes,4,opt,name=id,proto3" json:"id,omitempty"` - EphemeralId string `protobuf:"bytes,5,opt,name=ephemeral_id,json=ephemeralId,proto3" json:"ephemeral_id,omitempty"` - Version string `protobuf:"bytes,6,opt,name=version,proto3" json:"version,omitempty"` - BuildCommit string `protobuf:"bytes,7,opt,name=build_commit,json=buildCommit,proto3" json:"build_commit,omitempty"` - BuildTime string `protobuf:"bytes,8,opt,name=build_time,json=buildTime,proto3" json:"build_time,omitempty"` - Username string `protobuf:"bytes,9,opt,name=username,proto3" json:"username,omitempty"` - UserId string `protobuf:"bytes,10,opt,name=user_id,json=userId,proto3" json:"user_id,omitempty"` - UserGid string `protobuf:"bytes,11,opt,name=user_gid,json=userGid,proto3" json:"user_gid,omitempty"` - Architecture string `protobuf:"bytes,12,opt,name=architecture,proto3" json:"architecture,omitempty"` - RouteKey string `protobuf:"bytes,13,opt,name=route_key,json=routeKey,proto3" json:"route_key,omitempty"` - ElasticLicensed bool `protobuf:"varint,14,opt,name=elastic_licensed,json=elasticLicensed,proto3" json:"elastic_licensed,omitempty"` - Error string `protobuf:"bytes,15,opt,name=error,proto3" json:"error,omitempty"` -} - -func (x *ProcMeta) Reset() { - *x = ProcMeta{} + // Current ID of the Agent. + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + // Current running version. + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` + // Current running commit. + Commit string `protobuf:"bytes,3,opt,name=commit,proto3" json:"commit,omitempty"` + // Current running build time. + BuildTime string `protobuf:"bytes,4,opt,name=buildTime,proto3" json:"buildTime,omitempty"` + // Current running version is a snapshot. + Snapshot bool `protobuf:"varint,5,opt,name=snapshot,proto3" json:"snapshot,omitempty"` +} + +func (x *StateAgentInfo) Reset() { + *x = StateAgentInfo{} if protoimpl.UnsafeEnabled { mi := &file_control_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -838,13 +833,13 @@ func (x *ProcMeta) Reset() { } } -func (x *ProcMeta) String() string { +func (x *StateAgentInfo) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProcMeta) ProtoMessage() {} +func (*StateAgentInfo) ProtoMessage() {} -func (x *ProcMeta) ProtoReflect() protoreflect.Message { +func (x *StateAgentInfo) ProtoReflect() protoreflect.Message { mi := &file_control_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -856,128 +851,60 @@ func (x *ProcMeta) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProcMeta.ProtoReflect.Descriptor instead. -func (*ProcMeta) Descriptor() ([]byte, []int) { +// Deprecated: Use StateAgentInfo.ProtoReflect.Descriptor instead. 
+func (*StateAgentInfo) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{8} } -func (x *ProcMeta) GetProcess() string { - if x != nil { - return x.Process - } - return "" -} - -func (x *ProcMeta) GetName() string { - if x != nil { - return x.Name - } - return "" -} - -func (x *ProcMeta) GetHostname() string { - if x != nil { - return x.Hostname - } - return "" -} - -func (x *ProcMeta) GetId() string { +func (x *StateAgentInfo) GetId() string { if x != nil { return x.Id } return "" } -func (x *ProcMeta) GetEphemeralId() string { - if x != nil { - return x.EphemeralId - } - return "" -} - -func (x *ProcMeta) GetVersion() string { +func (x *StateAgentInfo) GetVersion() string { if x != nil { return x.Version } return "" } -func (x *ProcMeta) GetBuildCommit() string { +func (x *StateAgentInfo) GetCommit() string { if x != nil { - return x.BuildCommit + return x.Commit } return "" } -func (x *ProcMeta) GetBuildTime() string { +func (x *StateAgentInfo) GetBuildTime() string { if x != nil { return x.BuildTime } return "" } -func (x *ProcMeta) GetUsername() string { - if x != nil { - return x.Username - } - return "" -} - -func (x *ProcMeta) GetUserId() string { - if x != nil { - return x.UserId - } - return "" -} - -func (x *ProcMeta) GetUserGid() string { - if x != nil { - return x.UserGid - } - return "" -} - -func (x *ProcMeta) GetArchitecture() string { +func (x *StateAgentInfo) GetSnapshot() bool { if x != nil { - return x.Architecture - } - return "" -} - -func (x *ProcMeta) GetRouteKey() string { - if x != nil { - return x.RouteKey - } - return "" -} - -func (x *ProcMeta) GetElasticLicensed() bool { - if x != nil { - return x.ElasticLicensed + return x.Snapshot } return false } -func (x *ProcMeta) GetError() string { - if x != nil { - return x.Error - } - return "" -} - // StateResponse is the current state of Elastic Agent. type StateResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // Overall information of Elastic Agent. + Info *StateAgentInfo `protobuf:"bytes,1,opt,name=info,proto3" json:"info,omitempty"` // Overall state of Elastic Agent. - State State `protobuf:"varint,1,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` + State State `protobuf:"varint,2,opt,name=state,proto3,enum=cproto.State" json:"state,omitempty"` // Overall status message of Elastic Agent. - Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` // Status of each component in Elastic Agent. - Components []*ComponentState `protobuf:"bytes,3,rep,name=components,proto3" json:"components,omitempty"` + Components []*ComponentState `protobuf:"bytes,4,rep,name=components,proto3" json:"components,omitempty"` } func (x *StateResponse) Reset() { @@ -1012,6 +939,13 @@ func (*StateResponse) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{9} } +func (x *StateResponse) GetInfo() *StateAgentInfo { + if x != nil { + return x.Info + } + return nil +} + func (x *StateResponse) GetState() State { if x != nil { return x.State @@ -1033,17 +967,28 @@ func (x *StateResponse) GetComponents() []*ComponentState { return nil } -// ProcMetaResponse is the current running version infomation for all processes. -type ProcMetaResponse struct { +// DiagnosticFileResult is a file result from a diagnostic result. 
+type DiagnosticFileResult struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Procs []*ProcMeta `protobuf:"bytes,1,rep,name=procs,proto3" json:"procs,omitempty"` -} - -func (x *ProcMetaResponse) Reset() { - *x = ProcMetaResponse{} + // Human readable name of the diagnostic result content. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Filename to use to store the diagnostic to the disk. + Filename string `protobuf:"bytes,2,opt,name=filename,proto3" json:"filename,omitempty"` + // Human readable description of the information this diagnostic provides. + Description string `protobuf:"bytes,3,opt,name=description,proto3" json:"description,omitempty"` + // Content-Type of the resulting content. + ContentType string `protobuf:"bytes,4,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // Actual file content. + Content []byte `protobuf:"bytes,5,opt,name=content,proto3" json:"content,omitempty"` + // Timestamp the content was generated at. + Generated *timestamppb.Timestamp `protobuf:"bytes,6,opt,name=generated,proto3" json:"generated,omitempty"` +} + +func (x *DiagnosticFileResult) Reset() { + *x = DiagnosticFileResult{} if protoimpl.UnsafeEnabled { mi := &file_control_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1051,13 +996,13 @@ func (x *ProcMetaResponse) Reset() { } } -func (x *ProcMetaResponse) String() string { +func (x *DiagnosticFileResult) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProcMetaResponse) ProtoMessage() {} +func (*DiagnosticFileResult) ProtoMessage() {} -func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message { +func (x *DiagnosticFileResult) ProtoReflect() protoreflect.Message { mi := &file_control_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1069,36 +1014,62 @@ func (x *ProcMetaResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProcMetaResponse.ProtoReflect.Descriptor instead. -func (*ProcMetaResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use DiagnosticFileResult.ProtoReflect.Descriptor instead. +func (*DiagnosticFileResult) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{10} } -func (x *ProcMetaResponse) GetProcs() []*ProcMeta { +func (x *DiagnosticFileResult) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *DiagnosticFileResult) GetFilename() string { + if x != nil { + return x.Filename + } + return "" +} + +func (x *DiagnosticFileResult) GetDescription() string { + if x != nil { + return x.Description + } + return "" +} + +func (x *DiagnosticFileResult) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *DiagnosticFileResult) GetContent() []byte { if x != nil { - return x.Procs + return x.Content } return nil } -// PprofRequest is a request for pprof data from and http/pprof endpoint. -type PprofRequest struct { +func (x *DiagnosticFileResult) GetGenerated() *timestamppb.Timestamp { + if x != nil { + return x.Generated + } + return nil +} + +// DiagnosticAgentRequest is request to gather diagnostic information about the Elastic Agent. 
+type DiagnosticAgentRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - - // The profiles that are requested - PprofType []PprofOption `protobuf:"varint,1,rep,packed,name=pprofType,proto3,enum=cproto.PprofOption" json:"pprofType,omitempty"` - // A string representing a time.Duration to apply to trace, and profile options. - TraceDuration string `protobuf:"bytes,2,opt,name=traceDuration,proto3" json:"traceDuration,omitempty"` - // The application that will be profiled, if empty all applications are profiled. - AppName string `protobuf:"bytes,3,opt,name=appName,proto3" json:"appName,omitempty"` - // The route key to match for profiling, if empty all are profiled. - RouteKey string `protobuf:"bytes,4,opt,name=routeKey,proto3" json:"routeKey,omitempty"` } -func (x *PprofRequest) Reset() { - *x = PprofRequest{} +func (x *DiagnosticAgentRequest) Reset() { + *x = DiagnosticAgentRequest{} if protoimpl.UnsafeEnabled { mi := &file_control_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1106,13 +1077,13 @@ func (x *PprofRequest) Reset() { } } -func (x *PprofRequest) String() string { +func (x *DiagnosticAgentRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PprofRequest) ProtoMessage() {} +func (*DiagnosticAgentRequest) ProtoMessage() {} -func (x *PprofRequest) ProtoReflect() protoreflect.Message { +func (x *DiagnosticAgentRequest) ProtoReflect() protoreflect.Message { mi := &file_control_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1124,54 +1095,23 @@ func (x *PprofRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PprofRequest.ProtoReflect.Descriptor instead. -func (*PprofRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use DiagnosticAgentRequest.ProtoReflect.Descriptor instead. +func (*DiagnosticAgentRequest) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{11} } -func (x *PprofRequest) GetPprofType() []PprofOption { - if x != nil { - return x.PprofType - } - return nil -} - -func (x *PprofRequest) GetTraceDuration() string { - if x != nil { - return x.TraceDuration - } - return "" -} - -func (x *PprofRequest) GetAppName() string { - if x != nil { - return x.AppName - } - return "" -} - -func (x *PprofRequest) GetRouteKey() string { - if x != nil { - return x.RouteKey - } - return "" -} - -// PprofResult is the result of a pprof request for a given application/route key. -type PprofResult struct { +// DiagnosticAgentResponse is response to gathered diagnostic information about the Elastic Agent. +type DiagnosticAgentResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AppName string `protobuf:"bytes,1,opt,name=appName,proto3" json:"appName,omitempty"` - RouteKey string `protobuf:"bytes,2,opt,name=routeKey,proto3" json:"routeKey,omitempty"` - PprofType PprofOption `protobuf:"varint,3,opt,name=pprofType,proto3,enum=cproto.PprofOption" json:"pprofType,omitempty"` - Result []byte `protobuf:"bytes,4,opt,name=result,proto3" json:"result,omitempty"` - Error string `protobuf:"bytes,5,opt,name=error,proto3" json:"error,omitempty"` + // Diagnostic results for the agent. 
+ Results []*DiagnosticFileResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` } -func (x *PprofResult) Reset() { - *x = PprofResult{} +func (x *DiagnosticAgentResponse) Reset() { + *x = DiagnosticAgentResponse{} if protoimpl.UnsafeEnabled { mi := &file_control_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1179,13 +1119,13 @@ func (x *PprofResult) Reset() { } } -func (x *PprofResult) String() string { +func (x *DiagnosticAgentResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PprofResult) ProtoMessage() {} +func (*DiagnosticAgentResponse) ProtoMessage() {} -func (x *PprofResult) ProtoReflect() protoreflect.Message { +func (x *DiagnosticAgentResponse) ProtoReflect() protoreflect.Message { mi := &file_control_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1197,72 +1137,103 @@ func (x *PprofResult) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PprofResult.ProtoReflect.Descriptor instead. -func (*PprofResult) Descriptor() ([]byte, []int) { +// Deprecated: Use DiagnosticAgentResponse.ProtoReflect.Descriptor instead. +func (*DiagnosticAgentResponse) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{12} } -func (x *PprofResult) GetAppName() string { +func (x *DiagnosticAgentResponse) GetResults() []*DiagnosticFileResult { if x != nil { - return x.AppName + return x.Results } - return "" + return nil } -func (x *PprofResult) GetRouteKey() string { - if x != nil { - return x.RouteKey +// DiagnosticUnitRequest specifies a specific unit to gather diagnostics from. +type DiagnosticUnitRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Type of unit. + UnitType UnitType `protobuf:"varint,2,opt,name=unit_type,json=unitType,proto3,enum=cproto.UnitType" json:"unit_type,omitempty"` + // ID of the unit. + UnitId string `protobuf:"bytes,3,opt,name=unit_id,json=unitId,proto3" json:"unit_id,omitempty"` +} + +func (x *DiagnosticUnitRequest) Reset() { + *x = DiagnosticUnitRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_control_proto_msgTypes[13] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) } - return "" } -func (x *PprofResult) GetPprofType() PprofOption { - if x != nil { - return x.PprofType +func (x *DiagnosticUnitRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*DiagnosticUnitRequest) ProtoMessage() {} + +func (x *DiagnosticUnitRequest) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[13] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms } - return PprofOption_ALLOCS + return mi.MessageOf(x) } -func (x *PprofResult) GetResult() []byte { +// Deprecated: Use DiagnosticUnitRequest.ProtoReflect.Descriptor instead. +func (*DiagnosticUnitRequest) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{13} +} + +func (x *DiagnosticUnitRequest) GetUnitType() UnitType { if x != nil { - return x.Result + return x.UnitType } - return nil + return UnitType_INPUT } -func (x *PprofResult) GetError() string { +func (x *DiagnosticUnitRequest) GetUnitId() string { if x != nil { - return x.Error + return x.UnitId } return "" } -// PprofResponse is a wrapper to return all pprof responses. 
-type PprofResponse struct { +// DiagnosticUnitsRequest allows a diagnostic request to specify the units to target. +type DiagnosticUnitsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Results []*PprofResult `protobuf:"bytes,1,rep,name=results,proto3" json:"results,omitempty"` + // Specific units to target. (If no units are given then a result for all units is provided). + Units []*DiagnosticUnitRequest `protobuf:"bytes,1,rep,name=units,proto3" json:"units,omitempty"` } -func (x *PprofResponse) Reset() { - *x = PprofResponse{} +func (x *DiagnosticUnitsRequest) Reset() { + *x = DiagnosticUnitsRequest{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[13] + mi := &file_control_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *PprofResponse) String() string { +func (x *DiagnosticUnitsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*PprofResponse) ProtoMessage() {} +func (*DiagnosticUnitsRequest) ProtoMessage() {} -func (x *PprofResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[13] +func (x *DiagnosticUnitsRequest) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1273,47 +1244,53 @@ func (x *PprofResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use PprofResponse.ProtoReflect.Descriptor instead. -func (*PprofResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{13} +// Deprecated: Use DiagnosticUnitsRequest.ProtoReflect.Descriptor instead. +func (*DiagnosticUnitsRequest) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{14} } -func (x *PprofResponse) GetResults() []*PprofResult { +func (x *DiagnosticUnitsRequest) GetUnits() []*DiagnosticUnitRequest { if x != nil { - return x.Results + return x.Units } return nil } -// MetricsResponse is the result of a request for the metrics buffer endpoint for a application/route key -type MetricsResponse struct { +// DiagnosticUnitResponse is diagnostic information about a specific unit. +type DiagnosticUnitResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - AppName string `protobuf:"bytes,1,opt,name=appName,proto3" json:"appName,omitempty"` - RouteKey string `protobuf:"bytes,2,opt,name=routeKey,proto3" json:"routeKey,omitempty"` - Result []byte `protobuf:"bytes,3,opt,name=result,proto3" json:"result,omitempty"` - Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + // ID of the component. + ComponentId string `protobuf:"bytes,1,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` + // Type of unit. + UnitType UnitType `protobuf:"varint,2,opt,name=unit_type,json=unitType,proto3,enum=cproto.UnitType" json:"unit_type,omitempty"` + // ID of the unit. + UnitId string `protobuf:"bytes,3,opt,name=unit_id,json=unitId,proto3" json:"unit_id,omitempty"` + // Error message for the failure fetching diagnostic information for this unit. + Error string `protobuf:"bytes,4,opt,name=error,proto3" json:"error,omitempty"` + // Diagnostic results for the unit. 
+ Results []*DiagnosticFileResult `protobuf:"bytes,5,rep,name=results,proto3" json:"results,omitempty"` } -func (x *MetricsResponse) Reset() { - *x = MetricsResponse{} +func (x *DiagnosticUnitResponse) Reset() { + *x = DiagnosticUnitResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[14] + mi := &file_control_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *MetricsResponse) String() string { +func (x *DiagnosticUnitResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*MetricsResponse) ProtoMessage() {} +func (*DiagnosticUnitResponse) ProtoMessage() {} -func (x *MetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[14] +func (x *DiagnosticUnitResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1324,65 +1301,73 @@ func (x *MetricsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use MetricsResponse.ProtoReflect.Descriptor instead. -func (*MetricsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{14} +// Deprecated: Use DiagnosticUnitResponse.ProtoReflect.Descriptor instead. +func (*DiagnosticUnitResponse) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{15} } -func (x *MetricsResponse) GetAppName() string { +func (x *DiagnosticUnitResponse) GetComponentId() string { if x != nil { - return x.AppName + return x.ComponentId } return "" } -func (x *MetricsResponse) GetRouteKey() string { +func (x *DiagnosticUnitResponse) GetUnitType() UnitType { if x != nil { - return x.RouteKey + return x.UnitType } - return "" + return UnitType_INPUT } -func (x *MetricsResponse) GetResult() []byte { +func (x *DiagnosticUnitResponse) GetUnitId() string { if x != nil { - return x.Result + return x.UnitId } - return nil + return "" } -func (x *MetricsResponse) GetError() string { +func (x *DiagnosticUnitResponse) GetError() string { if x != nil { return x.Error } return "" } -// ProcMetricsResponse is a wrapper to return all metrics buffer responses -type ProcMetricsResponse struct { +func (x *DiagnosticUnitResponse) GetResults() []*DiagnosticFileResult { + if x != nil { + return x.Results + } + return nil +} + +// DiagnosticUnitsResponse is response to gathered units diagnostic information. +type DiagnosticUnitsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields - Result []*MetricsResponse `protobuf:"bytes,1,rep,name=result,proto3" json:"result,omitempty"` + // Diagnostics results per unit. 
+ Units []*DiagnosticUnitResponse `protobuf:"bytes,2,rep,name=units,proto3" json:"units,omitempty"` } -func (x *ProcMetricsResponse) Reset() { - *x = ProcMetricsResponse{} +func (x *DiagnosticUnitsResponse) Reset() { + *x = DiagnosticUnitsResponse{} if protoimpl.UnsafeEnabled { - mi := &file_control_proto_msgTypes[15] + mi := &file_control_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } -func (x *ProcMetricsResponse) String() string { +func (x *DiagnosticUnitsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ProcMetricsResponse) ProtoMessage() {} +func (*DiagnosticUnitsResponse) ProtoMessage() {} -func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message { - mi := &file_control_proto_msgTypes[15] +func (x *DiagnosticUnitsResponse) ProtoReflect() protoreflect.Message { + mi := &file_control_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { @@ -1393,14 +1378,14 @@ func (x *ProcMetricsResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ProcMetricsResponse.ProtoReflect.Descriptor instead. -func (*ProcMetricsResponse) Descriptor() ([]byte, []int) { - return file_control_proto_rawDescGZIP(), []int{15} +// Deprecated: Use DiagnosticUnitsResponse.ProtoReflect.Descriptor instead. +func (*DiagnosticUnitsResponse) Descriptor() ([]byte, []int) { + return file_control_proto_rawDescGZIP(), []int{16} } -func (x *ProcMetricsResponse) GetResult() []*MetricsResponse { +func (x *DiagnosticUnitsResponse) GetUnits() []*DiagnosticUnitResponse { if x != nil { - return x.Result + return x.Units } return nil } @@ -1409,196 +1394,192 @@ var File_control_proto protoreflect.FileDescriptor var file_control_proto_rawDesc = []byte{ 0x0a, 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, - 0x06, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x22, 0x7d, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, - 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, - 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, - 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, - 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, - 0x55, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x48, 0x0a, 0x0e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x01, 
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, - 0x22, 0x6f, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, + 0x06, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x07, 0x0a, 0x05, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x22, 0x7d, 0x0a, 0x0f, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, + 0x69, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, + 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, + 0x22, 0x55, 0x0a, 0x0f, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, 0x65, - 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, - 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, - 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, - 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, - 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, - 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, - 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, - 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, - 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x14, 0x43, 0x6f, + 0x73, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x48, 0x0a, 0x0e, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 
0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, + 0x69, 0x6f, 0x6e, 0x12, 0x1c, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, 0x49, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x55, 0x52, + 0x49, 0x22, 0x6f, 0x0a, 0x0f, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x0e, 0x32, 0x14, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x41, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x22, 0xb5, 0x01, 0x0a, 0x12, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, + 0x55, 0x6e, 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, + 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, + 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, + 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, + 0x64, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, + 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, + 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb9, 0x01, 0x0a, 0x14, 0x43, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, + 0x6e, 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x26, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, + 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, + 0x09, 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, + 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, + 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 
0x61, 0x6d, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, + 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x05, + 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, 0x6e, + 0x69, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x3f, + 0x0a, 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, - 0x66, 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, - 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x3a, 0x0a, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x26, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, - 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x2e, 0x4d, 0x65, 0x74, - 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x04, 0x6d, 0x65, 0x74, 0x61, 0x1a, 0x37, 0x0a, 0x09, - 0x4d, 0x65, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xe6, 0x01, 0x0a, 0x0e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, - 0x65, 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x05, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, + 0x66, 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, + 0x8c, 0x01, 0x0a, 0x0e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x49, 0x6e, + 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x0a, 0x06, + 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, + 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, 0x6d, + 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x54, 0x69, + 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x18, 0x05, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x73, 0x6e, 0x61, 0x70, 0x73, 0x68, 0x6f, 0x74, 0x22, 0xb2, + 0x01, 0x0a, 
0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x2a, 0x0a, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x69, 0x6e, 0x66, 0x6f, 0x12, 0x23, 0x0a, 0x05, + 0x73, 0x74, 0x61, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x30, 0x0a, 0x05, 0x75, - 0x6e, 0x69, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x55, 0x6e, 0x69, - 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x3f, 0x0a, - 0x0c, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x06, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, - 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, - 0x6f, 0x52, 0x0b, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x22, 0xb5, - 0x03, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x18, 0x0a, 0x07, 0x70, - 0x72, 0x6f, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, - 0x6f, 0x63, 0x65, 0x73, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x68, 0x6f, 0x73, - 0x74, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x65, 0x70, 0x68, 0x65, 0x6d, 0x65, 0x72, - 0x61, 0x6c, 0x5f, 0x69, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x65, 0x70, 0x68, - 0x65, 0x6d, 0x65, 0x72, 0x61, 0x6c, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, - 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, - 0x6f, 0x6e, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x63, 0x6f, 0x6d, 0x6d, - 0x69, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x43, - 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x62, 0x75, 0x69, 0x6c, 0x64, 0x5f, 0x74, - 0x69, 0x6d, 0x65, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x62, 0x75, 0x69, 0x6c, 0x64, - 0x54, 0x69, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x18, 0x09, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x75, 0x73, 0x65, 0x72, 0x6e, 0x61, 0x6d, 0x65, - 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x08, 0x75, 0x73, 0x65, - 0x72, 0x5f, 0x67, 0x69, 0x64, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x75, 0x73, 0x65, - 0x72, 0x47, 0x69, 0x64, 0x12, 0x22, 0x0a, 0x0c, 0x61, 0x72, 0x63, 0x68, 0x69, 0x74, 0x65, 0x63, - 0x74, 0x75, 0x72, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x72, 0x63, 0x68, - 0x69, 0x74, 0x65, 0x63, 0x74, 0x75, 
0x72, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x72, 0x6f, 0x75, 0x74, - 0x65, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, - 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x29, 0x0a, 0x10, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, - 0x5f, 0x6c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x0f, 0x65, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x4c, 0x69, 0x63, 0x65, 0x6e, 0x73, 0x65, 0x64, - 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x09, 0x52, - 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x86, 0x01, 0x0a, 0x0d, 0x53, 0x74, 0x61, 0x74, 0x65, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x05, 0x73, 0x74, 0x61, 0x74, - 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x05, 0x73, 0x74, 0x61, 0x74, 0x65, 0x12, 0x18, 0x0a, - 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, - 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x53, 0x74, - 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x22, - 0x3a, 0x0a, 0x10, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x26, 0x0a, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x18, 0x01, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, - 0x4d, 0x65, 0x74, 0x61, 0x52, 0x05, 0x70, 0x72, 0x6f, 0x63, 0x73, 0x22, 0x9d, 0x01, 0x0a, 0x0c, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x09, - 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0e, 0x32, - 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x24, 0x0a, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x74, 0x72, 0x61, 0x63, 0x65, 0x44, 0x75, 0x72, - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x22, 0xa4, 0x01, 0x0a, 0x0b, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, - 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, - 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, - 0x79, 0x12, 0x31, 0x0a, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, 0x54, 0x79, 0x70, 0x65, 0x18, 0x03, - 0x20, 0x01, 0x28, 0x0e, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x09, 0x70, 0x70, 0x72, 0x6f, 0x66, - 0x54, 0x79, 0x70, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 
0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, - 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, - 0x6f, 0x72, 0x22, 0x3e, 0x0a, 0x0d, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, - 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x22, 0x75, 0x0a, 0x0f, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x70, 0x70, 0x4e, 0x61, 0x6d, 0x65, 0x12, - 0x1a, 0x0a, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x18, 0x02, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x08, 0x72, 0x6f, 0x75, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x12, 0x16, 0x0a, 0x06, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, - 0x75, 0x6c, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x22, 0x46, 0x0a, 0x13, 0x50, 0x72, 0x6f, - 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x2f, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, - 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, - 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, - 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, - 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, - 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, - 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, - 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, - 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, - 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, - 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, - 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, - 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, - 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, - 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, - 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, - 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 
0x41, 0x50, - 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, - 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, - 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, - 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8c, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, - 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, - 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, - 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, - 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x12, 0x33, 0x0a, 0x08, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x12, 0x0d, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x18, 0x2e, 0x63, 0x70, - 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x61, 0x52, 0x65, 0x73, - 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x12, 0x14, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x50, 0x70, - 0x72, 0x6f, 0x66, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x50, - 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1b, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x50, 0x72, 0x6f, 0x63, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x73, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, - 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, - 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x36, 0x0a, 0x0a, 0x63, + 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x43, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, 0x0a, 0x63, 0x6f, 0x6d, 
0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x73, 0x22, 0xdf, 0x01, 0x0a, 0x14, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, + 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, + 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, + 0x12, 0x1a, 0x0a, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x66, 0x69, 0x6c, 0x65, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, + 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, + 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, + 0x72, 0x61, 0x74, 0x65, 0x64, 0x22, 0x18, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, + 0x51, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, + 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, + 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x73, 0x22, 0x5f, 0x0a, 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x09, 0x75, + 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, + 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, + 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, + 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, + 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, + 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, + 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, + 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, + 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, + 
0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, + 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, + 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, + 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, + 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, + 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, + 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, + 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, + 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, + 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, + 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, + 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, + 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, + 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, + 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, + 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, + 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, + 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, + 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, + 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, + 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, + 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, + 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, + 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, + 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, + 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, + 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8e, 0x03, + 0x0a, 0x13, 0x45, 0x6c, 
0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, + 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, + 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, + 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, + 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, + 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, + 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, + 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x17, 0x2e, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1e, 0x2e, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, + 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, + 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -1614,65 +1595,68 @@ func file_control_proto_rawDescGZIP() []byte { } var file_control_proto_enumTypes = make([]protoimpl.EnumInfo, 4) -var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 17) +var file_control_proto_msgTypes = make([]protoimpl.MessageInfo, 18) var file_control_proto_goTypes = []interface{}{ - (State)(0), // 0: cproto.State - (UnitType)(0), // 1: cproto.UnitType - (ActionStatus)(0), // 2: cproto.ActionStatus - (PprofOption)(0), // 3: cproto.PprofOption - (*Empty)(nil), // 4: cproto.Empty - (*VersionResponse)(nil), // 5: cproto.VersionResponse - (*RestartResponse)(nil), // 6: cproto.RestartResponse - (*UpgradeRequest)(nil), // 7: cproto.UpgradeRequest - (*UpgradeResponse)(nil), // 8: cproto.UpgradeResponse - (*ComponentUnitState)(nil), // 9: cproto.ComponentUnitState - (*ComponentVersionInfo)(nil), 
// 10: cproto.ComponentVersionInfo - (*ComponentState)(nil), // 11: cproto.ComponentState - (*ProcMeta)(nil), // 12: cproto.ProcMeta - (*StateResponse)(nil), // 13: cproto.StateResponse - (*ProcMetaResponse)(nil), // 14: cproto.ProcMetaResponse - (*PprofRequest)(nil), // 15: cproto.PprofRequest - (*PprofResult)(nil), // 16: cproto.PprofResult - (*PprofResponse)(nil), // 17: cproto.PprofResponse - (*MetricsResponse)(nil), // 18: cproto.MetricsResponse - (*ProcMetricsResponse)(nil), // 19: cproto.ProcMetricsResponse - nil, // 20: cproto.ComponentVersionInfo.MetaEntry + (State)(0), // 0: cproto.State + (UnitType)(0), // 1: cproto.UnitType + (ActionStatus)(0), // 2: cproto.ActionStatus + (PprofOption)(0), // 3: cproto.PprofOption + (*Empty)(nil), // 4: cproto.Empty + (*VersionResponse)(nil), // 5: cproto.VersionResponse + (*RestartResponse)(nil), // 6: cproto.RestartResponse + (*UpgradeRequest)(nil), // 7: cproto.UpgradeRequest + (*UpgradeResponse)(nil), // 8: cproto.UpgradeResponse + (*ComponentUnitState)(nil), // 9: cproto.ComponentUnitState + (*ComponentVersionInfo)(nil), // 10: cproto.ComponentVersionInfo + (*ComponentState)(nil), // 11: cproto.ComponentState + (*StateAgentInfo)(nil), // 12: cproto.StateAgentInfo + (*StateResponse)(nil), // 13: cproto.StateResponse + (*DiagnosticFileResult)(nil), // 14: cproto.DiagnosticFileResult + (*DiagnosticAgentRequest)(nil), // 15: cproto.DiagnosticAgentRequest + (*DiagnosticAgentResponse)(nil), // 16: cproto.DiagnosticAgentResponse + (*DiagnosticUnitRequest)(nil), // 17: cproto.DiagnosticUnitRequest + (*DiagnosticUnitsRequest)(nil), // 18: cproto.DiagnosticUnitsRequest + (*DiagnosticUnitResponse)(nil), // 19: cproto.DiagnosticUnitResponse + (*DiagnosticUnitsResponse)(nil), // 20: cproto.DiagnosticUnitsResponse + nil, // 21: cproto.ComponentVersionInfo.MetaEntry + (*timestamppb.Timestamp)(nil), // 22: google.protobuf.Timestamp } var file_control_proto_depIdxs = []int32{ 2, // 0: cproto.RestartResponse.status:type_name -> cproto.ActionStatus 2, // 1: cproto.UpgradeResponse.status:type_name -> cproto.ActionStatus 1, // 2: cproto.ComponentUnitState.unit_type:type_name -> cproto.UnitType 0, // 3: cproto.ComponentUnitState.state:type_name -> cproto.State - 20, // 4: cproto.ComponentVersionInfo.meta:type_name -> cproto.ComponentVersionInfo.MetaEntry + 21, // 4: cproto.ComponentVersionInfo.meta:type_name -> cproto.ComponentVersionInfo.MetaEntry 0, // 5: cproto.ComponentState.state:type_name -> cproto.State 9, // 6: cproto.ComponentState.units:type_name -> cproto.ComponentUnitState 10, // 7: cproto.ComponentState.version_info:type_name -> cproto.ComponentVersionInfo - 0, // 8: cproto.StateResponse.state:type_name -> cproto.State - 11, // 9: cproto.StateResponse.components:type_name -> cproto.ComponentState - 12, // 10: cproto.ProcMetaResponse.procs:type_name -> cproto.ProcMeta - 3, // 11: cproto.PprofRequest.pprofType:type_name -> cproto.PprofOption - 3, // 12: cproto.PprofResult.pprofType:type_name -> cproto.PprofOption - 16, // 13: cproto.PprofResponse.results:type_name -> cproto.PprofResult - 18, // 14: cproto.ProcMetricsResponse.result:type_name -> cproto.MetricsResponse - 4, // 15: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty - 4, // 16: cproto.ElasticAgentControl.State:input_type -> cproto.Empty - 4, // 17: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty - 7, // 18: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest - 4, // 19: cproto.ElasticAgentControl.ProcMeta:input_type -> cproto.Empty - 15, // 20: 
cproto.ElasticAgentControl.Pprof:input_type -> cproto.PprofRequest - 4, // 21: cproto.ElasticAgentControl.ProcMetrics:input_type -> cproto.Empty - 5, // 22: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse - 13, // 23: cproto.ElasticAgentControl.State:output_type -> cproto.StateResponse - 6, // 24: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse - 8, // 25: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse - 14, // 26: cproto.ElasticAgentControl.ProcMeta:output_type -> cproto.ProcMetaResponse - 17, // 27: cproto.ElasticAgentControl.Pprof:output_type -> cproto.PprofResponse - 19, // 28: cproto.ElasticAgentControl.ProcMetrics:output_type -> cproto.ProcMetricsResponse - 22, // [22:29] is the sub-list for method output_type - 15, // [15:22] is the sub-list for method input_type - 15, // [15:15] is the sub-list for extension type_name - 15, // [15:15] is the sub-list for extension extendee - 0, // [0:15] is the sub-list for field type_name + 12, // 8: cproto.StateResponse.info:type_name -> cproto.StateAgentInfo + 0, // 9: cproto.StateResponse.state:type_name -> cproto.State + 11, // 10: cproto.StateResponse.components:type_name -> cproto.ComponentState + 22, // 11: cproto.DiagnosticFileResult.generated:type_name -> google.protobuf.Timestamp + 14, // 12: cproto.DiagnosticAgentResponse.results:type_name -> cproto.DiagnosticFileResult + 1, // 13: cproto.DiagnosticUnitRequest.unit_type:type_name -> cproto.UnitType + 17, // 14: cproto.DiagnosticUnitsRequest.units:type_name -> cproto.DiagnosticUnitRequest + 1, // 15: cproto.DiagnosticUnitResponse.unit_type:type_name -> cproto.UnitType + 14, // 16: cproto.DiagnosticUnitResponse.results:type_name -> cproto.DiagnosticFileResult + 19, // 17: cproto.DiagnosticUnitsResponse.units:type_name -> cproto.DiagnosticUnitResponse + 4, // 18: cproto.ElasticAgentControl.Version:input_type -> cproto.Empty + 4, // 19: cproto.ElasticAgentControl.State:input_type -> cproto.Empty + 4, // 20: cproto.ElasticAgentControl.Restart:input_type -> cproto.Empty + 7, // 21: cproto.ElasticAgentControl.Upgrade:input_type -> cproto.UpgradeRequest + 15, // 22: cproto.ElasticAgentControl.DiagnosticAgent:input_type -> cproto.DiagnosticAgentRequest + 18, // 23: cproto.ElasticAgentControl.DiagnosticUnits:input_type -> cproto.DiagnosticUnitsRequest + 5, // 24: cproto.ElasticAgentControl.Version:output_type -> cproto.VersionResponse + 13, // 25: cproto.ElasticAgentControl.State:output_type -> cproto.StateResponse + 6, // 26: cproto.ElasticAgentControl.Restart:output_type -> cproto.RestartResponse + 8, // 27: cproto.ElasticAgentControl.Upgrade:output_type -> cproto.UpgradeResponse + 16, // 28: cproto.ElasticAgentControl.DiagnosticAgent:output_type -> cproto.DiagnosticAgentResponse + 20, // 29: cproto.ElasticAgentControl.DiagnosticUnits:output_type -> cproto.DiagnosticUnitsResponse + 24, // [24:30] is the sub-list for method output_type + 18, // [18:24] is the sub-list for method input_type + 18, // [18:18] is the sub-list for extension type_name + 18, // [18:18] is the sub-list for extension extendee + 0, // [0:18] is the sub-list for field type_name } func init() { file_control_proto_init() } @@ -1778,7 +1762,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMeta); i { + switch v := v.(*StateAgentInfo); i { case 0: return &v.state case 1: @@ -1802,7 +1786,7 @@ func file_control_proto_init() { } } 
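	// The exporter closures in this init function all share the fixed shape
	// protoc-gen-go emits for every message: index 0 exposes the message's
	// protoimpl.MessageState, index 1 its size cache, and index 2 its
	// unknown-fields buffer. Only the concrete type in each assertion
	// changes as the messages are renamed.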
file_control_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMetaResponse); i { + switch v := v.(*DiagnosticFileResult); i { case 0: return &v.state case 1: @@ -1814,7 +1798,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofRequest); i { + switch v := v.(*DiagnosticAgentRequest); i { case 0: return &v.state case 1: @@ -1826,7 +1810,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResult); i { + switch v := v.(*DiagnosticAgentResponse); i { case 0: return &v.state case 1: @@ -1838,7 +1822,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*PprofResponse); i { + switch v := v.(*DiagnosticUnitRequest); i { case 0: return &v.state case 1: @@ -1850,7 +1834,7 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*MetricsResponse); i { + switch v := v.(*DiagnosticUnitsRequest); i { case 0: return &v.state case 1: @@ -1862,7 +1846,19 @@ func file_control_proto_init() { } } file_control_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ProcMetricsResponse); i { + switch v := v.(*DiagnosticUnitResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_control_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*DiagnosticUnitsResponse); i { case 0: return &v.state case 1: @@ -1880,7 +1876,7 @@ func file_control_proto_init() { GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_control_proto_rawDesc, NumEnums: 4, - NumMessages: 17, + NumMessages: 18, NumExtensions: 0, NumServices: 1, }, diff --git a/internal/pkg/agent/control/cproto/control_grpc.pb.go b/internal/pkg/agent/control/cproto/control_grpc.pb.go index c9e97f7047a..f00afb24d2b 100644 --- a/internal/pkg/agent/control/cproto/control_grpc.pb.go +++ b/internal/pkg/agent/control/cproto/control_grpc.pb.go @@ -5,7 +5,7 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: // - protoc-gen-go-grpc v1.2.0 -// - protoc v3.19.4 +// - protoc v3.21.5 // source: control.proto package cproto @@ -35,12 +35,10 @@ type ElasticAgentControlClient interface { Restart(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*RestartResponse, error) // Upgrade starts the upgrade process of Elastic Agent. Upgrade(ctx context.Context, in *UpgradeRequest, opts ...grpc.CallOption) (*UpgradeResponse, error) - // Gather all running process metadata. - ProcMeta(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetaResponse, error) - // Gather requested pprof data from specified applications. - Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) - // Gather all running process metrics. - ProcMetrics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetricsResponse, error) + // Gather diagnostic information for the running Elastic Agent. + DiagnosticAgent(ctx context.Context, in *DiagnosticAgentRequest, opts ...grpc.CallOption) (*DiagnosticAgentResponse, error) + // Gather diagnostic information for the running units. 
+ DiagnosticUnits(ctx context.Context, in *DiagnosticUnitsRequest, opts ...grpc.CallOption) (*DiagnosticUnitsResponse, error) } type elasticAgentControlClient struct { @@ -87,27 +85,18 @@ func (c *elasticAgentControlClient) Upgrade(ctx context.Context, in *UpgradeRequ return out, nil } -func (c *elasticAgentControlClient) ProcMeta(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetaResponse, error) { - out := new(ProcMetaResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/ProcMeta", in, out, opts...) +func (c *elasticAgentControlClient) DiagnosticAgent(ctx context.Context, in *DiagnosticAgentRequest, opts ...grpc.CallOption) (*DiagnosticAgentResponse, error) { + out := new(DiagnosticAgentResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/DiagnosticAgent", in, out, opts...) if err != nil { return nil, err } return out, nil } -func (c *elasticAgentControlClient) Pprof(ctx context.Context, in *PprofRequest, opts ...grpc.CallOption) (*PprofResponse, error) { - out := new(PprofResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/Pprof", in, out, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *elasticAgentControlClient) ProcMetrics(ctx context.Context, in *Empty, opts ...grpc.CallOption) (*ProcMetricsResponse, error) { - out := new(ProcMetricsResponse) - err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/ProcMetrics", in, out, opts...) +func (c *elasticAgentControlClient) DiagnosticUnits(ctx context.Context, in *DiagnosticUnitsRequest, opts ...grpc.CallOption) (*DiagnosticUnitsResponse, error) { + out := new(DiagnosticUnitsResponse) + err := c.cc.Invoke(ctx, "/cproto.ElasticAgentControl/DiagnosticUnits", in, out, opts...) if err != nil { return nil, err } @@ -126,12 +115,10 @@ type ElasticAgentControlServer interface { Restart(context.Context, *Empty) (*RestartResponse, error) // Upgrade starts the upgrade process of Elastic Agent. Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) - // Gather all running process metadata. - ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) - // Gather requested pprof data from specified applications. - Pprof(context.Context, *PprofRequest) (*PprofResponse, error) - // Gather all running process metrics. - ProcMetrics(context.Context, *Empty) (*ProcMetricsResponse, error) + // Gather diagnostic information for the running Elastic Agent. + DiagnosticAgent(context.Context, *DiagnosticAgentRequest) (*DiagnosticAgentResponse, error) + // Gather diagnostic information for the running units. 
+ DiagnosticUnits(context.Context, *DiagnosticUnitsRequest) (*DiagnosticUnitsResponse, error) mustEmbedUnimplementedElasticAgentControlServer() } @@ -151,14 +138,11 @@ func (UnimplementedElasticAgentControlServer) Restart(context.Context, *Empty) ( func (UnimplementedElasticAgentControlServer) Upgrade(context.Context, *UpgradeRequest) (*UpgradeResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method Upgrade not implemented") } -func (UnimplementedElasticAgentControlServer) ProcMeta(context.Context, *Empty) (*ProcMetaResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProcMeta not implemented") +func (UnimplementedElasticAgentControlServer) DiagnosticAgent(context.Context, *DiagnosticAgentRequest) (*DiagnosticAgentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DiagnosticAgent not implemented") } -func (UnimplementedElasticAgentControlServer) Pprof(context.Context, *PprofRequest) (*PprofResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method Pprof not implemented") -} -func (UnimplementedElasticAgentControlServer) ProcMetrics(context.Context, *Empty) (*ProcMetricsResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ProcMetrics not implemented") +func (UnimplementedElasticAgentControlServer) DiagnosticUnits(context.Context, *DiagnosticUnitsRequest) (*DiagnosticUnitsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DiagnosticUnits not implemented") } func (UnimplementedElasticAgentControlServer) mustEmbedUnimplementedElasticAgentControlServer() {} @@ -245,56 +229,38 @@ func _ElasticAgentControl_Upgrade_Handler(srv interface{}, ctx context.Context, return interceptor(ctx, in, info, handler) } -func _ElasticAgentControl_ProcMeta_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(ElasticAgentControlServer).ProcMeta(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/cproto.ElasticAgentControl/ProcMeta", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).ProcMeta(ctx, req.(*Empty)) - } - return interceptor(ctx, in, info, handler) -} - -func _ElasticAgentControl_Pprof_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(PprofRequest) +func _ElasticAgentControl_DiagnosticAgent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiagnosticAgentRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ElasticAgentControlServer).Pprof(ctx, in) + return srv.(ElasticAgentControlServer).DiagnosticAgent(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/Pprof", + FullMethod: "/cproto.ElasticAgentControl/DiagnosticAgent", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).Pprof(ctx, req.(*PprofRequest)) + return srv.(ElasticAgentControlServer).DiagnosticAgent(ctx, req.(*DiagnosticAgentRequest)) } return interceptor(ctx, in, info, handler) } -func _ElasticAgentControl_ProcMetrics_Handler(srv interface{}, ctx context.Context, dec 
func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(Empty) +func _ElasticAgentControl_DiagnosticUnits_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiagnosticUnitsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(ElasticAgentControlServer).ProcMetrics(ctx, in) + return srv.(ElasticAgentControlServer).DiagnosticUnits(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/cproto.ElasticAgentControl/ProcMetrics", + FullMethod: "/cproto.ElasticAgentControl/DiagnosticUnits", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(ElasticAgentControlServer).ProcMetrics(ctx, req.(*Empty)) + return srv.(ElasticAgentControlServer).DiagnosticUnits(ctx, req.(*DiagnosticUnitsRequest)) } return interceptor(ctx, in, info, handler) } @@ -323,16 +289,12 @@ var ElasticAgentControl_ServiceDesc = grpc.ServiceDesc{ Handler: _ElasticAgentControl_Upgrade_Handler, }, { - MethodName: "ProcMeta", - Handler: _ElasticAgentControl_ProcMeta_Handler, - }, - { - MethodName: "Pprof", - Handler: _ElasticAgentControl_Pprof_Handler, + MethodName: "DiagnosticAgent", + Handler: _ElasticAgentControl_DiagnosticAgent_Handler, }, { - MethodName: "ProcMetrics", - Handler: _ElasticAgentControl_ProcMetrics_Handler, + MethodName: "DiagnosticUnits", + Handler: _ElasticAgentControl_DiagnosticUnits_Handler, }, }, Streams: []grpc.StreamDesc{}, diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 15cefc09f7f..160ec2d6b41 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -9,17 +9,21 @@ import ( "encoding/json" "fmt" "net" + "time" "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - + "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/control" "github.com/elastic/elastic-agent/internal/pkg/agent/control/cproto" - monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config" + "github.com/elastic/elastic-agent/internal/pkg/diagnostics" "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -27,21 +31,23 @@ import ( type Server struct { cproto.UnimplementedElasticAgentControlServer - logger *logger.Logger - monitoringCfg *monitoringCfg.MonitoringConfig - coord *coordinator.Coordinator - listener net.Listener - server *grpc.Server - tracer *apm.Tracer + logger *logger.Logger + agentInfo *info.AgentInfo + coord *coordinator.Coordinator + listener net.Listener + server *grpc.Server + tracer *apm.Tracer + diagHooks diagnostics.Hooks } // New creates a new control protocol server. 
-func New(log *logger.Logger, cfg *monitoringCfg.MonitoringConfig, coord *coordinator.Coordinator, tracer *apm.Tracer) *Server { +func New(log *logger.Logger, agentInfo *info.AgentInfo, coord *coordinator.Coordinator, tracer *apm.Tracer, diagHooks diagnostics.Hooks) *Server { return &Server{ - logger: log, - monitoringCfg: cfg, - coord: coord, - tracer: tracer, + logger: log, + agentInfo: agentInfo, + coord: coord, + tracer: tracer, + diagHooks: diagHooks, } } @@ -135,6 +141,13 @@ func (s *Server) State(_ context.Context, _ *cproto.Empty) (*cproto.StateRespons }) } return &cproto.StateResponse{ + Info: &cproto.StateAgentInfo{ + Id: s.agentInfo.AgentID(), + Version: release.Version(), + Commit: release.Commit(), + BuildTime: release.BuildTime().Format(control.TimeFormat()), + Snapshot: release.Snapshot(), + }, State: state.State, Message: state.Message, Components: components, @@ -164,370 +177,66 @@ func (s *Server) Upgrade(ctx context.Context, request *cproto.UpgradeRequest) (* }, nil } -// BeatInfo is the metadata response a beat will provide when the root ("/") is queried. -type BeatInfo struct { - Beat string `json:"beat"` - Name string `json:"name"` - Hostname string `json:"hostname"` - ID string `json:"uuid"` - EphemeralID string `json:"ephemeral_id"` - Version string `json:"version"` - Commit string `json:"build_commit"` - Time string `json:"build_time"` - Username string `json:"username"` - UserID string `json:"uid"` - GroupID string `json:"gid"` - BinaryArch string `json:"binary_arch"` - ElasticLicensed bool `json:"elastic_licensed"` -} - -// ProcMeta returns version and beat inforation for all running processes. -func (s *Server) ProcMeta(ctx context.Context, _ *cproto.Empty) (*cproto.ProcMetaResponse, error) { - /* - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } - - resp := &cproto.ProcMetaResponse{ - Procs: []*cproto.ProcMeta{}, - } - - // gather spec data for all rk/apps running - specs := s.getSpecInfo("", "") - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - client := newSocketRequester(si.app, si.rk, endpoint) - - procMeta := client.procMeta(ctx) - resp.Procs = append(resp.Procs, procMeta) - } - - return resp, nil - */ - return nil, nil -} - -// Pprof returns /debug/pprof data for the requested applicaiont-route_key or all running applications. -func (s *Server) Pprof(ctx context.Context, req *cproto.PprofRequest) (*cproto.PprofResponse, error) { - if s.monitoringCfg == nil || s.monitoringCfg.Pprof == nil || !s.monitoringCfg.Pprof.Enabled { - return nil, fmt.Errorf("agent.monitoring.pprof disabled") +// DiagnosticAgent returns diagnostic information for this running Elastic Agent. 
+func (s *Server) DiagnosticAgent(ctx context.Context, _ *cproto.DiagnosticAgentRequest) (*cproto.DiagnosticAgentResponse, error) { + res := make([]*cproto.DiagnosticFileResult, 0, len(s.diagHooks)) + for _, h := range s.diagHooks { + if ctx.Err() != nil { + return nil, ctx.Err() + } + r := h.Hook(ctx) + res = append(res, &cproto.DiagnosticFileResult{ + Name: h.Name, + Filename: h.Filename, + Description: h.Description, + ContentType: h.ContentType, + Content: r, + Generated: timestamppb.New(time.Now().UTC()), + }) } - - /* - if s.routeFn == nil { - return nil, errors.New("route function is nil") - } - - dur, err := time.ParseDuration(req.TraceDuration) - if err != nil { - return nil, fmt.Errorf("unable to parse trace duration: %w", err) - } - - resp := &cproto.PprofResponse{ - Results: []*cproto.PprofResult{}, - } - - var wg sync.WaitGroup - ch := make(chan *cproto.PprofResult, 1) - - // retrieve elastic-agent pprof data if requested or application is unspecified. - if req.AppName == "" || req.AppName == agentName { - endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP) - c := newSocketRequester(agentName, "", endpoint) - for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) - } - } - - // get requested rk/appname spec or all specs - var specs []specInfo - if req.AppName != agentName { - specs = s.getSpecInfo(req.RouteKey, req.AppName) - } - for _, si := range specs { - endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk) - c := newSocketRequester(si.app, si.rk, endpoint) - // Launch a concurrent goroutine to gather all pprof endpoints from a socket. - for _, opt := range req.PprofType { - wg.Add(1) - go func(opt cproto.PprofOption) { - res := c.getPprof(ctx, opt, dur) - ch <- res - wg.Done() - }(opt) - } - } - - // wait for the waitgroup to be done and close the channel - go func() { - wg.Wait() - close(ch) - }() - - // gather all results from channel until closed. - for res := range ch { - resp.Results = append(resp.Results, res) - } - return resp, nil - */ - return nil, nil -} - -// ProcMetrics returns all buffered metrics data for the agent and running processes. 
-// If the agent.monitoring.http.buffer variable is not set, or set to false, a nil result attribute is returned
-func (s *Server) ProcMetrics(ctx context.Context, _ *cproto.Empty) (*cproto.ProcMetricsResponse, error) {
-	if s.monitoringCfg == nil || s.monitoringCfg.HTTP == nil || s.monitoringCfg.HTTP.Buffer == nil || !s.monitoringCfg.HTTP.Buffer.Enabled {
-		return &cproto.ProcMetricsResponse{}, nil
+	if ctx.Err() != nil {
+		return nil, ctx.Err()
 	}
-
-	/*
-		if s.routeFn == nil {
-			return nil, errors.New("route function is nil")
-		}
-
-		// gather metrics buffer data from the elastic-agent
-		endpoint := monitoring.AgentMonitoringEndpoint(runtime.GOOS, s.monitoringCfg.HTTP)
-		c := newSocketRequester(agentName, "", endpoint)
-		metrics := c.procMetrics(ctx)
-
-		resp := &cproto.ProcMetricsResponse{
-			Result: []*cproto.MetricsResponse{metrics},
-		}
-
-		// gather metrics buffer data from all other processes
-		specs := s.getSpecInfo("", "")
-		for _, si := range specs {
-			endpoint := monitoring.MonitoringEndpoint(si.spec, runtime.GOOS, si.rk)
-			client := newSocketRequester(si.app, si.rk, endpoint)
-
-			s.logger.Infof("gather metrics from %s", endpoint)
-			metrics := client.procMetrics(ctx)
-			resp.Result = append(resp.Result, metrics)
-		}
-		return resp, nil
-	*/
-	return nil, nil
+	return &cproto.DiagnosticAgentResponse{Results: res}, nil
 }

-/*
-// getSpecs will return the specs for the program associated with the specified route key/app name, or all programs if no key(s) are specified.
-// if matchRK or matchApp are empty all results will be returned.
-func (s *Server) getSpecInfo(matchRK, matchApp string) []specInfo {
-	routes := s.routeFn()
-
-	// find specInfo for a specified rk/app
-	if matchRK != "" && matchApp != "" {
-		programs, ok := routes.Get(matchRK)
-		if !ok {
-			s.logger.With("route_key", matchRK).Debug("No matching route key found.")
-			return []specInfo{}
-		}
-		sp, ok := programs.(specer)
-		if !ok {
-			s.logger.With("route_key", matchRK, "route", programs).Warn("Unable to cast route as specer.")
-			return []specInfo{}
-		}
-		specs := sp.Specs()
-
-		spec, ok := specs[matchApp]
-		if !ok {
-			s.logger.With("route_key", matchRK, "application_name", matchApp).Debug("No matching route key/application name found.")
-			return []specInfo{}
-		}
-		return []specInfo{specInfo{spec: spec, app: matchApp, rk: matchRK}}
-	}
-
-	// gather specInfo for all rk/app values
-	res := make([]specInfo, 0)
-	for _, rk := range routes.Keys() {
-		programs, ok := routes.Get(rk)
-		if !ok {
-			// we do not expect to ever hit this code path
-			// if this log message occurs then the agent is unable to access one of the keys that is returned by the route function
-			// might be a race condition if someone tries to update the policy to remove an output?
-			s.logger.With("route_key", rk).Warn("Unable to retrieve route.")
-			continue
-		}
-		sp, ok := programs.(specer)
-		if !ok {
-			s.logger.With("route_key", matchRK, "route", programs).Warn("Unable to cast route as specer.")
-			continue
-		}
-		for n, spec := range sp.Specs() {
-			res = append(res, specInfo{
-				rk:   rk,
-				app:  n,
-				spec: spec,
-			})
-		}
+// DiagnosticUnits returns diagnostic information for the specific units (or all units if none are provided).
+func (s *Server) DiagnosticUnits(ctx context.Context, req *cproto.DiagnosticUnitsRequest) (*cproto.DiagnosticUnitsResponse, error) { + units := make([]component.Unit, 0, len(req.Units)) + for _, u := range req.Units { + units = append(units, component.Unit{ + ID: u.UnitId, + Type: client.UnitType(u.UnitType), + }) } - return res -} - -// socketRequester is a struct to gather (diagnostics) data from a socket opened by elastic-agent or one if it's processes -type socketRequester struct { - c http.Client - endpoint string - appName string - routeKey string -} -func newSocketRequester(appName, routeKey, endpoint string) *socketRequester { - c := http.Client{} - if strings.HasPrefix(endpoint, "unix://") { - c.Transport = &http.Transport{ - Proxy: nil, - DialContext: socket.DialContext(strings.TrimPrefix(endpoint, "unix://")), - } - endpoint = "unix" - } else if strings.HasPrefix(endpoint, "npipe://") { - c.Transport = &http.Transport{ - Proxy: nil, - DialContext: socket.DialContext(strings.TrimPrefix(endpoint, "npipe:///")), + diag := s.coord.PerformDiagnostics(ctx, units...) + res := make([]*cproto.DiagnosticUnitResponse, 0, len(diag)) + for _, d := range diag { + r := &cproto.DiagnosticUnitResponse{ + ComponentId: d.Component.ID, + UnitType: cproto.UnitType(d.Unit.Type), + UnitId: d.Unit.ID, + Error: "", + Results: nil, + } + if d.Err != nil { + r.Error = d.Err.Error() + } else { + results := make([]*cproto.DiagnosticFileResult, 0, len(d.Results)) + for _, fr := range d.Results { + results = append(results, &cproto.DiagnosticFileResult{ + Name: fr.Name, + Filename: fr.Filename, + Description: fr.Description, + ContentType: fr.ContentType, + Content: fr.Content, + Generated: fr.Generated, + }) + } + r.Results = results } - endpoint = "npipe" - } - return &socketRequester{ - c: c, - appName: appName, - routeKey: routeKey, - endpoint: endpoint, - } -} - -// getPath creates a get request for the specified path. -// Will return an error if that status code is not 200. -func (r *socketRequester) getPath(ctx context.Context, path string) (*http.Response, error) { - req, err := http.NewRequest("GET", "http://"+r.endpoint+path, nil) - if err != nil { - return nil, err - } - req = req.WithContext(ctx) - res, err := r.c.Do(req) - if err != nil { - return nil, err - } - if res.StatusCode != 200 { - res.Body.Close() - return nil, fmt.Errorf("response status is %d", res.StatusCode) - } - return res, nil - -} - -// procMeta will return process metadata by querying the "/" path. 
-func (r *socketRequester) procMeta(ctx context.Context) *cproto.ProcMeta { - pm := &cproto.ProcMeta{ - Name: r.appName, - RouteKey: r.routeKey, - } - - res, err := r.getPath(ctx, "/") - if err != nil { - pm.Error = err.Error() - return pm - } - defer res.Body.Close() - - bi := &BeatInfo{} - dec := json.NewDecoder(res.Body) - if err := dec.Decode(bi); err != nil { - pm.Error = err.Error() - return pm - } - - pm.Process = bi.Beat - pm.Hostname = bi.Hostname - pm.Id = bi.ID - pm.EphemeralId = bi.EphemeralID - pm.Version = bi.Version - pm.BuildCommit = bi.Commit - pm.BuildTime = bi.Time - pm.Username = bi.Username - pm.UserId = bi.UserID - pm.UserGid = bi.GroupID - pm.Architecture = bi.BinaryArch - pm.ElasticLicensed = bi.ElasticLicensed - - return pm -} - -var pprofEndpoints = map[cproto.PprofOption]string{ - cproto.PprofOption_ALLOCS: "/debug/pprof/allocs", - cproto.PprofOption_BLOCK: "/debug/pprof/block", - cproto.PprofOption_CMDLINE: "/debug/pprof/cmdline", - cproto.PprofOption_GOROUTINE: "/debug/pprof/goroutine", - cproto.PprofOption_HEAP: "/debug/pprof/heap", - cproto.PprofOption_MUTEX: "/debug/pprof/mutex", - cproto.PprofOption_PROFILE: "/debug/pprof/profile", - cproto.PprofOption_THREADCREATE: "/debug/pprof/threadcreate", - cproto.PprofOption_TRACE: "/debug/pprof/trace", -} - -// getProf will gather pprof data specified by the option. -func (r *socketRequester) getPprof(ctx context.Context, opt cproto.PprofOption, dur time.Duration) *cproto.PprofResult { - res := &cproto.PprofResult{ - AppName: r.appName, - RouteKey: r.routeKey, - PprofType: opt, - } - - path, ok := pprofEndpoints[opt] - if !ok { - res.Error = "unknown path for option" - return res - } - - if opt == cproto.PprofOption_PROFILE || opt == cproto.PprofOption_TRACE { - path += fmt.Sprintf("?seconds=%0.f", dur.Seconds()) - } - - resp, err := r.getPath(ctx, path) - if err != nil { - res.Error = err.Error() - return res - } - defer resp.Body.Close() - - p, err := io.ReadAll(resp.Body) - if err != nil { - res.Error = err.Error() - return res - } - res.Result = p - return res -} - -// procMetrics will gather metrics buffer data -func (r *socketRequester) procMetrics(ctx context.Context) *cproto.MetricsResponse { - res := &cproto.MetricsResponse{ - AppName: r.appName, - RouteKey: r.routeKey, - } - - resp, err := r.getPath(ctx, "/buffer") - if err != nil { - res.Error = err.Error() - return res - } - defer resp.Body.Close() - - p, err := io.ReadAll(resp.Body) - if err != nil { - res.Error = err.Error() - return res - } - - if len(p) == 0 { - res.Error = "no content" - return res + res = append(res, r) } - res.Result = p - return res + return &cproto.DiagnosticUnitsResponse{Units: res}, nil } -*/ diff --git a/internal/pkg/agent/transpiler/vars.go b/internal/pkg/agent/transpiler/vars.go index e8f06a6928b..96fbacd48c8 100644 --- a/internal/pkg/agent/transpiler/vars.go +++ b/internal/pkg/agent/transpiler/vars.go @@ -97,6 +97,12 @@ func (v *Vars) Lookup(name string) (interface{}, bool) { return v.tree.Lookup(name) } +// Map transforms the variables into a map[string]interface{} and will abort and return any errors related +// to type conversion. +func (v *Vars) Map() (map[string]interface{}, error) { + return v.tree.Map() +} + // lookupNode performs a lookup on the AST, but keeps the result as a `Node`. // // This is different from `Lookup` which returns the actual type, not the AST type. 
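The new Vars.Map() accessor exposes the whole variable tree as plain Go values. A minimal sketch of the intended call pattern, assuming a populated *transpiler.Vars value named vars (construction and imports elided; the logger named log is illustrative):

	m, err := vars.Map()
	if err != nil {
		// a node in the AST could not be converted to a plain Go value
		return fmt.Errorf("variables are not representable as a map: %w", err)
	}
	for name, value := range m {
		log.Debugf("variable %s = %v", name, value)
	}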
diff --git a/internal/pkg/basecmd/version/cmd_test.go b/internal/pkg/basecmd/version/cmd_test.go
index 5b21ed252ad..fb0e7e960bd 100644
--- a/internal/pkg/basecmd/version/cmd_test.go
+++ b/internal/pkg/basecmd/version/cmd_test.go
@@ -57,7 +57,7 @@ func TestCmdBinaryOnlyYAML(t *testing.T) {
 }
 
 func TestCmdDaemon(t *testing.T) {
- srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer)
+ srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil)
 require.NoError(t, srv.Start())
 defer srv.Stop()
@@ -73,7 +73,7 @@ func TestCmdDaemon(t *testing.T) {
 }
 
 func TestCmdDaemonYAML(t *testing.T) {
- srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer)
+ srv := server.New(newErrorLogger(t), nil, nil, apmtest.DiscardTracer, nil)
 require.NoError(t, srv.Start())
 defer srv.Stop()
diff --git a/internal/pkg/diagnostics/diagnostics.go b/internal/pkg/diagnostics/diagnostics.go
new file mode 100644
index 00000000000..a20e990c0ee
--- /dev/null
+++ b/internal/pkg/diagnostics/diagnostics.go
@@ -0,0 +1,102 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package diagnostics
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "runtime/pprof"
+
+ "gopkg.in/yaml.v2"
+
+ "github.com/elastic/elastic-agent/internal/pkg/release"
+)
+
+// Hook is a hook that gets used when diagnostic information is requested from the Elastic Agent.
+type Hook struct {
+ Name string
+ Filename string
+ Description string
+ ContentType string
+ Hook func(ctx context.Context) []byte
+}
+
+// Hooks is a set of diagnostic hooks.
+type Hooks []Hook
+
+// GlobalHooks returns the global hooks that can be used at any time with no other references.
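+//
+// A hedged usage sketch (the destination directory and file permissions here
+// are illustrative, not part of this change):
+//
+//	for _, h := range diagnostics.GlobalHooks() {
+//		b := h.Hook(ctx) // hook errors come back as the content itself
+//		_ = os.WriteFile(filepath.Join(dir, h.Filename), b, 0o600)
+//	}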
+func GlobalHooks() Hooks {
+ return Hooks{
+ {
+ Name: "version",
+ Filename: "version.txt",
+ Description: "version information",
+ ContentType: "application/yaml",
+ Hook: func(_ context.Context) []byte {
+ v := release.Info()
+ o, err := yaml.Marshal(v)
+ if err != nil {
+ return []byte(fmt.Sprintf("error: %q", err))
+ }
+ return o
+ },
+ },
+ {
+ Name: "goroutine",
+ Filename: "goroutine.txt",
+ Description: "stack traces of all current goroutines",
+ ContentType: "plain/text",
+ Hook: pprofDiag("goroutine"),
+ },
+ {
+ Name: "heap",
+ Filename: "heap.txt",
+ Description: "a sampling of memory allocations of live objects",
+ ContentType: "plain/text",
+ Hook: pprofDiag("heap"),
+ },
+ {
+ Name: "allocs",
+ Filename: "allocs.txt",
+ Description: "a sampling of all past memory allocations",
+ ContentType: "plain/text",
+ Hook: pprofDiag("allocs"),
+ },
+ {
+ Name: "threadcreate",
+ Filename: "threadcreate.txt",
+ Description: "stack traces that led to the creation of new OS threads",
+ ContentType: "plain/text",
+ Hook: pprofDiag("threadcreate"),
+ },
+ {
+ Name: "block",
+ Filename: "block.txt",
+ Description: "stack traces that led to blocking on synchronization primitives",
+ ContentType: "plain/text",
+ Hook: pprofDiag("block"),
+ },
+ {
+ Name: "mutex",
+ Filename: "mutex.txt",
+ Description: "stack traces of holders of contended mutexes",
+ ContentType: "plain/text",
+ Hook: pprofDiag("mutex"),
+ },
+ }
+}
+
+func pprofDiag(name string) func(context.Context) []byte {
+ return func(_ context.Context) []byte {
+ var w bytes.Buffer
+ err := pprof.Lookup(name).WriteTo(&w, 1)
+ if err != nil {
+ // error is returned as the content
+ return []byte(fmt.Sprintf("failed to write pprof to bytes buffer: %s", err))
+ }
+ return w.Bytes()
+ }
+}
diff --git a/pkg/component/load.go b/pkg/component/load.go
index 2b96c5ad64c..4144a353172 100644
--- a/pkg/component/load.go
+++ b/pkg/component/load.go
@@ -169,6 +169,17 @@ func (r *RuntimeSpecs) GetInput(inputType string) (InputRuntimeSpec, error) {
 return InputRuntimeSpec{}, ErrInputNotSupported
 }
 
+// ServiceSpecs returns only the input specifications that are based on the service runtime.
+func (r *RuntimeSpecs) ServiceSpecs() []InputRuntimeSpec {
+ var services []InputRuntimeSpec
+ for _, s := range r.inputSpecs {
+ if s.Spec.Service != nil {
+ services = append(services, s)
+ }
+ }
+ return services
+}
+
 // LoadSpec loads the component specification.
 //
 // Will error in the case that the specification is not valid. Only valid specifications are allowed.
diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go
index 7fd240431d0..b475ddef4ca 100644
--- a/pkg/component/runtime/manager.go
+++ b/pkg/component/runtime/manager.go
@@ -16,10 +16,7 @@ import (
 "sync"
 "time"
 
- "github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
-
 "github.com/gofrs/uuid"
-
 "go.elastic.co/apm"
 "go.elastic.co/apm/module/apmgrpc"
 "google.golang.org/grpc"
@@ -31,6 +28,7 @@ import (
 "github.com/elastic/elastic-agent-client/v7/pkg/proto"
 
 "github.com/elastic/elastic-agent-libs/atomic"
+ "github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
 "github.com/elastic/elastic-agent/internal/pkg/core/authority"
 "github.com/elastic/elastic-agent/pkg/component"
 "github.com/elastic/elastic-agent/pkg/core/logger"
@@ -43,6 +41,8 @@ const (
 // maxCheckinMisses is the maximum number of check-in misses a component can miss before it is killed
maxCheckinMisses = 3 + // diagnosticTimeout is the maximum amount of time to wait for a diagnostic response from a unit. + diagnosticTimeout = 20 * time.Second ) var ( @@ -52,8 +52,16 @@ var ( // ComponentComponentState provides a structure to map a component to current component state. type ComponentComponentState struct { + Component component.Component `yaml:"component"` + State ComponentState `yaml:"state"` +} + +// ComponentUnitDiagnostic provides a structure to map a component/unit to diagnostic results. +type ComponentUnitDiagnostic struct { Component component.Component - State ComponentState + Unit component.Unit + Results []*proto.ActionDiagnosticUnitResult + Err error } // Manager for the entire runtime of operating components. @@ -333,6 +341,69 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s return respBody, nil } +// PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then +// it performs diagnostics for all current units. +func (m *Manager) PerformDiagnostics(ctx context.Context, units ...component.Unit) []ComponentUnitDiagnostic { + // build results from units + var results []ComponentUnitDiagnostic + if len(units) > 0 { + for _, u := range units { + r := m.getRuntimeFromUnit(u) + if r == nil { + results = append(results, ComponentUnitDiagnostic{ + Unit: u, + Err: ErrNoUnit, + }) + } else { + results = append(results, ComponentUnitDiagnostic{ + Component: r.currComp, + Unit: u, + }) + } + } + } else { + m.mx.RLock() + for _, r := range m.current { + for _, u := range r.currComp.Units { + var err error + if r.currComp.Err != nil { + err = r.currComp.Err + } else if u.Err != nil { + err = u.Err + } + if err != nil { + results = append(results, ComponentUnitDiagnostic{ + Component: r.currComp, + Unit: u, + Err: err, + }) + } else { + results = append(results, ComponentUnitDiagnostic{ + Component: r.currComp, + Unit: u, + }) + } + } + } + m.mx.RUnlock() + } + + for i, r := range results { + if r.Err != nil { + // already in error don't perform diagnostics + continue + } + diag, err := m.performDiagAction(ctx, r.Unit) + if err != nil { + r.Err = err + } else { + r.Results = diag + } + results[i] = r + } + return results +} + // Subscribe to changes in a component. // // Allows a component without that ID to exists. 
Once a component starts matching that ID then changes will start to @@ -696,6 +767,50 @@ func (m *Manager) getListenAddr() string { return m.listenAddr } +func (m *Manager) performDiagAction(ctx context.Context, unit component.Unit) ([]*proto.ActionDiagnosticUnitResult, error) { + ctx, cancel := context.WithTimeout(ctx, diagnosticTimeout) + defer cancel() + + id, err := uuid.NewV4() + if err != nil { + return nil, err + } + + runtime := m.getRuntimeFromUnit(unit) + if runtime == nil { + return nil, ErrNoUnit + } + + req := &proto.ActionRequest{ + Id: id.String(), + UnitId: unit.ID, + UnitType: proto.UnitType(unit.Type), + Type: proto.ActionRequest_DIAGNOSTICS, + } + res, err := runtime.performAction(ctx, req) + if err != nil { + return nil, err + } + if res.Status == proto.ActionResponse_FAILED { + var respBody map[string]interface{} + if res.Result != nil { + err = json.Unmarshal(res.Result, &respBody) + if err != nil { + return nil, err + } + errMsgT, ok := respBody["error"] + if ok { + errMsg, ok := errMsgT.(string) + if ok { + return nil, errors.New(errMsg) + } + } + } + return nil, errors.New("unit failed to perform diagnostics, no error could be extracted from response") + } + return res.Diagnostic, nil +} + type waitForReady struct { name string cert *authority.Pair diff --git a/pkg/component/runtime/state.go b/pkg/component/runtime/state.go index 2bd848cfce0..4a39a21d82e 100644 --- a/pkg/component/runtime/state.go +++ b/pkg/component/runtime/state.go @@ -21,9 +21,9 @@ const ( // ComponentUnitState is the state for a unit running in a component. type ComponentUnitState struct { - State client.UnitState - Message string - Payload map[string]interface{} + State client.UnitState `yaml:"state"` + Message string `yaml:"message"` + Payload map[string]interface{} `yaml:"payload,omitempty"` // internal unitState client.UnitState @@ -42,21 +42,21 @@ type ComponentUnitKey struct { // ComponentVersionInfo provides version information reported by the component. type ComponentVersionInfo struct { // Name of the binary. - Name string + Name string `yaml:"name"` // Version of the binary. - Version string + Version string `yaml:"version"` // Additional metadata about the binary. - Meta map[string]string + Meta map[string]string `yaml:"meta,omitempty"` } // ComponentState is the overall state of the component. type ComponentState struct { - State client.UnitState - Message string + State client.UnitState `yaml:"state"` + Message string `yaml:"message"` - Units map[ComponentUnitKey]ComponentUnitState + Units map[ComponentUnitKey]ComponentUnitState `yaml:"units"` - VersionInfo ComponentVersionInfo + VersionInfo ComponentVersionInfo `yaml:"version_info"` // internal expectedUnits map[ComponentUnitKey]expectedUnitState diff --git a/pkg/component/spec.go b/pkg/component/spec.go index dabcf499817..3d8b5cfe504 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -83,9 +83,15 @@ func (t *CommandTimeoutSpec) InitDefaults() { // ServiceSpec is the specification for an input that executes as a service. type ServiceSpec struct { + Log *ServiceLogSpec `config:"log,omitempty" yaml:"log,omitempty"` Operations ServiceOperationsSpec `config:"operations" yaml:"operations" validate:"required"` } +// ServiceLogSpec is the specification for the log path that the service logs to. +type ServiceLogSpec struct { + Path string `config:"path,omitempty" yaml:"path,omitempty"` +} + // ServiceOperationsSpec is the specification of the operations that need to be performed to get a service installed/uninstalled. 
type ServiceOperationsSpec struct { Check *ServiceOperationsCommandSpec `config:"check,omitempty" yaml:"check,omitempty"` diff --git a/specs/endpoint-security.spec.yml b/specs/endpoint-security.spec.yml index bf6f638f257..a34c66086de 100644 --- a/specs/endpoint-security.spec.yml +++ b/specs/endpoint-security.spec.yml @@ -14,7 +14,9 @@ inputs: - condition: ${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7' message: "No support for RHEL7 on arm64" service: - operations: + log: + path: "/opt/Elastic/Endpoint/state/log/endpoint-*.log" + operations: &operations check: args: - "verify" @@ -44,28 +46,9 @@ inputs: outputs: - elasticsearch service: - operations: - check: - args: - - "verify" - - "--log" - - "stderr" - timeout: 30 - install: - args: - - "install" - - "--log" - - "stderr" - - "--upgrade" - - "--resources" - - "endpoint-security-resources.zip" - timeout: 600 - uninstall: - args: - - "uninstall" - - "--log" - - "stderr" - timeout: 600 + log: + path: "/Library/Elastic/Endpoint/state/log/endpoint-*.log" + operations: *operations - name: endpoint description: "Endpoint Security" platforms: @@ -77,25 +60,6 @@ inputs: - condition: ${runtime.user.root} == false message: "Elastic Agent must be running as Administrator or SYSTEM" service: - operations: - check: - args: - - "verify" - - "--log" - - "stderr" - timeout: 30 - install: - args: - - "install" - - "--log" - - "stderr" - - "--upgrade" - - "--resources" - - "endpoint-security-resources.zip" - timeout: 600 - uninstall: - args: - - "uninstall" - - "--log" - - "stderr" - timeout: 600 + log: + path: "C:\\Program Files\\Elastic\\Endpoint\\state\\log\\endpoint-*.log" + operations: *operations From 90523cc7e6f76c2e208ac3b0333f64c6192d6994 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Wed, 5 Oct 2022 15:14:42 +0200 Subject: [PATCH 25/49] Check and create downloads dir before using (#1410) --- .../application/upgrade/artifact/download/fs/downloader.go | 6 ++++++ .../upgrade/artifact/download/http/downloader.go | 7 +++++++ internal/pkg/agent/cmd/container.go | 4 ++++ 3 files changed, 17 insertions(+) diff --git a/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go index 46e85defc31..42cc058c16b 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/fs/downloader.go @@ -102,6 +102,12 @@ func (e *Downloader) downloadFile(filename, fullPath string) (string, error) { } defer sourceFile.Close() + if destinationDir := filepath.Dir(fullPath); destinationDir != "" && destinationDir != "." 
{
+ if err := os.MkdirAll(destinationDir, 0755); err != nil {
+ return "", err
+ }
+ }
+
 destinationFile, err := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, packagePermissions)
 if err != nil {
 return "", errors.New(err, "creating package file failed", errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath))
diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go
index 2f9a7748660..5ef423825a9 100644
--- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go
+++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go
@@ -12,6 +12,7 @@ import (
 "net/url"
 "os"
 "path"
+ "path/filepath"
 "strconv"
 "strings"
 "time"
@@ -156,6 +157,12 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f
 return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI))
 }
 
+ if destinationDir := filepath.Dir(fullPath); destinationDir != "" && destinationDir != "." {
+ if err := os.MkdirAll(destinationDir, 0755); err != nil {
+ return "", err
+ }
+ }
+
 destinationFile, err := os.OpenFile(fullPath, os.O_CREATE|os.O_TRUNC|os.O_WRONLY, packagePermissions)
 if err != nil {
 return "", errors.New(err, "creating package file failed", errors.TypeFilesystem, errors.M(errors.MetaKeyPath, fullPath))
diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go
index f9f89dd25db..74e574c4806 100644
--- a/internal/pkg/agent/cmd/container.go
+++ b/internal/pkg/agent/cmd/container.go
@@ -860,6 +860,10 @@ func tryContainerLoadPaths() error {
 func syncDir(src string, dest string) error {
 return filepath.Walk(src, func(path string, info os.FileInfo, err error) error {
 if err != nil {
+ if os.IsNotExist(err) {
+ // the source dir exists only if there's an agent artifact
+ return nil
+ }
 return err
 }
 relativePath := strings.TrimPrefix(path, src)

From bfc490afe2cbfe865b9ff784be633d100be2994e Mon Sep 17 00:00:00 2001
From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com>
Date: Fri, 14 Oct 2022 12:08:34 -0700
Subject: [PATCH 26/49] [v2] Add upgrade action retry (#1219)

* Add upgrade action retry

Add the ability for the agent to schedule and retry upgrade actions. The fleetapi actions now define ScheduledAction and RetryableAction interfaces to eliminate the need for stub methods on all the different action types. The action queue has been changed to function on scheduled actions. Serialization tests now ensure that the retry attribute needed by retryable actions works.

Decouple the dispatcher from the gateway; the dispatcher has an errors channel that will return an error for the list of actions that is sent, and the gateway has an Actions method that can be used to get the list of actions from the gateway. The managed_mode config manager links these two components.

If a handler returns an error and the action is a RetryableAction, the dispatcher will attempt to schedule a retry. The dispatcher will also ack the action to fleet-server and indicate whether it will be retried, has failed, or has been received normally.

For the acker, if a RetryableAction has an error and an attempt count greater than 0, it is acked as retried. If it has an error and an attempt count less than 1, it is acked as failed.
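To make the retry flow concrete, with the default backoff steps added in retryconfig.go (1m, 5m, 10m, 15m, 30m, 1h) a persistently failing action moves through the dispatcher roughly as follows (illustration only, not part of the change itself):

    attempt 0 fails -> SetRetryAttempt(1), SetStartTime(now+1m), re-queued, acked as retried
    attempt 1 fails -> SetRetryAttempt(2), SetStartTime(now+5m), re-queued, acked as retried
    ...
    attempt 5 fails -> SetRetryAttempt(6), SetStartTime(now+1h), re-queued, acked as retried
    attempt 6 fails -> GetWait returns ErrNoRetry, SetRetryAttempt(-1), acked as failed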
Co-authored-by: Blake Rouse --- CHANGELOG.next.asciidoc | 1 + .../application/coordinator/coordinator.go | 12 +- .../application/dispatcher/dispatcher.go | 107 +++++++-- .../application/dispatcher/dispatcher_test.go | 223 +++++++++++++++--- .../application/dispatcher/retryconfig.go | 29 +++ .../dispatcher/retryconfig_test.go | 34 +++ .../gateway/fleet/fleet_gateway.go | 28 +-- .../gateway/fleet/fleet_gateway_test.go | 97 ++------ .../pkg/agent/application/gateway/gateway.go | 4 + .../pkg/agent/application/managed_mode.go | 34 ++- .../pkg/agent/application/upgrade/upgrade.go | 5 +- .../pkg/agent/storage/store/state_store.go | 23 +- .../agent/storage/store/state_store_test.go | 18 +- internal/pkg/fleetapi/ack_cmd.go | 14 +- .../pkg/fleetapi/acker/fleet/fleet_acker.go | 18 ++ .../fleetapi/acker/fleet/fleet_acker_test.go | 65 +++-- internal/pkg/fleetapi/action.go | 133 +++++------ internal/pkg/fleetapi/action_test.go | 18 ++ internal/pkg/queue/actionqueue.go | 28 ++- internal/pkg/queue/actionqueue_test.go | 90 +++++-- 20 files changed, 685 insertions(+), 296 deletions(-) create mode 100644 internal/pkg/agent/application/dispatcher/retryconfig.go create mode 100644 internal/pkg/agent/application/dispatcher/retryconfig_test.go diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 2361baf73f5..acdf4efc087 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -197,3 +197,4 @@ - Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] - Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] - Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. {issue}[427] {pull}[700] +- Allow upgrade actions to be retried on failure with action queue scheduling. {issue}778[778] {pull}1219[1219] diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index e49198da65f..407ebbca625 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -12,6 +12,7 @@ import ( "gopkg.in/yaml.v2" "github.com/elastic/elastic-agent/internal/pkg/diagnostics" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" @@ -51,6 +52,9 @@ type UpgradeManager interface { // Upgrade upgrades running agent. Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) + + // Ack is used on startup to check if the agent has upgraded and needs to send an ack for the action + Ack(ctx context.Context, acker acker.Acker) error } // Runner provides interface to run a manager and receive running errors. @@ -251,10 +255,16 @@ func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI str c.state.overrideState = nil return err } - c.ReExec(cb) + if cb != nil { + c.ReExec(cb) + } return nil } +func (c *Coordinator) AckUpgrade(ctx context.Context, acker acker.Acker) error { + return c.upgradeMgr.Ack(ctx, acker) +} + // PerformAction executes an action on a unit. 
func (c *Coordinator) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { return c.runtimeMgr.PerformAction(ctx, unit, name, params) diff --git a/internal/pkg/agent/application/dispatcher/dispatcher.go b/internal/pkg/agent/application/dispatcher/dispatcher.go index 700c7d35349..e37fbdc770b 100644 --- a/internal/pkg/agent/application/dispatcher/dispatcher.go +++ b/internal/pkg/agent/application/dispatcher/dispatcher.go @@ -23,14 +23,16 @@ import ( type actionHandlers map[string]actions.Handler type priorityQueue interface { - Add(fleetapi.Action, int64) - DequeueActions() []fleetapi.Action + Add(fleetapi.ScheduledAction, int64) + DequeueActions() []fleetapi.ScheduledAction + CancelType(string) int Save() error } // Dispatcher processes actions coming from fleet api. type Dispatcher interface { - Dispatch(context.Context, acker.Acker, ...fleetapi.Action) error + Dispatch(context.Context, acker.Acker, ...fleetapi.Action) + Errors() <-chan error } // ActionDispatcher processes actions coming from fleet using registered set of handlers. @@ -39,6 +41,8 @@ type ActionDispatcher struct { handlers actionHandlers def actions.Handler queue priorityQueue + rt *retryConfig + errCh chan error } // New creates a new action dispatcher. @@ -60,9 +64,15 @@ func New(log *logger.Logger, def actions.Handler, queue priorityQueue) (*ActionD handlers: make(actionHandlers), def: def, queue: queue, + rt: defaultRetryConfig(), + errCh: make(chan error), }, nil } +func (ad *ActionDispatcher) Errors() <-chan error { + return ad.errCh +} + // Register registers a new handler for action. func (ad *ActionDispatcher) Register(a fleetapi.Action, handler actions.Handler) error { k := ad.key(a) @@ -88,13 +98,18 @@ func (ad *ActionDispatcher) key(a fleetapi.Action) string { } // Dispatch dispatches an action using pre-registered set of handlers. -func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, actions ...fleetapi.Action) (err error) { +// Dispatch will handle action queue operations, and retries. +// Any action that implements the ScheduledAction interface may be added/removed from the queue based on StartTime. +// Any action that implements the RetryableAction interface will be rescheduled if the handler returns an error. 
+func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, actions ...fleetapi.Action) { + var err error span, ctx := apm.StartSpan(ctx, "dispatch", "app.internal") defer func() { apm.CaptureError(ctx, err).Send() span.End() }() + ad.removeQueuedUpgrades(actions) actions = ad.queueScheduledActions(actions) actions = ad.dispatchCancelActions(ctx, actions, acker) queued, expired := ad.gatherQueuedActions(time.Now().UTC()) @@ -108,7 +123,7 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, act if len(actions) == 0 { ad.log.Debug("No action to dispatch") - return nil + return } ad.log.Debugf( @@ -118,18 +133,28 @@ func (ad *ActionDispatcher) Dispatch(ctx context.Context, acker acker.Acker, act ) for _, action := range actions { - if err := ctx.Err(); err != nil { - return err + if err = ctx.Err(); err != nil { + ad.errCh <- err + return } if err := ad.dispatchAction(ctx, action, acker); err != nil { + rAction, ok := action.(fleetapi.RetryableAction) + if ok { + rAction.SetError(err) // set the retryable action error to what the dispatcher returned + ad.scheduleRetry(ctx, rAction, acker) + continue + } ad.log.Debugf("Failed to dispatch action '%+v', error: %+v", action, err) - return err + ad.errCh <- err + continue } ad.log.Debugf("Successfully dispatched action: '%+v'", action) } - return acker.Commit(ctx) + if err = acker.Commit(ctx); err != nil { + ad.errCh <- err + } } func (ad *ActionDispatcher) dispatchAction(ctx context.Context, a fleetapi.Action, acker acker.Acker) error { @@ -154,15 +179,18 @@ func detectTypes(actions []fleetapi.Action) []string { func (ad *ActionDispatcher) queueScheduledActions(input []fleetapi.Action) []fleetapi.Action { actions := make([]fleetapi.Action, 0, len(input)) for _, action := range input { - start, err := action.StartTime() - if err == nil { - ad.log.Debugf("Adding action id: %s to queue.", action.ID()) - ad.queue.Add(action, start.Unix()) + sAction, ok := action.(fleetapi.ScheduledAction) + if ok { + start, err := sAction.StartTime() + if err != nil { + ad.log.Warnf("Skipping addition to action-queue, issue gathering start time from action id %s: %v", sAction.ID(), err) + actions = append(actions, action) + continue + } + ad.log.Debugf("Adding action id: %s to queue.", sAction.ID()) + ad.queue.Add(sAction, start.Unix()) continue } - if !errors.Is(err, fleetapi.ErrNoStartTime) { - ad.log.Warnf("Issue gathering start time from action id %s: %v", action.ID(), err) - } actions = append(actions, action) } return actions @@ -197,3 +225,50 @@ func (ad *ActionDispatcher) gatherQueuedActions(ts time.Time) (queued, expired [ } return queued, expired } + +// removeQueuedUpgrades will scan the passed actions and if there is an upgrade action it will remove all upgrade actions in the queue but not alter the passed list. +// this is done to try to only have the most recent upgrade action executed. 
However, it does not eliminate duplicates in actions retrieved directly from the gateway.
+func (ad *ActionDispatcher) removeQueuedUpgrades(actions []fleetapi.Action) {
+ for _, action := range actions {
+ if action.Type() == fleetapi.ActionTypeUpgrade {
+ if n := ad.queue.CancelType(fleetapi.ActionTypeUpgrade); n > 0 {
+ ad.log.Debugw("New upgrade action retrieved from gateway, removing queued upgrade actions", "actions_found", n)
+ }
+ return
+ }
+ }
+}
+
+func (ad *ActionDispatcher) scheduleRetry(ctx context.Context, action fleetapi.RetryableAction, acker acker.Acker) {
+ attempt := action.RetryAttempt()
+ d, err := ad.rt.GetWait(attempt)
+ if err != nil {
+ ad.log.Errorf("No more retries for action id %s: %v", action.ID(), err)
+ action.SetRetryAttempt(-1)
+ if err := acker.Ack(ctx, action); err != nil {
+ ad.log.Errorf("Unable to ack action failure (id %s) to fleet-server: %v", action.ID(), err)
+ return
+ }
+ if err := acker.Commit(ctx); err != nil {
+ ad.log.Errorf("Unable to commit action failure (id %s) to fleet-server: %v", action.ID(), err)
+ }
+ return
+ }
+ attempt = attempt + 1
+ startTime := time.Now().UTC().Add(d)
+ action.SetRetryAttempt(attempt)
+ action.SetStartTime(startTime)
+ ad.log.Debugf("Adding action id: %s to queue.", action.ID())
+ ad.queue.Add(action, startTime.Unix())
+ err = ad.queue.Save()
+ if err != nil {
+ ad.log.Errorf("retry action id %s attempt %d failed to persist action_queue: %v", action.ID(), attempt, err)
+ }
+ if err := acker.Ack(ctx, action); err != nil {
+ ad.log.Errorf("Unable to ack action retry (id %s) to fleet-server: %v", action.ID(), err)
+ return
+ }
+ if err := acker.Commit(ctx); err != nil {
+ ad.log.Errorf("Unable to commit action retry (id %s) to fleet-server: %v", action.ID(), err)
+ }
+}
diff --git a/internal/pkg/agent/application/dispatcher/dispatcher_test.go b/internal/pkg/agent/application/dispatcher/dispatcher_test.go
index d140033655c..c9c1397443c 100644
--- a/internal/pkg/agent/application/dispatcher/dispatcher_test.go
+++ b/internal/pkg/agent/application/dispatcher/dispatcher_test.go
@@ -12,6 +12,7 @@ import (
 "github.com/stretchr/testify/mock"
 "github.com/stretchr/testify/require"
 
+ "github.com/elastic/elastic-agent/internal/pkg/agent/errors"
 "github.com/elastic/elastic-agent/internal/pkg/fleetapi"
 "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker"
 "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop"
@@ -33,9 +34,12 @@ type mockAction struct {
 type mockOtherAction struct {
 mockAction
 }
-type mockUnknownAction struct {
+type mockScheduledAction struct {
 mockAction
 }
+type mockRetryableAction struct {
+ mockScheduledAction
+}
 
 func (m *mockAction) ID() string {
 args := m.Called()
@@ -49,26 +53,48 @@ func (m *mockAction) String() string {
 args := m.Called()
 return args.String(0)
 }
-func (m *mockAction) StartTime() (time.Time, error) {
+func (m *mockScheduledAction) StartTime() (time.Time, error) {
 args := m.Called()
 return args.Get(0).(time.Time), args.Error(1)
 }
-func (m *mockAction) Expiration() (time.Time, error) {
+func (m *mockScheduledAction) Expiration() (time.Time, error) {
 args := m.Called()
 return args.Get(0).(time.Time), args.Error(1)
 }
+func (m *mockRetryableAction) RetryAttempt() int {
+ args := m.Called()
+ return args.Int(0)
+}
+func (m *mockRetryableAction) SetRetryAttempt(n int) {
+ m.Called(n)
+}
+func (m *mockRetryableAction) SetStartTime(ts time.Time) {
+ m.Called(ts)
+}
+func (m *mockRetryableAction) GetError() error {
+ args := m.Called()
+ return args.Error(0)
+}
+func (m
*mockRetryableAction) SetError(err error) { + m.Called(err) +} type mockQueue struct { mock.Mock } -func (m *mockQueue) Add(action fleetapi.Action, n int64) { +func (m *mockQueue) Add(action fleetapi.ScheduledAction, n int64) { m.Called(action, n) } -func (m *mockQueue) DequeueActions() []fleetapi.Action { +func (m *mockQueue) DequeueActions() []fleetapi.ScheduledAction { args := m.Called() - return args.Get(0).([]fleetapi.Action) + return args.Get(0).([]fleetapi.ScheduledAction) +} + +func (m *mockQueue) CancelType(t string) int { + args := m.Called(t) + return args.Int(0) } func (m *mockQueue) Save() error { @@ -84,7 +110,7 @@ func TestActionDispatcher(t *testing.T) { def := &mockHandler{} queue := &mockQueue{} queue.On("Save").Return(nil).Once() - queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Once() d, err := New(nil, def, queue) require.NoError(t, err) @@ -97,11 +123,9 @@ func TestActionDispatcher(t *testing.T) { require.NoError(t, err) action1 := &mockAction{} - action1.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) action1.On("Type").Return("action") action1.On("ID").Return("id") action2 := &mockOtherAction{} - action2.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) action2.On("Type").Return("action") action2.On("ID").Return("id") @@ -109,8 +133,12 @@ func TestActionDispatcher(t *testing.T) { success1.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() success2.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once() - err = d.Dispatch(ctx, ack, action1, action2) - require.NoError(t, err) + d.Dispatch(ctx, ack, action1, action2) + select { + case err := <-d.Errors(): + t.Fatalf("Unexpected error: %v", err) + default: + } success1.AssertExpectations(t) success2.AssertExpectations(t) @@ -124,17 +152,20 @@ func TestActionDispatcher(t *testing.T) { ctx := context.Background() queue := &mockQueue{} queue.On("Save").Return(nil).Once() - queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Once() d, err := New(nil, def, queue) require.NoError(t, err) - action := &mockUnknownAction{} - action.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) + action := &mockOtherAction{} action.On("Type").Return("action") action.On("ID").Return("id") - err = d.Dispatch(ctx, ack, action) + d.Dispatch(ctx, ack, action) + select { + case err := <-d.Errors(): + t.Fatalf("Unexpected error: %v", err) + default: + } - require.NoError(t, err) def.AssertExpectations(t) queue.AssertExpectations(t) }) @@ -162,7 +193,7 @@ func TestActionDispatcher(t *testing.T) { queue := &mockQueue{} queue.On("Save").Return(nil).Once() - queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Once() queue.On("Add", mock.Anything, mock.Anything).Once() d, err := New(nil, def, queue) @@ -171,16 +202,19 @@ func TestActionDispatcher(t *testing.T) { require.NoError(t, err) action1 := &mockAction{} - action1.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) action1.On("Type").Return("action") action1.On("ID").Return("id") - action2 := &mockAction{} + action2 := &mockScheduledAction{} action2.On("StartTime").Return(time.Now().Add(time.Hour), nil) action2.On("Type").Return("action") action2.On("ID").Return("id") - err = d.Dispatch(context.Background(), ack, action1, action2) - require.NoError(t, err) + 
d.Dispatch(context.Background(), ack, action1, action2) + select { + case err := <-d.Errors(): + t.Fatalf("Unexpected error: %v", err) + default: + } def.AssertExpectations(t) queue.AssertExpectations(t) }) @@ -191,7 +225,7 @@ func TestActionDispatcher(t *testing.T) { queue := &mockQueue{} queue.On("Save").Return(nil).Once() - queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Once() d, err := New(nil, def, queue) require.NoError(t, err) @@ -199,12 +233,15 @@ func TestActionDispatcher(t *testing.T) { require.NoError(t, err) action := &mockAction{} - action.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) action.On("Type").Return(fleetapi.ActionTypeCancel) action.On("ID").Return("id") - err = d.Dispatch(context.Background(), ack, action) - require.NoError(t, err) + d.Dispatch(context.Background(), ack, action) + select { + case err := <-d.Errors(): + t.Fatalf("Unexpected error: %v", err) + default: + } def.AssertExpectations(t) queue.AssertExpectations(t) }) @@ -213,7 +250,7 @@ func TestActionDispatcher(t *testing.T) { def := &mockHandler{} def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Twice() - action1 := &mockAction{} + action1 := &mockScheduledAction{} action1.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) action1.On("Expiration").Return(time.Now().Add(time.Hour), fleetapi.ErrNoStartTime) action1.On("Type").Return(fleetapi.ActionTypeCancel) @@ -221,7 +258,7 @@ func TestActionDispatcher(t *testing.T) { queue := &mockQueue{} queue.On("Save").Return(nil).Once() - queue.On("DequeueActions").Return([]fleetapi.Action{action1}).Once() + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{action1}).Once() d, err := New(nil, def, queue) require.NoError(t, err) @@ -229,12 +266,15 @@ func TestActionDispatcher(t *testing.T) { require.NoError(t, err) action2 := &mockAction{} - action2.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime) action2.On("Type").Return(fleetapi.ActionTypeCancel) action2.On("ID").Return("id") - err = d.Dispatch(context.Background(), ack, action2) - require.NoError(t, err) + d.Dispatch(context.Background(), ack, action2) + select { + case err := <-d.Errors(): + t.Fatalf("Unexpected error: %v", err) + default: + } def.AssertExpectations(t) queue.AssertExpectations(t) }) @@ -245,15 +285,132 @@ func TestActionDispatcher(t *testing.T) { queue := &mockQueue{} queue.On("Save").Return(nil).Once() - queue.On("DequeueActions").Return([]fleetapi.Action{}).Once() + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Once() d, err := New(nil, def, queue) require.NoError(t, err) err = d.Register(&mockAction{}, def) require.NoError(t, err) - err = d.Dispatch(context.Background(), ack) - require.NoError(t, err) + d.Dispatch(context.Background(), ack) + select { + case err := <-d.Errors(): + t.Fatalf("Unexpected error: %v", err) + default: + } def.AssertNotCalled(t, "Handle", mock.Anything, mock.Anything, mock.Anything) }) + + t.Run("Dispatch of a retryable action returns an error", func(t *testing.T) { + def := &mockHandler{} + def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(errors.New("test error")).Once() + + queue := &mockQueue{} + queue.On("Save").Return(nil).Twice() + queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Once() + queue.On("Add", mock.Anything, mock.Anything).Once() + + d, err := New(nil, def, queue) + require.NoError(t, err) + err = d.Register(&mockRetryableAction{}, def) + 
require.NoError(t, err)
+
+ action := &mockRetryableAction{}
+ action.On("Type").Return("action")
+ action.On("ID").Return("id")
+ action.On("StartTime").Return(time.Time{}, fleetapi.ErrNoStartTime).Once()
+ action.On("SetError", mock.Anything).Once()
+ action.On("RetryAttempt").Return(0).Once()
+ action.On("SetRetryAttempt", 1).Once()
+ action.On("SetStartTime", mock.Anything).Once()
+
+ d.Dispatch(context.Background(), ack, action)
+ select {
+ case err := <-d.Errors():
+ t.Fatalf("Unexpected error: %v", err)
+ default:
+ }
+ def.AssertExpectations(t)
+ queue.AssertExpectations(t)
+ action.AssertExpectations(t)
+ })
+
+ t.Run("Dispatch multiple events returns one error", func(t *testing.T) {
+ def := &mockHandler{}
+ def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(errors.New("test error")).Once()
+ def.On("Handle", mock.Anything, mock.Anything, mock.Anything).Return(nil).Once()
+
+ queue := &mockQueue{}
+ queue.On("Save").Return(nil).Once()
+ queue.On("DequeueActions").Return([]fleetapi.ScheduledAction{}).Once()
+
+ d, err := New(nil, def, queue)
+ require.NoError(t, err)
+ err = d.Register(&mockAction{}, def)
+ require.NoError(t, err)
+
+ action1 := &mockAction{}
+ action1.On("Type").Return("action")
+ action1.On("ID").Return("id")
+ action2 := &mockAction{}
+ action2.On("Type").Return("action")
+ action2.On("ID").Return("id")
+
+ // Kind of a dirty workaround to test an error return.
+ // launch in another goroutine and sleep to check if an error is generated
+ go d.Dispatch(context.Background(), ack, action1, action2)
+ time.Sleep(time.Millisecond * 200)
+ select {
+ case <-d.Errors():
+ default:
+ t.Fatal("Expected error")
+ }
+ time.Sleep(time.Millisecond * 200)
+ select {
+ case <-d.Errors():
+ t.Fatal(err)
+ default:
+ }
+
+ def.AssertExpectations(t)
+ queue.AssertExpectations(t)
+ })
+}
+
+func Test_ActionDispatcher_scheduleRetry(t *testing.T) {
+ ack := noop.New()
+ def := &mockHandler{}
+
+ t.Run("no more attempts", func(t *testing.T) {
+ queue := &mockQueue{}
+ d, err := New(nil, def, queue)
+ require.NoError(t, err)
+
+ action := &mockRetryableAction{}
+ action.On("ID").Return("id")
+ action.On("RetryAttempt").Return(len(d.rt.steps)).Once()
+ action.On("SetRetryAttempt", mock.Anything).Once()
+
+ d.scheduleRetry(context.Background(), action, ack)
+ queue.AssertExpectations(t)
+ action.AssertExpectations(t)
+ })
+
+ t.Run("schedule an attempt", func(t *testing.T) {
+ queue := &mockQueue{}
+ queue.On("Save").Return(nil).Once()
+ queue.On("Add", mock.Anything, mock.Anything).Once()
+ d, err := New(nil, def, queue)
+ require.NoError(t, err)
+
+ action := &mockRetryableAction{}
+ action.On("ID").Return("id")
+ action.On("RetryAttempt").Return(0).Once()
+ action.On("SetRetryAttempt", 1).Once()
+ action.On("SetStartTime", mock.Anything).Once()
+
+ d.scheduleRetry(context.Background(), action, ack)
+ queue.AssertExpectations(t)
+ action.AssertExpectations(t)
+ })
+}
diff --git a/internal/pkg/agent/application/dispatcher/retryconfig.go b/internal/pkg/agent/application/dispatcher/retryconfig.go
new file mode 100644
index 00000000000..8ed5a6e31af
--- /dev/null
+++ b/internal/pkg/agent/application/dispatcher/retryconfig.go
@@ -0,0 +1,29 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package dispatcher + +import ( + "fmt" + "time" +) + +var ErrNoRetry = fmt.Errorf("no retry attempts remaining") + +type retryConfig struct { + steps []time.Duration +} + +func defaultRetryConfig() *retryConfig { + return &retryConfig{ + steps: []time.Duration{time.Minute, 5 * time.Minute, 10 * time.Minute, 15 * time.Minute, 30 * time.Minute, time.Hour}, + } +} + +func (r *retryConfig) GetWait(step int) (time.Duration, error) { + if step < 0 || step >= len(r.steps) { + return time.Duration(0), ErrNoRetry + } + return r.steps[step], nil +} diff --git a/internal/pkg/agent/application/dispatcher/retryconfig_test.go b/internal/pkg/agent/application/dispatcher/retryconfig_test.go new file mode 100644 index 00000000000..d0db8a7650c --- /dev/null +++ b/internal/pkg/agent/application/dispatcher/retryconfig_test.go @@ -0,0 +1,34 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package dispatcher + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func Test_retryConfig_GetWait(t *testing.T) { + rt := defaultRetryConfig() + + t.Run("step is negative", func(t *testing.T) { + d, err := rt.GetWait(-1) + assert.Equal(t, time.Duration(0), d) + assert.ErrorIs(t, err, ErrNoRetry) + }) + + t.Run("returns duration", func(t *testing.T) { + d, err := rt.GetWait(0) + assert.Equal(t, time.Minute, d) + assert.NoError(t, err) + }) + + t.Run("step too large", func(t *testing.T) { + d, err := rt.GetWait(len(rt.steps)) + assert.Equal(t, time.Duration(0), d) + assert.ErrorIs(t, err, ErrNoRetry) + }) +} diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 38fad92057c..0288152f726 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -6,12 +6,10 @@ package fleet import ( "context" - "fmt" "time" eaclient "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/dispatcher" "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" agentclient "github.com/elastic/elastic-agent/internal/pkg/agent/control/client" @@ -66,7 +64,6 @@ type stateStore interface { type fleetGateway struct { log *logger.Logger - dispatcher dispatcher.Dispatcher client client.Sender scheduler scheduler.Scheduler settings *fleetGatewaySettings @@ -76,6 +73,7 @@ type fleetGateway struct { stateFetcher coordinator.StateFetcher stateStore stateStore errCh chan error + actionCh chan []fleetapi.Action } // New creates a new fleet gateway @@ -83,7 +81,6 @@ func New( log *logger.Logger, agentInfo agentInfo, client client.Sender, - d dispatcher.Dispatcher, acker acker.Acker, stateFetcher coordinator.StateFetcher, stateStore stateStore, @@ -95,7 +92,6 @@ func New( defaultGatewaySettings, agentInfo, client, - d, scheduler, acker, stateFetcher, @@ -108,7 +104,6 @@ func newFleetGatewayWithScheduler( settings *fleetGatewaySettings, agentInfo agentInfo, client client.Sender, - d dispatcher.Dispatcher, scheduler scheduler.Scheduler, acker acker.Acker, stateFetcher coordinator.StateFetcher, @@ -116,7 +111,6 @@ func 
newFleetGatewayWithScheduler( ) (gateway.FleetGateway, error) { return &fleetGateway{ log: log, - dispatcher: d, client: client, settings: settings, agentInfo: agentInfo, @@ -125,9 +119,14 @@ func newFleetGatewayWithScheduler( stateFetcher: stateFetcher, stateStore: stateStore, errCh: make(chan error), + actionCh: make(chan []fleetapi.Action, 1), }, nil } +func (f *fleetGateway) Actions() <-chan []fleetapi.Action { + return f.actionCh +} + func (f *fleetGateway) Run(ctx context.Context) error { // Backoff implementation doesn't support the use of a context [cancellation] as the shutdown mechanism. // So we keep a done channel that will be closed when the current context is shutdown. @@ -162,19 +161,8 @@ func (f *fleetGateway) Run(ctx context.Context) error { actions := make([]fleetapi.Action, len(resp.Actions)) copy(actions, resp.Actions) - - // Persist state - hadErr := false - if err := f.dispatcher.Dispatch(context.Background(), f.acker, actions...); err != nil { - err = fmt.Errorf("failed to dispatch actions, error: %w", err) - f.log.Error(err) - f.errCh <- err - hadErr = true - } - - f.log.Debugf("FleetGateway is sleeping, next update in %s", f.settings.Duration) - if !hadErr { - f.errCh <- nil + if len(actions) > 0 { + f.actionCh <- actions } } } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index 49c05112e18..076453f1374 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -25,8 +25,6 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/gateway" "github.com/elastic/elastic-agent/internal/pkg/agent/storage" "github.com/elastic/elastic-agent/internal/pkg/agent/storage/store" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi" - "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/acker/noop" "github.com/elastic/elastic-agent/internal/pkg/scheduler" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -69,53 +67,12 @@ func newTestingClient() *testingClient { return &testingClient{received: make(chan struct{}, 1)} } -type testingDispatcherFunc func(...fleetapi.Action) error - -type testingDispatcher struct { - sync.Mutex - callback testingDispatcherFunc - received chan struct{} -} - -func (t *testingDispatcher) Dispatch(_ context.Context, acker acker.Acker, actions ...fleetapi.Action) error { - t.Lock() - defer t.Unlock() - defer func() { t.received <- struct{}{} }() - // Get a dummy context. - ctx := context.Background() - - // In context of testing we need to abort on error. - if err := t.callback(actions...); err != nil { - return err - } - - // Ack everything and commit at the end. 
- for _, action := range actions { - _ = acker.Ack(ctx, action) - } - _ = acker.Commit(ctx) - - return nil -} - -func (t *testingDispatcher) Answer(fn testingDispatcherFunc) <-chan struct{} { - t.Lock() - defer t.Unlock() - t.callback = fn - return t.received -} - -func newTestingDispatcher() *testingDispatcher { - return &testingDispatcher{received: make(chan struct{}, 1)} -} - -type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *testingDispatcher, *scheduler.Stepper) +type withGatewayFunc func(*testing.T, gateway.FleetGateway, *testingClient, *scheduler.Stepper) func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGatewayFunc) func(t *testing.T) { return func(t *testing.T) { scheduler := scheduler.NewStepper() client := newTestingClient() - dispatcher := newTestingDispatcher() log, _ := logger.New("fleet_gateway", false) @@ -126,7 +83,6 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat settings, agentInfo, client, - dispatcher, scheduler, noop.New(), &emptyStateFetcher{}, @@ -135,7 +91,7 @@ func withGateway(agentInfo agentInfo, settings *fleetGatewaySettings, fn withGat require.NoError(t, err) - fn(t, gateway, client, dispatcher, scheduler) + fn(t, gateway, client, scheduler) } } @@ -171,7 +127,6 @@ func TestFleetGateway(t *testing.T) { t *testing.T, gateway gateway.FleetGateway, client *testingClient, - dispatcher *testingDispatcher, scheduler *scheduler.Stepper, ) { ctx, cancel := context.WithCancel(context.Background()) @@ -182,10 +137,6 @@ func TestFleetGateway(t *testing.T) { resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), ) errCh := runFleetGateway(ctx, gateway) @@ -197,13 +148,17 @@ func TestFleetGateway(t *testing.T) { cancel() err := <-errCh require.NoError(t, err) + select { + case actions := <-gateway.Actions(): + t.Errorf("Expected no actions, got %v", actions) + default: + } })) t.Run("Successfully connects and receives a series of actions", withGateway(agentInfo, settings, func( t *testing.T, gateway gateway.FleetGateway, client *testingClient, - dispatcher *testingDispatcher, scheduler *scheduler.Stepper, ) { ctx, cancel := context.WithCancel(context.Background()) @@ -233,10 +188,6 @@ func TestFleetGateway(t *testing.T) { `) return resp, nil }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Len(t, actions, 2) - return nil - }), ) errCh := runFleetGateway(ctx, gateway) @@ -247,13 +198,18 @@ func TestFleetGateway(t *testing.T) { cancel() err := <-errCh require.NoError(t, err) + select { + case actions := <-gateway.Actions(): + require.Len(t, actions, 2) + default: + t.Errorf("Expected to receive actions") + } })) // Test the normal time based execution. 
t.Run("Periodically communicates with Fleet", func(t *testing.T) { scheduler := scheduler.NewPeriodic(150 * time.Millisecond) client := newTestingClient() - dispatcher := newTestingDispatcher() ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -266,7 +222,6 @@ func TestFleetGateway(t *testing.T) { settings, agentInfo, client, - dispatcher, scheduler, noop.New(), &emptyStateFetcher{}, @@ -279,10 +234,6 @@ func TestFleetGateway(t *testing.T) { resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), ) errCh := runFleetGateway(ctx, gateway) @@ -309,7 +260,6 @@ func TestFleetGateway(t *testing.T) { d := 20 * time.Minute scheduler := scheduler.NewPeriodic(d) client := newTestingClient() - dispatcher := newTestingDispatcher() ctx, cancel := context.WithCancel(context.Background()) @@ -324,7 +274,6 @@ func TestFleetGateway(t *testing.T) { }, agentInfo, client, - dispatcher, scheduler, noop.New(), &emptyStateFetcher{}, @@ -332,7 +281,6 @@ func TestFleetGateway(t *testing.T) { ) require.NoError(t, err) - ch1 := dispatcher.Answer(func(actions ...fleetapi.Action) error { return nil }) ch2 := client.Answer(func(headers http.Header, body io.Reader) (*http.Response, error) { resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil @@ -340,14 +288,7 @@ func TestFleetGateway(t *testing.T) { errCh := runFleetGateway(ctx, gateway) - // Silently dispatch action. - go func() { - for range ch1 { - } - }() - // Make sure that all API calls to the checkin API are successful, the following will happen: - // block on the first call. <-ch2 @@ -379,7 +320,6 @@ func TestRetriesOnFailures(t *testing.T) { t *testing.T, gateway gateway.FleetGateway, client *testingClient, - dispatcher *testingDispatcher, scheduler *scheduler.Stepper, ) { ctx, cancel := context.WithCancel(context.Background()) @@ -406,11 +346,6 @@ func TestRetriesOnFailures(t *testing.T) { resp := wrapStrToResp(http.StatusOK, `{ "actions": [] }`) return resp, nil }), - - dispatcher.Answer(func(actions ...fleetapi.Action) error { - require.Equal(t, 0, len(actions)) - return nil - }), ) waitFn() @@ -418,6 +353,11 @@ func TestRetriesOnFailures(t *testing.T) { cancel() err := <-errCh require.NoError(t, err) + select { + case actions := <-gateway.Actions(): + t.Errorf("Expected no actions, got %v", actions) + default: + } })) t.Run("The retry loop is interruptible", @@ -428,7 +368,6 @@ func TestRetriesOnFailures(t *testing.T) { t *testing.T, gateway gateway.FleetGateway, client *testingClient, - dispatcher *testingDispatcher, scheduler *scheduler.Stepper, ) { ctx, cancel := context.WithCancel(context.Background()) diff --git a/internal/pkg/agent/application/gateway/gateway.go b/internal/pkg/agent/application/gateway/gateway.go index d43dd32a0c2..6946c8671a4 100644 --- a/internal/pkg/agent/application/gateway/gateway.go +++ b/internal/pkg/agent/application/gateway/gateway.go @@ -7,6 +7,7 @@ package gateway import ( "context" + "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/fleetapi/client" ) @@ -21,6 +22,9 @@ type FleetGateway interface { // Errors returns the channel to watch for reported errors. Errors() <-chan error + // Actions returns the channel to watch for new actions from the fleet-server. + Actions() <-chan []fleetapi.Action + // SetClient sets the client for the gateway. 
SetClient(client.Sender) } diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index cb72af2a700..ca50495dcb6 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -123,6 +123,10 @@ func (m *managedConfigManager) Run(ctx context.Context) error { batchedAcker := lazy.NewAcker(ack, m.log, lazy.WithRetrier(retrier)) actionAcker := store.NewStateStoreActionAcker(batchedAcker, m.stateStore) + if err := m.coord.AckUpgrade(ctx, actionAcker); err != nil { + m.log.Warnf("Failed to ack upgrade: %v", err) + } + // Run the retrier. retrierRun := make(chan bool) retrierCtx, retrierCancel := context.WithCancel(ctx) @@ -135,15 +139,26 @@ func (m *managedConfigManager) Run(ctx context.Context) error { close(retrierRun) }() + // Gather errors from the dispatcher and pass to the error channel. + go func() { + for { + select { + case <-ctx.Done(): + return + case err := <-actionDispatcher.Errors(): + m.errCh <- err // err is one or more failures from dispatching an action + } + } + }() + actions := m.stateStore.Actions() stateRestored := false if len(actions) > 0 && !m.wasUnenrolled() { // TODO(ph) We will need an improvement on fleet, if there is an error while dispatching a // persisted action on disk we should be able to ask Fleet to get the latest configuration. // But at the moment this is not possible because the policy change was acked. - if err := store.ReplayActions(ctx, m.log, actionDispatcher, actionAcker, actions...); err != nil { - m.log.Errorf("could not recover state, error %+v, skipping...", err) - } + m.log.Info("restoring current policy from disk") + actionDispatcher.Dispatch(ctx, actionAcker, actions...) stateRestored = true } @@ -167,7 +182,6 @@ func (m *managedConfigManager) Run(ctx context.Context) error { m.log, m.agentInfo, m.client, - actionDispatcher, actionAcker, m.coord, m.stateStore, @@ -200,6 +214,18 @@ func (m *managedConfigManager) Run(ctx context.Context) error { return gateway.Run(ctx) }) + // pass actions collected from gateway to dispatcher + go func() { + for { + select { + case <-ctx.Done(): + return + case actions := <-gateway.Actions(): + actionDispatcher.Dispatch(ctx, actionAcker, actions...) 
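+				// Dispatch does not return an error here; dispatch failures are
+				// reported on actionDispatcher.Errors() and forwarded to m.errCh
+				// by the goroutine above.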
+			}
+		}
+	}()
+
 	<-ctx.Done()
 	return gatewayRunner.Err()
 }
diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go
index edc70c3f5c0..31f48d8d0d0 100644
--- a/internal/pkg/agent/application/upgrade/upgrade.go
+++ b/internal/pkg/agent/application/upgrade/upgrade.go
@@ -142,7 +142,8 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string
 	}
 	if strings.HasPrefix(release.Commit(), newHash) {
-		return nil, ErrSameVersion
+		u.log.Warn("Upgrade action skipped: upgrade did not occur because it's the same version")
+		return nil, nil
 	}
 	if err := copyActionStore(newHash); err != nil {
@@ -161,7 +162,7 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string
 	if err := InvokeWatcher(u.log); err != nil {
 		rollbackInstall(ctx, newHash)
-		return nil, errors.New("failed to invoke rollback watcher", err)
+		return nil, err
 	}
 	cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash))
diff --git a/internal/pkg/agent/storage/store/state_store.go b/internal/pkg/agent/storage/store/state_store.go
index 8a6d3fc5e8d..522e46fdade 100644
--- a/internal/pkg/agent/storage/store/state_store.go
+++ b/internal/pkg/agent/storage/store/state_store.go
@@ -20,10 +20,6 @@ import (
 	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
-type dispatcher interface {
-	Dispatch(context.Context, acker.Acker, ...action) error
-}
-
 type store interface {
 	Save(io.Reader) error
 }
@@ -98,7 +94,7 @@ func NewStateStore(log *logger.Logger, store storeLoad) (*StateStore, error) {
 	// persisted and we return an empty store.
 	reader, err := store.Load()
 	if err != nil {
-		return &StateStore{log: log, store: store}, nil //nolint:nilerr // expected results
+		return &StateStore{log: log, store: store}, nil
 	}
 	defer reader.Close()
@@ -340,23 +336,6 @@ func (a *StateStoreActionAcker) Commit(ctx context.Context) error {
 	return a.acker.Commit(ctx)
 }
-// ReplayActions replays list of actions.
-func ReplayActions( - ctx context.Context, - log *logger.Logger, - dispatcher dispatcher, - acker acker.Acker, - actions ...action, -) error { - log.Info("restoring current policy from disk") - - if err := dispatcher.Dispatch(ctx, acker, actions...); err != nil { - return err - } - - return nil -} - func yamlToReader(in interface{}) (io.Reader, error) { data, err := yaml.Marshal(in) if err != nil { diff --git a/internal/pkg/agent/storage/store/state_store_test.go b/internal/pkg/agent/storage/store/state_store_test.go index e73b8721fbe..446433ca1ae 100644 --- a/internal/pkg/agent/storage/store/state_store_test.go +++ b/internal/pkg/agent/storage/store/state_store_test.go @@ -31,7 +31,7 @@ func TestStateStore(t *testing.T) { func runTestStateStore(t *testing.T, ackToken string) { log, _ := logger.New("state_store", false) - withFile := func(fn func(t *testing.T, file string)) func(*testing.T) { //nolint:unparam // false positive + withFile := func(fn func(t *testing.T, file string)) func(*testing.T) { return func(t *testing.T) { dir := t.TempDir() file := filepath.Join(dir, "state.yml") @@ -132,7 +132,9 @@ func runTestStateStore(t *testing.T, ackToken string) { require.Empty(t, store1.Actions()) require.Len(t, store1.Queue(), 1) require.Equal(t, "test", store1.Queue()[0].ID()) - start, err := store1.Queue()[0].StartTime() + scheduledAction, ok := store1.Queue()[0].(fleetapi.ScheduledAction) + require.True(t, ok, "expected to be able to cast Action as ScheduledAction") + start, err := scheduledAction.StartTime() require.NoError(t, err) require.Equal(t, ts, start) })) @@ -146,6 +148,7 @@ func runTestStateStore(t *testing.T, ackToken string) { ActionStartTime: ts.Format(time.RFC3339), Version: "1.2.3", SourceURI: "https://example.com", + Retry: 1, }, &fleetapi.ActionPolicyChange{ ActionID: "abc123", ActionType: "POLICY_CHANGE", @@ -172,13 +175,18 @@ func runTestStateStore(t *testing.T, ackToken string) { require.Len(t, store1.Queue(), 2) require.Equal(t, "test", store1.Queue()[0].ID()) - start, err := store1.Queue()[0].StartTime() + scheduledAction, ok := store1.Queue()[0].(fleetapi.ScheduledAction) + require.True(t, ok, "expected to be able to cast Action as ScheduledAction") + start, err := scheduledAction.StartTime() require.NoError(t, err) require.Equal(t, ts, start) + retryableAction, ok := store1.Queue()[0].(fleetapi.RetryableAction) + require.True(t, ok, "expected to be able to cast Action as RetryableAction") + require.Equal(t, 1, retryableAction.RetryAttempt()) require.Equal(t, "abc123", store1.Queue()[1].ID()) - _, err = store1.Queue()[1].StartTime() - require.ErrorIs(t, err, fleetapi.ErrNoStartTime) + _, ok = store1.Queue()[1].(fleetapi.ScheduledAction) + require.False(t, ok, "expected cast to ScheduledAction to fail") })) t.Run("can save to disk unenroll action type", diff --git a/internal/pkg/fleetapi/ack_cmd.go b/internal/pkg/fleetapi/ack_cmd.go index 09ba6f6b4ac..e8d8ac7e9e3 100644 --- a/internal/pkg/fleetapi/ack_cmd.go +++ b/internal/pkg/fleetapi/ack_cmd.go @@ -21,13 +21,13 @@ const ackPath = "/api/fleet/agents/%s/acks" // AckEvent is an event sent in an ACK request. 
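// Payload carries optional structured data as raw JSON; for example, the fleet
// acker attaches a retry payload ({"retry": ..., "retry_attempt": ...}) when a
// retryable action fails.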
type AckEvent struct { - EventType string `json:"type"` // 'STATE' | 'ERROR' | 'ACTION_RESULT' | 'ACTION' - SubType string `json:"subtype"` // 'RUNNING','STARTING','IN_PROGRESS','CONFIG','FAILED','STOPPING','STOPPED','DATA_DUMP','ACKNOWLEDGED','UNKNOWN'; - Timestamp string `json:"timestamp"` // : '2019-01-05T14:32:03.36764-05:00', - ActionID string `json:"action_id"` // : '48cebde1-c906-4893-b89f-595d943b72a2', - AgentID string `json:"agent_id"` // : 'agent1', - Message string `json:"message,omitempty"` // : 'hello2', - Payload string `json:"payload,omitempty"` // : 'payload2', + EventType string `json:"type"` // 'STATE' | 'ERROR' | 'ACTION_RESULT' | 'ACTION' + SubType string `json:"subtype"` // 'RUNNING','STARTING','IN_PROGRESS','CONFIG','FAILED','STOPPING','STOPPED','DATA_DUMP','ACKNOWLEDGED','UNKNOWN'; + Timestamp string `json:"timestamp"` // : '2019-01-05T14:32:03.36764-05:00', + ActionID string `json:"action_id"` // : '48cebde1-c906-4893-b89f-595d943b72a2', + AgentID string `json:"agent_id"` // : 'agent1', + Message string `json:"message,omitempty"` // : 'hello2', + Payload json.RawMessage `json:"payload,omitempty"` // : 'payload2', ActionInputType string `json:"action_input_type,omitempty"` // copy of original action input_type ActionData json.RawMessage `json:"action_data,omitempty"` // copy of original action data diff --git a/internal/pkg/fleetapi/acker/fleet/fleet_acker.go b/internal/pkg/fleetapi/acker/fleet/fleet_acker.go index c34fd8c3309..b78a55069d8 100644 --- a/internal/pkg/fleetapi/acker/fleet/fleet_acker.go +++ b/internal/pkg/fleetapi/acker/fleet/fleet_acker.go @@ -6,6 +6,7 @@ package fleet import ( "context" + "encoding/json" "fmt" "strings" "time" @@ -127,6 +128,23 @@ func constructEvent(action fleetapi.Action, agentID string) fleetapi.AckEvent { Message: fmt.Sprintf("Action '%s' of type '%s' acknowledged.", action.ID(), action.Type()), } + if a, ok := action.(fleetapi.RetryableAction); ok { + if err := a.GetError(); err != nil { + ackev.Error = err.Error() + var payload struct { + Retry bool `json:"retry"` + Attempt int `json:"retry_attempt,omitempty"` + } + payload.Retry = true + payload.Attempt = a.RetryAttempt() + if a.RetryAttempt() < 1 { + payload.Retry = false + } + p, _ := json.Marshal(payload) + ackev.Payload = p + } + } + if a, ok := action.(*fleetapi.ActionApp); ok { ackev.ActionInputType = a.InputType ackev.ActionData = a.Data diff --git a/internal/pkg/fleetapi/acker/fleet/fleet_acker_test.go b/internal/pkg/fleetapi/acker/fleet/fleet_acker_test.go index 251495b2173..ebea939a910 100644 --- a/internal/pkg/fleetapi/acker/fleet/fleet_acker_test.go +++ b/internal/pkg/fleetapi/acker/fleet/fleet_acker_test.go @@ -16,7 +16,9 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -114,6 +116,27 @@ func TestAcker_Ack(t *testing.T) { }, }, }, + { + name: "ackupgrade", + actions: []fleetapi.Action{ + &fleetapi.ActionUpgrade{ + ActionID: "upgrade-ok", + ActionType: fleetapi.ActionTypeUpgrade, + }, + &fleetapi.ActionUpgrade{ + ActionID: "upgrade-retry", + ActionType: fleetapi.ActionTypeUpgrade, + Retry: 1, + Err: errors.New("upgrade failed"), + }, + &fleetapi.ActionUpgrade{ + ActionID: "upgrade-failed", + ActionType: fleetapi.ActionTypeUpgrade, + Retry: -1, + Err: errors.New("upgrade failed"), + }, + }, + }, } log, _ := logger.New("fleet_acker", false) @@ 
-131,6 +154,29 @@ func TestAcker_Ack(t *testing.T) {
 	assert.EqualValues(t, ac.ID(), req.Events[i].ActionID)
 	assert.EqualValues(t, agentInfo.AgentID(), req.Events[i].AgentID)
 	assert.EqualValues(t, fmt.Sprintf("Action '%s' of type '%s' acknowledged.", ac.ID(), ac.Type()), req.Events[i].Message)
+	// Check if the fleet acker handles RetryableActions correctly using ActionUpgrade
+	if a, ok := ac.(*fleetapi.ActionUpgrade); ok {
+		if a.Err != nil {
+			assert.EqualValues(t, a.Err.Error(), req.Events[i].Error)
+			// Check payload
+			require.NotEmpty(t, req.Events[i].Payload)
+			var pl struct {
+				Retry   bool `json:"retry"`
+				Attempt int  `json:"retry_attempt,omitempty"`
+			}
+			err := json.Unmarshal(req.Events[i].Payload, &pl)
+			require.NoError(t, err)
+			assert.Equal(t, a.Retry, pl.Attempt, "action ID %s failed", a.ActionID)
+			// Check retry flag
+			if pl.Attempt > 0 {
+				assert.True(t, pl.Retry)
+			} else {
+				assert.False(t, pl.Retry)
+			}
+		} else {
+			assert.Empty(t, req.Events[i].Error)
+		}
+	}
 	if a, ok := ac.(*fleetapi.ActionApp); ok {
 		assert.EqualValues(t, a.InputType, req.Events[i].ActionInputType)
 		assert.EqualValues(t, a.Data, req.Events[i].ActionData)
@@ -147,27 +193,18 @@ func TestAcker_Ack(t *testing.T) {
 	t.Run(tc.name, func(t *testing.T) {
 		sender := &testSender{}
 		acker, err := NewAcker(log, agentInfo, sender)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if acker == nil {
-			t.Fatal("acker not initialized")
-		}
+		require.NoError(t, err)
+		require.NotNil(t, acker, "acker not initialized")
 		if len(tc.actions) == 1 {
 			err = acker.Ack(context.Background(), tc.actions[0])
 		} else {
 			_, err = acker.AckBatch(context.Background(), tc.actions)
 		}
+		require.NoError(t, err)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		if err := acker.Commit(context.Background()); err != nil {
-			t.Fatal(err)
-		}
+		err = acker.Commit(context.Background())
+		require.NoError(t, err)
 		checkRequest(t, tc.actions, sender.req)
 	})
diff --git a/internal/pkg/fleetapi/action.go b/internal/pkg/fleetapi/action.go
index 4e6b08cd372..f23ec6e89e4 100644
--- a/internal/pkg/fleetapi/action.go
+++ b/internal/pkg/fleetapi/action.go
@@ -46,14 +46,40 @@ type Action interface {
 	fmt.Stringer
 	Type() string
 	ID() string
-	// StartTime returns the earliest time an action should start (for schduled actions)
-	// Only ActionUpgrade implements this at the moment
+}
+
+// ScheduledAction is an Action that may be executed at a later date.
+// Only ActionUpgrade implements this at the moment.
+type ScheduledAction interface {
+	Action
+	// StartTime returns the earliest time an action should start.
 	StartTime() (time.Time, error)
-	// Expiration returns the time where an action is expired and should not be ran (for scheduled actions)
-	// Only ActionUpgrade implements this at the moment
+	// Expiration returns the time when an action is expired and should not be run.
 	Expiration() (time.Time, error)
 }
+// RetryableAction is an Action that may be scheduled for a retry.
+type RetryableAction interface {
+	ScheduledAction
+	// RetryAttempt returns the retry-attempt number of the action.
+	// The retry_attempt number is meant to be an internal counter for the elastic-agent and not communicated to fleet-server or ES.
+	// If RetryAttempt returns >= 1, and GetError is not nil, the acker should signal that the action is being retried.
+	// If RetryAttempt returns < 1, and GetError is not nil, the acker should signal that the action has failed.
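+	// For example (illustrative values): a failed upgrade with RetryAttempt() == 2
+	// is acked with payload {"retry": true, "retry_attempt": 2}, while one with
+	// RetryAttempt() == -1 is acked with payload {"retry": false, "retry_attempt": -1}.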
+	RetryAttempt() int
+	// SetRetryAttempt sets the retry-attempt number of the action.
+	// The retry_attempt number is meant to be an internal counter for the elastic-agent and not communicated to fleet-server or ES.
+	SetRetryAttempt(int)
+	// SetStartTime sets the start_time of the action to the specified value.
+	// This is used by the action-retry mechanism.
+	SetStartTime(t time.Time)
+	// GetError returns the error that is associated with the retry.
+	// If it is a retryable action fleet-server should mark it as such.
+	// Otherwise fleet-server should mark the action as failed.
+	GetError() error
+	// SetError sets the retryable action error.
+	SetError(error)
+}
+
 // FleetAction represents an action from fleet-server.
 // should copy the action definition in fleet-server/model/schema.json
 type FleetAction struct {
@@ -64,6 +90,7 @@ type FleetAction struct {
 	ActionStartTime string `yaml:"start_time,omitempty" json:"start_time,omitempty"`
 	Timeout int64 `yaml:"timeout,omitempty" json:"timeout,omitempty"`
 	Data json.RawMessage `yaml:"data,omitempty" json:"data,omitempty"`
+	Retry int `json:"retry_attempt,omitempty" yaml:"retry_attempt,omitempty"` // used internally for serialization by elastic-agent.
 	//Agents []string // disabled, fleet-server uses this to generate each agent's actions
 	//Timestamp string // disabled, agent does not care when the document was created
 	//UserID string // disabled, agent does not care
@@ -91,16 +118,6 @@ func (a *ActionUnknown) ID() string {
 	return a.ActionID
 }
-// StartTime returns ErrNoStartTime
-func (a *ActionUnknown) StartTime() (time.Time, error) {
-	return time.Time{}, ErrNoStartTime
-}
-
-// Expiration returns ErrNoExpiration
-func (a *ActionUnknown) Expiration() (time.Time, error) {
-	return time.Time{}, ErrNoExpiration
-}
-
 func (a *ActionUnknown) String() string {
 	var s strings.Builder
 	s.WriteString("action_id: ")
@@ -143,16 +160,6 @@ func (a *ActionPolicyReassign) ID() string {
 	return a.ActionID
 }
-// StartTime returns ErrNoStartTime
-func (a *ActionPolicyReassign) StartTime() (time.Time, error) {
-	return time.Time{}, ErrNoStartTime
-}
-
-// Expiration returns ErrNoExpiration
-func (a *ActionPolicyReassign) Expiration() (time.Time, error) {
-	return time.Time{}, ErrNoExpiration
-}
-
 // ActionPolicyChange is a request to apply a new policy.
 type ActionPolicyChange struct {
 	ActionID string `yaml:"action_id"`
@@ -179,16 +186,6 @@ func (a *ActionPolicyChange) ID() string {
 	return a.ActionID
 }
-// StartTime returns ErrNoStartTime
-func (a *ActionPolicyChange) StartTime() (time.Time, error) {
-	return time.Time{}, ErrNoStartTime
-}
-
-// Expiration returns ErrNoExpiration
-func (a *ActionPolicyChange) Expiration() (time.Time, error) {
-	return time.Time{}, ErrNoExpiration
-}
-
 // ActionUpgrade is a request for agent to upgrade.
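// It is currently the only action that implements both ScheduledAction and
// RetryableAction.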
type ActionUpgrade struct { ActionID string `yaml:"action_id"` @@ -197,6 +194,8 @@ type ActionUpgrade struct { ActionExpiration string `json:"expiration" yaml:"expiration,omitempty"` Version string `json:"version" yaml:"version,omitempty"` SourceURI string `json:"source_uri,omitempty" yaml:"source_uri,omitempty"` + Retry int `json:"retry_attempt,omitempty" yaml:"retry_attempt,omitempty"` + Err error } func (a *ActionUpgrade) String() string { @@ -242,6 +241,31 @@ func (a *ActionUpgrade) Expiration() (time.Time, error) { return ts.UTC(), nil } +// RetryAttempt will return the retry_attempt of the action +func (a *ActionUpgrade) RetryAttempt() int { + return a.Retry +} + +// SetRetryAttempt sets the retry_attempt of the action +func (a *ActionUpgrade) SetRetryAttempt(n int) { + a.Retry = n +} + +// GetError returns the error associated with the attempt to run the action. +func (a *ActionUpgrade) GetError() error { + return a.Err +} + +// SetError sets the error associated with the attempt to run the action. +func (a *ActionUpgrade) SetError(err error) { + a.Err = err +} + +// SetStartTime sets the start time of the action. +func (a *ActionUpgrade) SetStartTime(t time.Time) { + a.ActionStartTime = t.Format(time.RFC3339) +} + // ActionUnenroll is a request for agent to unhook from fleet. type ActionUnenroll struct { ActionID string `yaml:"action_id"` @@ -268,16 +292,6 @@ func (a *ActionUnenroll) ID() string { return a.ActionID } -// StartTime returns ErrNoStartTime -func (a *ActionUnenroll) StartTime() (time.Time, error) { - return time.Time{}, ErrNoStartTime -} - -// Expiration returns ErrNoExpiration -func (a *ActionUnenroll) Expiration() (time.Time, error) { - return time.Time{}, ErrNoExpiration -} - // ActionSettings is a request to change agent settings. 
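// It implements neither ScheduledAction nor RetryableAction.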
type ActionSettings struct { ActionID string `yaml:"action_id"` @@ -295,16 +309,6 @@ func (a *ActionSettings) Type() string { return a.ActionType } -// StartTime returns ErrNoStartTime -func (a *ActionSettings) StartTime() (time.Time, error) { - return time.Time{}, ErrNoStartTime -} - -// Expiration returns ErrNoExpiration -func (a *ActionSettings) Expiration() (time.Time, error) { - return time.Time{}, ErrNoExpiration -} - func (a *ActionSettings) String() string { var s strings.Builder s.WriteString("action_id: ") @@ -333,16 +337,6 @@ func (a *ActionCancel) Type() string { return a.ActionType } -// StartTime returns ErrNoStartTime -func (a *ActionCancel) StartTime() (time.Time, error) { - return time.Time{}, ErrNoStartTime -} - -// Expiration returns ErrNoExpiration -func (a *ActionCancel) Expiration() (time.Time, error) { - return time.Time{}, ErrNoExpiration -} - func (a *ActionCancel) String() string { var s strings.Builder s.WriteString("action_id: ") @@ -388,16 +382,6 @@ func (a *ActionApp) Type() string { return a.ActionType } -// StartTime returns ErrNoStartTime -func (a *ActionApp) StartTime() (time.Time, error) { - return time.Time{}, ErrNoStartTime -} - -// Expiration returns ErrExpiration -func (a *ActionApp) Expiration() (time.Time, error) { - return time.Time{}, ErrNoExpiration -} - // MarshalMap marshals ActionApp into a corresponding map func (a *ActionApp) MarshalMap() (map[string]interface{}, error) { var res map[string]interface{} @@ -544,6 +528,7 @@ func (a *Actions) UnmarshalYAML(unmarshal func(interface{}) error) error { ActionType: n.ActionType, ActionStartTime: n.ActionStartTime, ActionExpiration: n.ActionExpiration, + Retry: n.Retry, } if err := yaml.Unmarshal(n.Data, &action); err != nil { return errors.New(err, diff --git a/internal/pkg/fleetapi/action_test.go b/internal/pkg/fleetapi/action_test.go index b21e591c297..6a8dae3b31a 100644 --- a/internal/pkg/fleetapi/action_test.go +++ b/internal/pkg/fleetapi/action_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//nolint:dupl // duplicate code is in test cases
 package fleetapi
 
 import (
@@ -97,6 +98,7 @@ func TestActionsUnmarshalJSON(t *testing.T) {
 		assert.Empty(t, action.ActionExpiration)
 		assert.Equal(t, "1.2.3", action.Version)
 		assert.Equal(t, "http://example.com", action.SourceURI)
+		assert.Equal(t, 0, action.Retry)
 	})
 	t.Run("ActionUpgrade with start time", func(t *testing.T) {
 		p := []byte(`[{"id":"testid","type":"UPGRADE","start_time":"2022-01-02T12:00:00Z","expiration":"2022-01-02T13:00:00Z","data":{"version":"1.2.3","source_uri":"http://example.com"}}]`)
@@ -111,6 +113,7 @@ func TestActionsUnmarshalJSON(t *testing.T) {
 		assert.Equal(t, "2022-01-02T13:00:00Z", action.ActionExpiration)
 		assert.Equal(t, "1.2.3", action.Version)
 		assert.Equal(t, "http://example.com", action.SourceURI)
+		assert.Equal(t, 0, action.Retry)
 	})
 	t.Run("ActionPolicyChange no start time", func(t *testing.T) {
 		p := []byte(`[{"id":"testid","type":"POLICY_CHANGE","data":{"policy":{"key":"value"}}}]`)
@@ -134,4 +137,19 @@
 		assert.Equal(t, ActionTypePolicyChange, action.ActionType)
 		assert.NotNil(t, action.Policy)
 	})
+	t.Run("ActionUpgrade with retry_attempt", func(t *testing.T) {
+		p := []byte(`[{"id":"testid","type":"UPGRADE","data":{"version":"1.2.3","source_uri":"http://example.com","retry_attempt":1}}]`)
+		a := &Actions{}
+		err := a.UnmarshalJSON(p)
+		require.Nil(t, err)
+		action, ok := (*a)[0].(*ActionUpgrade)
+		require.True(t, ok, "unable to cast action to specific type")
+		assert.Equal(t, "testid", action.ActionID)
+		assert.Equal(t, ActionTypeUpgrade, action.ActionType)
+		assert.Empty(t, action.ActionStartTime)
+		assert.Empty(t, action.ActionExpiration)
+		assert.Equal(t, "1.2.3", action.Version)
+		assert.Equal(t, "http://example.com", action.SourceURI)
+		assert.Equal(t, 1, action.Retry)
+	})
 }
diff --git a/internal/pkg/queue/actionqueue.go b/internal/pkg/queue/actionqueue.go
index 0f3a2c20ffc..b0cdc127dff 100644
--- a/internal/pkg/queue/actionqueue.go
+++ b/internal/pkg/queue/actionqueue.go
@@ -19,7 +19,7 @@ type saver interface {
 // item tracks an action in the action queue
 type item struct {
-	action fleetapi.Action
+	action fleetapi.ScheduledAction
 	priority int64
 	index int
 }
@@ -76,7 +76,11 @@ func (q *queue) Pop() interface{} {
 // Will return an error if StartTime fails for any action.
 func newQueue(actions []fleetapi.Action) (*queue, error) {
 	q := make(queue, len(actions))
-	for i, action := range actions {
+	for i, a := range actions {
+		action, ok := a.(fleetapi.ScheduledAction)
+		if !ok {
+			continue
+		}
 		ts, err := action.StartTime()
 		if err != nil {
 			return nil, err
@@ -106,7 +110,7 @@ func NewActionQueue(actions []fleetapi.Action, s saver) (*ActionQueue, error) {
 // Add will add an action to the queue with the associated priority.
 // The priority is meant to be the start-time of the action as a unix epoch time.
 // Complexity: O(log n)
-func (q *ActionQueue) Add(action fleetapi.Action, priority int64) {
+func (q *ActionQueue) Add(action fleetapi.ScheduledAction, priority int64) {
 	e := &item{
 		action: action,
 		priority: priority,
@@ -116,9 +120,9 @@
 // DequeueActions will dequeue all actions that have a priority less than time.Now().
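// Only ScheduledActions can be queued (newQueue skips other action types and Add
// only accepts a ScheduledAction), so every dequeued action can report a StartTime.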
// Complexity: O(n*log n) -func (q *ActionQueue) DequeueActions() []fleetapi.Action { +func (q *ActionQueue) DequeueActions() []fleetapi.ScheduledAction { ts := time.Now().Unix() - actions := make([]fleetapi.Action, 0) + actions := make([]fleetapi.ScheduledAction, 0) for q.q.Len() != 0 { if (*q.q)[0].priority > ts { break @@ -153,6 +157,20 @@ func (q *ActionQueue) Actions() []fleetapi.Action { return actions } +// CancelType cancels all actions in the queue with a matching action type and returns the number of entries cancelled. +func (q *ActionQueue) CancelType(actionType string) int { + items := make([]*item, 0) + for _, item := range *q.q { + if item.action.Type() == actionType { + items = append(items, item) + } + } + for _, item := range items { + heap.Remove(q.q, item.index) + } + return len(items) +} + // Save persists the queue to disk. func (q *ActionQueue) Save() error { q.s.SetQueue(q.Actions()) diff --git a/internal/pkg/queue/actionqueue_test.go b/internal/pkg/queue/actionqueue_test.go index d951f855737..29643a80326 100644 --- a/internal/pkg/queue/actionqueue_test.go +++ b/internal/pkg/queue/actionqueue_test.go @@ -47,15 +47,15 @@ func (m *mockAction) Expiration() (time.Time, error) { return args.Get(0).(time.Time), args.Error(1) } -type mockPersistor struct { +type mockSaver struct { mock.Mock } -func (m *mockPersistor) SetQueue(a []fleetapi.Action) { +func (m *mockSaver) SetQueue(a []fleetapi.Action) { m.Called(a) } -func (m *mockPersistor) Save() error { +func (m *mockSaver) Save() error { args := m.Called() return args.Error(0) } @@ -238,7 +238,7 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} actions := aq.DequeueActions() @@ -272,7 +272,7 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} actions := aq.DequeueActions() @@ -304,7 +304,7 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} actions := aq.DequeueActions() @@ -332,7 +332,7 @@ func Test_ActionQueue_DequeueActions(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} actions := aq.DequeueActions() assert.Empty(t, actions) @@ -361,7 +361,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { t.Run("empty queue", func(t *testing.T) { q := &queue{} - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} n := aq.Cancel("test-1") assert.Zero(t, n) @@ -383,7 +383,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} n := aq.Cancel("test-1") assert.Equal(t, 1, n) @@ -413,7 +413,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} n := aq.Cancel("test-1") assert.Equal(t, 2, n) @@ -440,7 +440,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} n := aq.Cancel("test-1") assert.Equal(t, 3, n) @@ -462,7 +462,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} n := aq.Cancel("test-0") assert.Zero(t, n) @@ 
-484,7 +484,7 @@ func Test_ActionQueue_Cancel(t *testing.T) { func Test_ActionQueue_Actions(t *testing.T) { t.Run("empty queue", func(t *testing.T) { q := &queue{} - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} actions := aq.Actions() assert.Len(t, actions, 0) }) @@ -510,10 +510,72 @@ func Test_ActionQueue_Actions(t *testing.T) { index: 2, }} heap.Init(q) - aq := &ActionQueue{q, &mockPersistor{}} + aq := &ActionQueue{q, &mockSaver{}} actions := aq.Actions() assert.Len(t, actions, 3) assert.Equal(t, "test-1", actions[0].ID()) }) } + +func Test_ActionQueue_CancelType(t *testing.T) { + a1 := &mockAction{} + a1.On("ID").Return("test-1") + a1.On("Type").Return("upgrade") + a2 := &mockAction{} + a2.On("ID").Return("test-2") + a2.On("Type").Return("upgrade") + a3 := &mockAction{} + a3.On("ID").Return("test-3") + a3.On("Type").Return("unknown") + + t.Run("empty queue", func(t *testing.T) { + aq := &ActionQueue{&queue{}, &mockSaver{}} + + n := aq.CancelType("upgrade") + assert.Equal(t, 0, n) + }) + + t.Run("single item in queue", func(t *testing.T) { + q := &queue{&item{ + action: a1, + priority: 1, + index: 0, + }} + heap.Init(q) + aq := &ActionQueue{q, &mockSaver{}} + + n := aq.CancelType("upgrade") + assert.Equal(t, 1, n) + }) + + t.Run("no matches in queue", func(t *testing.T) { + q := &queue{&item{ + action: a3, + priority: 1, + index: 0, + }} + heap.Init(q) + aq := &ActionQueue{q, &mockSaver{}} + + n := aq.CancelType("upgrade") + assert.Equal(t, 0, n) + }) + + t.Run("all items cancelled", func(t *testing.T) { + q := &queue{&item{ + action: a1, + priority: 1, + index: 0, + }, &item{ + action: a2, + priority: 2, + index: 1, + }} + heap.Init(q) + aq := &ActionQueue{q, &mockSaver{}} + + n := aq.CancelType("upgrade") + assert.Equal(t, 2, n) + }) +} From ec83c2c7aa6bad91c850e2436ddffed4b7f21420 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Wed, 19 Oct 2022 16:40:35 +0200 Subject: [PATCH 27/49] V1 metrics monitoring for V2 (#1487) V1 metrics monitoring for V2 (#1487) --- NOTICE.txt | 932 +++++++++++++++++- go.mod | 5 + go.sum | 11 + internal/pkg/agent/application/application.go | 6 +- .../application/coordinator/coordinator.go | 28 +- .../agent/application/monitoring/handler.go | 70 ++ .../agent/application/monitoring/server.go | 86 ++ .../pkg/agent/application/monitoring/stats.go | 36 + .../application/monitoring/v1_monitor.go | 913 +++++++++++++++++ internal/pkg/agent/cmd/inspect.go | 79 +- internal/pkg/agent/cmd/run.go | 42 +- internal/pkg/agent/configuration/settings.go | 18 +- internal/pkg/agent/install/uninstall.go | 2 +- internal/pkg/agent/vars/vars.go | 3 +- internal/pkg/core/monitoring/config/config.go | 1 + pkg/component/component.go | 43 +- pkg/component/component_test.go | 3 +- pkg/component/runtime/command.go | 27 +- pkg/component/runtime/manager.go | 6 +- pkg/component/runtime/manager_test.go | 40 +- pkg/component/runtime/runtime.go | 8 +- specs/metricbeat.spec.yml | 6 + version/version.go | 2 +- 23 files changed, 2271 insertions(+), 96 deletions(-) create mode 100644 internal/pkg/agent/application/monitoring/handler.go create mode 100644 internal/pkg/agent/application/monitoring/server.go create mode 100644 internal/pkg/agent/application/monitoring/stats.go create mode 100644 internal/pkg/agent/application/monitoring/v1_monitor.go diff --git a/NOTICE.txt b/NOTICE.txt index 3c55a5e0295..ad7c25aaad6 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1270,6 +1270,217 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-l 
limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/elastic-agent-system-metrics +Version: v0.4.4 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-system-metrics@v0.4.4/LICENSE.txt: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-licenser Version: v0.4.0 @@ -2009,6 +2220,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/mux +Version: v1.8.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.8.0/LICENSE: + +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/hashicorp/go-multierror Version: v1.1.1 @@ -3996,12 +4244,12 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/apm@v1.15.0/LICENSE: -------------------------------------------------------------------------------- -Dependency : go.elastic.co/apm/module/apmgrpc +Dependency : go.elastic.co/apm/module/apmgorilla Version: v1.15.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmgrpc@v1.15.0/LICENSE: +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmgorilla@v1.15.0/LICENSE: Apache License Version 2.0, January 2004 @@ -4207,13 +4455,12 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmgrpc@v -------------------------------------------------------------------------------- -Dependency : go.elastic.co/ecszap -Version: v1.0.1 +Dependency : go.elastic.co/apm/module/apmgrpc +Version: v1.15.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.1/LICENSE: - +Contents of probable licence file $GOMODCACHE/go.elastic.co/apm/module/apmgrpc@v1.15.0/LICENSE: Apache License Version 2.0, January 2004 @@ -4403,7 +4650,219 @@ Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.1/LICENS same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2020 Elastic and contributors + Copyright 2018 Elasticsearch BV + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + +-------------------------------------------------------------------------------- +Dependency : go.elastic.co/ecszap +Version: v1.0.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.elastic.co/ecszap@v1.0.1/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2020 Elastic and contributors Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -7491,6 +7950,217 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-connections@v limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-structform +Version: v0.0.9 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-structform@v0.0.9/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2012–2018 Elastic + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-windows Version: v1.0.1 @@ -7703,6 +8373,217 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/gosigar +Version: v0.14.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/gosigar@v0.14.2/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ +END OF TERMS AND CONDITIONS + +APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + +Copyright [yyyy] [name of copyright owner] + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/elazarl/goproxy Version: v0.0.0-20180725130230-947c36da3153 @@ -9321,43 +10202,6 @@ Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.5 --------------------------------------------------------------------------------- -Dependency : github.com/gorilla/mux -Version: v1.8.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.8.0/LICENSE: - -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-
-
 --------------------------------------------------------------------------------
 Dependency : github.com/grpc-ecosystem/go-grpc-middleware
 Version: v1.3.0
diff --git a/go.mod b/go.mod
index 0aa3bfaa8cd..148cee40adc 100644
--- a/go.mod
+++ b/go.mod
@@ -14,12 +14,14 @@ require (
 	github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab
 	github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484
 	github.com/elastic/elastic-agent-libs v0.2.6
+	github.com/elastic/elastic-agent-system-metrics v0.4.4
 	github.com/elastic/go-licenser v0.4.0
 	github.com/elastic/go-sysinfo v1.7.1
 	github.com/elastic/go-ucfg v0.8.5
 	github.com/gofrs/flock v0.8.1
 	github.com/gofrs/uuid v4.2.0+incompatible
 	github.com/google/go-cmp v0.5.6
+	github.com/gorilla/mux v1.8.0
 	github.com/hashicorp/go-multierror v1.1.1
 	github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95
 	github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901
@@ -38,6 +40,7 @@ require (
 	github.com/spf13/cobra v1.3.0
 	github.com/stretchr/testify v1.7.0
 	github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b
+	go.elastic.co/apm/module/apmgorilla v1.15.0
 	go.elastic.co/ecszap v1.0.1
 	go.elastic.co/go-licence-detector v0.5.0
 	go.uber.org/zap v1.21.0
@@ -69,7 +72,9 @@ require (
 	github.com/docker/distribution v2.8.1+incompatible // indirect
 	github.com/docker/docker v20.10.12+incompatible // indirect
 	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/elastic/go-structform v0.0.9 // indirect
 	github.com/elastic/go-windows v1.0.1 // indirect
+	github.com/elastic/gosigar v0.14.2 // indirect
 	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
 	github.com/fatih/color v1.13.0 // indirect
 	github.com/fsnotify/fsnotify v1.5.1 // indirect
diff --git a/go.sum b/go.sum
index bc17c5e307b..47d8d474785 100644
--- a/go.sum
+++ b/go.sum
@@ -383,12 +383,16 @@ github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go
 github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM=
 github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o=
 github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE=
+github.com/elastic/elastic-agent-system-metrics v0.4.4 h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0=
+github.com/elastic/elastic-agent-system-metrics v0.4.4/go.mod h1:tF/f9Off38nfzTZHIVQ++FkXrDm9keFhFpJ+3pQ00iI=
 github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY=
 github.com/elastic/go-elasticsearch/v7 v7.16.0/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4=
 github.com/elastic/go-elasticsearch/v8 v8.0.0-20210317102009-a9d74cec0186/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4=
 github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ=
 github.com/elastic/go-licenser v0.4.0 h1:jLq6A5SilDS/Iz1ABRkO6BHy91B9jBora8FwGRsDqUI=
 github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tldns1i87iTEvU=
+github.com/elastic/go-structform v0.0.9 h1:HpcS7xljL4kSyUfDJ8cXTJC6rU5ChL1wYb6cx3HLD+o=
+github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4=
 github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
 github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0=
 github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0=
@@ -398,6 +402,8 @@ github.com/elastic/go-ucfg v0.8.5/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT
 github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU=
 github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0=
 github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss=
+github.com/elastic/gosigar v0.14.2 h1:Dg80n8cr90OZ7x+bAax/QjoW/XqTI11RmA79ZwIm9/4=
+github.com/elastic/gosigar v0.14.2/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs=
 github.com/elastic/package-spec v1.3.0/go.mod h1:KzGTSDqCkdhmL1IFpOH2ZQNSSE9JEhNtndxU3ZrQilA=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc=
 github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
@@ -634,8 +640,10 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c
 github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
 github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
 github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
+github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
 github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
+github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
@@ -1189,6 +1197,8 @@ go.elastic.co/apm v1.13.0/go.mod h1:dylGv2HKR0tiCV+wliJz1KHtDyuD8SPe69oV7VyK6WY=
 go.elastic.co/apm v1.15.0 h1:uPk2g/whK7c7XiZyz/YCUnAUBNPiyNeE3ARX3G6Gx7Q=
 go.elastic.co/apm v1.15.0/go.mod h1:dylGv2HKR0tiCV+wliJz1KHtDyuD8SPe69oV7VyK6WY=
 go.elastic.co/apm/module/apmelasticsearch v1.10.0/go.mod h1:lwoaGDfZzfb9e6TXd3h8/KNmLAONOas7o5NLVNmv8Xk=
+go.elastic.co/apm/module/apmgorilla v1.15.0 h1:1yTAksffgaFXYEIwlLRiQnxLfy3p3RtpDw8HDupIJfY=
+go.elastic.co/apm/module/apmgorilla v1.15.0/go.mod h1:+23mZudYvZ9VgxCQjseLo9EF5gkKEr0KSQBupw+rzP8=
 go.elastic.co/apm/module/apmgrpc v1.15.0 h1:Z7h58uuMJUoYXK6INFunlcGEXZQ18QKAhPh6NFYDNHE=
 go.elastic.co/apm/module/apmgrpc v1.15.0/go.mod h1:IEbTGJzY5Xx737PkHDT3bbzh9syovK+IfAlckJsUgPE=
 go.elastic.co/apm/module/apmhttp v1.10.0/go.mod h1:Y4timwcJ8sQWbWpcw3Y7Mat1OssNpGhpwyfUnpqIDew=
@@ -1429,6 +1439,7 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go
index c5076535825..d3edb21888e 100644
--- a/internal/pkg/agent/application/application.go
+++ b/internal/pkg/agent/application/application.go
@@ -11,6 +11,7 @@ import (
 
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/coordinator"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/monitoring"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
@@ -64,8 +65,9 @@ func New(
 	}
 
 	upgrader := upgrade.NewUpgrader(log, cfg.Settings.DownloadConfig, agentInfo)
+	monitor := monitoring.New(cfg.Settings.V1MonitoringEnabled, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, agentInfo)
 
-	runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), agentInfo, tracer)
+	runtime, err := runtime.NewManager(log, cfg.Settings.GRPC.String(), agentInfo, tracer, monitor)
 	if err != nil {
 		return nil, fmt.Errorf("failed to initialize runtime manager: %w", err)
 	}
@@ -114,7 +116,7 @@ func New(
 		return nil, errors.New(err, "failed to initialize composable controller")
 	}
 
-	coord := coordinator.New(log, agentInfo, specs, reexec, upgrader, runtime, configMgr, composable, caps, compModifiers...)
+	coord := coordinator.New(log, agentInfo, specs, reexec, upgrader, runtime, configMgr, composable, caps, monitor, compModifiers...)
 	if managed != nil {
 		// the coordinator requires the config manager as well as in managed-mode the config manager requires the
 		// coordinator, so it must be set here once the coordinator is created
diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go
index 407ebbca625..6c68e5cf610 100644
--- a/internal/pkg/agent/application/coordinator/coordinator.go
+++ b/internal/pkg/agent/application/coordinator/coordinator.go
@@ -57,6 +57,18 @@ type UpgradeManager interface {
 	Ack(ctx context.Context, acker acker.Acker) error
 }
 
+// MonitorManager provides an interface to perform the monitoring action for the agent.
+type MonitorManager interface {
+	// Enabled reports whether the agent is configured to collect metrics/logs.
+	Enabled() bool
+
+	// Reload reloads the configuration for the monitor manager.
+	Reload(rawConfig *config.Config) error
+
+	// MonitoringConfig injects monitoring configuration into the resolved AST tree.
+	MonitoringConfig(map[string]interface{}, map[string]string) (map[string]interface{}, error)
+}
+
 // Runner provides interface to run a manager and receive running errors.
 type Runner interface {
 	// Run runs the manager.
@@ -151,6 +163,7 @@ type Coordinator struct {
 
 	reexecMgr  ReExecManager
 	upgradeMgr UpgradeManager
+	monitorMgr MonitorManager
 
 	runtimeMgr    RuntimeManager
 	runtimeMgrErr error
@@ -166,7 +179,7 @@ type Coordinator struct {
 }
 
 // New creates a new coordinator.
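+// The monitorMgr parameter below is the v2 addition; a minimal wiring sketch
+// (names as in application.New above, otherwise hypothetical):
+//
+//	monitor := monitoring.New(v1Enabled, "linux", monitoringCfg, agentInfo)
+//	coord := coordinator.New(log, agentInfo, specs, reexec, upgrader,
+//		runtimeMgr, configMgr, varsMgr, caps, monitor)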
-func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, modifiers ...ComponentsModifier) *Coordinator {
+func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.RuntimeSpecs, reexecMgr ReExecManager, upgradeMgr UpgradeManager, runtimeMgr RuntimeManager, configMgr ConfigManager, varsMgr VarsManager, caps capabilities.Capability, monitorMgr MonitorManager, modifiers ...ComponentsModifier) *Coordinator {
 	return &Coordinator{
 		logger:    logger,
 		agentInfo: agentInfo,
@@ -181,6 +194,7 @@ func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.Runti
 		state: coordinatorState{
 			state: agentclient.Starting,
 		},
+		monitorMgr: monitorMgr,
 	}
 }
 
@@ -575,6 +589,10 @@ func (c *Coordinator) processConfig(ctx context.Context, cfg *config.Config) (er
 		return fmt.Errorf("failed to reload upgrade manager configuration: %w", err)
 	}
 
+	if err := c.monitorMgr.Reload(cfg); err != nil {
+		return fmt.Errorf("failed to reload monitor manager configuration: %w", err)
+	}
+
 	c.state.config = cfg
 	c.state.ast = rawAst
 
@@ -640,7 +658,13 @@ func (c *Coordinator) compute() (map[string]interface{}, []component.Component,
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to convert ast to map[string]interface{}: %w", err)
 	}
-	comps, err := c.specs.ToComponents(cfg)
+
+	var configInjector component.GenerateMonitoringCfgFn
+	if c.monitorMgr.Enabled() {
+		configInjector = c.monitorMgr.MonitoringConfig
+	}
+
+	comps, err := c.specs.ToComponents(cfg, configInjector)
 	if err != nil {
 		return nil, nil, fmt.Errorf("failed to render components: %w", err)
 	}
diff --git a/internal/pkg/agent/application/monitoring/handler.go b/internal/pkg/agent/application/monitoring/handler.go
new file mode 100644
index 00000000000..6bec3eb37f2
--- /dev/null
+++ b/internal/pkg/agent/application/monitoring/handler.go
@@ -0,0 +1,70 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package monitoring
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+const errTypeUnexpected = "UNEXPECTED"
+
+type apiError interface {
+	Status() int
+}
+
+func createHandler(fn func(w http.ResponseWriter, r *http.Request) error) *apiHandler {
+	return &apiHandler{
+		innerFn: fn,
+	}
+}
+
+type apiHandler struct {
+	innerFn func(w http.ResponseWriter, r *http.Request) error
+}
+
+// ServeHTTP sets the status code based on the error returned by the inner handler.
+func (h *apiHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	err := h.innerFn(w, r)
+	if err != nil {
+		switch e := err.(type) { // nolint:errorlint // Will need refactor.
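+		// A typed apiError chooses its own HTTP status code here; any other
+		// error falls through to a 500 with an UNEXPECTED error body.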
+		case apiError:
+			w.WriteHeader(e.Status())
+		default:
+			w.WriteHeader(http.StatusInternalServerError)
+		}
+
+		writeResponse(w, unexpectedErrorWithReason(err.Error()))
+	}
+}
+
+func writeResponse(w http.ResponseWriter, c interface{}) {
+	bytes, err := json.Marshal(c)
+	if err != nil {
+		// json marshal failed
+		fmt.Fprintf(w, "Not valid json: %v", err)
+		return
+	}
+
+	fmt.Fprint(w, string(bytes))
+}
+
+type errResponse struct {
+	// Type is a type of error
+	Type string `json:"type"`
+
+	// Reason is a detailed error message
+	Reason string `json:"reason"`
+}
+
+func unexpectedErrorWithReason(reason string, args ...interface{}) errResponse {
+	return errResponse{
+		Type:   errTypeUnexpected,
+		Reason: fmt.Sprintf(reason, args...),
+	}
+}
diff --git a/internal/pkg/agent/application/monitoring/server.go b/internal/pkg/agent/application/monitoring/server.go
new file mode 100644
index 00000000000..ef5a26df9d2
--- /dev/null
+++ b/internal/pkg/agent/application/monitoring/server.go
@@ -0,0 +1,86 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package monitoring
+
+import (
+	"net/http"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+
+	"github.com/gorilla/mux"
+	"go.elastic.co/apm"
+	"go.elastic.co/apm/module/apmgorilla"
+
+	"github.com/elastic/elastic-agent-libs/api"
+	"github.com/elastic/elastic-agent-libs/config"
+	"github.com/elastic/elastic-agent-libs/monitoring"
+	"github.com/elastic/elastic-agent/pkg/core/logger"
+)
+
+// NewServer creates a new server exposing metrics and process information.
+func NewServer(
+	log *logger.Logger,
+	endpointConfig api.Config,
+	ns func(string) *monitoring.Namespace,
+	tracer *apm.Tracer,
+) (*api.Server, error) {
+	if err := createAgentMonitoringDrop(endpointConfig.Host); err != nil {
+		// log but ignore
+		log.Errorf("failed to create monitoring drop: %v", err)
+	}
+
+	cfg, err := config.NewConfigFrom(endpointConfig)
+	if err != nil {
+		return nil, err
+	}
+
+	return exposeMetricsEndpoint(log, cfg, ns, tracer)
+}
+
+func exposeMetricsEndpoint(
+	log *logger.Logger,
+	config *config.C,
+	ns func(string) *monitoring.Namespace,
+	tracer *apm.Tracer,
+) (*api.Server, error) {
+	r := mux.NewRouter()
+	if tracer != nil {
+		r.Use(apmgorilla.Middleware(apmgorilla.WithTracer(tracer)))
+	}
+	statsHandler := statsHandler(ns("stats"))
+	r.Handle("/stats", createHandler(statsHandler))
+
+	mux := http.NewServeMux()
+	mux.Handle("/", r)
+
+	return api.New(log, mux, config)
+}
+
+func createAgentMonitoringDrop(drop string) error {
+	if drop == "" || runtime.GOOS == "windows" {
+		return nil
+	}
+
+	path := strings.TrimPrefix(drop, "unix://")
+	if strings.HasSuffix(path, ".sock") {
+		path = filepath.Dir(path)
+	}
+
+	_, err := os.Stat(path)
+	if err != nil {
+		if !os.IsNotExist(err) {
+			return err
+		}
+
+		// create
+		if err := os.MkdirAll(path, 0775); err != nil {
+			return err
+		}
+	}
+
+	return os.Chown(path, os.Geteuid(), os.Getegid())
+}
diff --git a/internal/pkg/agent/application/monitoring/stats.go b/internal/pkg/agent/application/monitoring/stats.go
new file mode 100644
index 00000000000..c395e224214
--- /dev/null
+++ b/internal/pkg/agent/application/monitoring/stats.go
@@ -0,0 +1,36 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package monitoring
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+
+	"github.com/elastic/elastic-agent-libs/monitoring"
+)
+
+func statsHandler(ns *monitoring.Namespace) func(http.ResponseWriter, *http.Request) error {
+	return func(w http.ResponseWriter, r *http.Request) error {
+		w.Header().Set("Content-Type", "application/json; charset=utf-8")
+
+		data := monitoring.CollectStructSnapshot(
+			ns.GetRegistry(),
+			monitoring.Full,
+			false,
+		)
+
+		bytes, err := json.Marshal(data)
+		var content string
+		if err != nil {
+			content = fmt.Sprintf("Not valid json: %v", err)
+		} else {
+			content = string(bytes)
+		}
+		fmt.Fprint(w, content)
+
+		return nil
+	}
+}
diff --git a/internal/pkg/agent/application/monitoring/v1_monitor.go b/internal/pkg/agent/application/monitoring/v1_monitor.go
new file mode 100644
index 00000000000..1d8f2750afd
--- /dev/null
+++ b/internal/pkg/agent/application/monitoring/v1_monitor.go
@@ -0,0 +1,913 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package monitoring
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"net/url"
+	"os"
+	"path/filepath"
+	"runtime"
+	"strings"
+	"unicode"
+
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/errors"
+	"github.com/elastic/elastic-agent/internal/pkg/config"
+	monitoringCfg "github.com/elastic/elastic-agent/internal/pkg/core/monitoring/config"
+)
+
+const (
+	// args: data path, log file name
+	logFileFormat = "%s/logs/%s"
+	// args: data path, log file name
+	logFileFormatWin = "%s\\logs\\%s"
+
+	// args: unit id
+	mbEndpointFileFormatWin = `npipe:///%s`
+
+	// static agent npipe endpoint on Windows (no args)
+	agentMbEndpointFileFormatWin = `npipe:///elastic-agent`
+	// agentMbEndpointHTTP is used with cloud and exposes metrics on an HTTP endpoint
+	agentMbEndpointHTTP = "http://%s:%d"
+	httpPlusPrefix      = "http+"
+	httpPrefix          = "http"
+	fileSchemePrefix    = "file"
+	unixSchemePrefix    = "unix"
+
+	defaultOutputName          = "default"
+	outputsKey                 = "outputs"
+	inputsKey                  = "inputs"
+	idKey                      = "id"
+	agentKey                   = "agent"
+	monitoringKey              = "monitoring"
+	useOutputKey               = "use_output"
+	monitoringOutput           = "monitoring"
+	defaultMonitoringNamespace = "default"
+	agentName                  = "elastic-agent"
+
+	windowsOS = "windows"
+)
+
+var (
+	supportedComponents      = []string{"filebeat", "metricbeat", "apm-server", "auditbeat", "cloudbeat", "endpoint-security", "fleet-server", "heartbeat", "osquerybeat", "packetbeat"}
+	supportedBeatsComponents = []string{"filebeat", "metricbeat", "auditbeat", "cloudbeat", "heartbeat", "osquerybeat", "packetbeat"}
+)
+
+// BeatsMonitor provides V1 monitoring support.
+type BeatsMonitor struct {
+	enabled         bool // feature flag disabling whole v1 monitoring story
+	config          *monitoringConfig
+	operatingSystem string
+	agentInfo       *info.AgentInfo
+}
+
+type monitoringConfig struct {
+	C *monitoringCfg.MonitoringConfig `config:"agent.monitoring"`
+}
+
+// New creates a new BeatsMonitor instance.
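+//
+// A minimal usage sketch (illustrative only; cfg and agentInfo are assumed to
+// be built elsewhere, as in application.New):
+//
+//	monitor := monitoring.New(true, runtime.GOOS, cfg, agentInfo)
+//	if monitor.Enabled() {
+//		extra, err := monitor.MonitoringConfig(policy, componentIDToBinary)
+//		// merge the returned monitoring inputs/outputs into the policy
+//	}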
+func New(enabled bool, operatingSystem string, cfg *monitoringCfg.MonitoringConfig, agentInfo *info.AgentInfo) *BeatsMonitor {
+	return &BeatsMonitor{
+		enabled: enabled,
+		config: &monitoringConfig{
+			C: cfg,
+		},
+		operatingSystem: operatingSystem,
+		agentInfo:       agentInfo,
+	}
+}
+
+// Enabled returns true if monitoring is enabled and at least one of logs and metrics should be collected.
+func (b *BeatsMonitor) Enabled() bool {
+	return b.enabled && b.config.C.Enabled && (b.config.C.MonitorLogs || b.config.C.MonitorMetrics)
+}
+
+// Reload refreshes monitoring configuration.
+func (b *BeatsMonitor) Reload(rawConfig *config.Config) error {
+	if !b.Enabled() {
+		return nil
+	}
+
+	if err := rawConfig.Unpack(&b.config); err != nil {
+		return errors.New(err, "failed to unpack monitoring config during reload")
+	}
+	return nil
+}
+
+// MonitoringConfig adds monitoring inputs to a configuration based on the retrieved list of components to run.
+func (b *BeatsMonitor) MonitoringConfig(policy map[string]interface{}, componentIDToBinary map[string]string) (map[string]interface{}, error) {
+	if !b.Enabled() {
+		return nil, nil
+	}
+
+	monitoringOutputName := defaultOutputName
+	if agentCfg, found := policy[agentKey]; found {
+		agentCfgMap, ok := agentCfg.(map[string]interface{})
+		if ok {
+			if monitoringCfg, found := agentCfgMap[monitoringKey]; found {
+				monitoringMap, ok := monitoringCfg.(map[string]interface{})
+				if ok {
+					if use, found := monitoringMap[useOutputKey]; found {
+						if useStr, ok := use.(string); ok {
+							monitoringOutputName = useStr
+						}
+					}
+				}
+			}
+		}
+	}
+
+	cfg := make(map[string]interface{})
+
+	if err := b.injectMonitoringOutput(policy, cfg, monitoringOutputName); err != nil {
+		return nil, errors.New(err, "failed to inject monitoring output")
+	}
+
+	// initializes inputs collection so injectors don't have to deal with it
+	b.initInputs(cfg)
+
+	if b.config.C.MonitorLogs {
+		if err := b.injectLogsInput(cfg, componentIDToBinary, monitoringOutput); err != nil {
+			return nil, errors.New(err, "failed to inject logs input")
+		}
+	}
+
+	if b.config.C.MonitorMetrics {
+		if err := b.injectMetricsInput(cfg, componentIDToBinary, monitoringOutput); err != nil {
+			return nil, errors.New(err, "failed to inject metrics input")
+		}
+	}
+	return cfg, nil
+}
+
+// EnrichArgs enriches arguments provided to the application in order to enable
+// monitoring.
+func (b *BeatsMonitor) EnrichArgs(unit, binary string, args []string) []string {
+	if !b.enabled {
+		// Even when monitoring is disabled in the config, args are still enriched;
+		// the only way to skip enrichment is to disable monitoring via the feature flag.
+		return args
+	}
+
+	// only beats understands these flags
+	if !isSupportedBeatsBinary(binary) {
+		return args
+	}
+
+	appendix := make([]string, 0, 20)
+	endpoint := endpointPath(unit, b.operatingSystem)
+	if endpoint != "" {
+		appendix = append(appendix,
+			"-E", "http.enabled=true",
+			"-E", "http.host="+endpoint,
+		)
+		if b.config.C.Pprof != nil && b.config.C.Pprof.Enabled {
+			appendix = append(appendix,
+				"-E", "http.pprof.enabled=true",
+			)
+		}
+		if b.config.C.HTTP.Buffer != nil && b.config.C.HTTP.Buffer.Enabled {
+			appendix = append(appendix,
+				"-E", "http.buffer.enabled=true",
+			)
+		}
+	}
+
+	loggingPath := loggingPath(unit, b.operatingSystem)
+	if loggingPath != "" {
+		appendix = append(appendix,
+			"-E", "logging.files.path="+filepath.Dir(loggingPath),
+			"-E", "logging.files.name="+filepath.Base(loggingPath),
+			"-E", "logging.files.keepfiles=7",
+			"-E", "logging.files.permission=0640",
+			"-E", "logging.files.interval=1h",
+		)
+
+		if !b.config.C.LogMetrics {
+			appendix = append(appendix,
+				"-E", "logging.metrics.enabled=false",
+			)
+		}
+	}
+
+	return append(args, appendix...)
+}
+
+// Prepare executes steps in order for monitoring to work correctly.
+func (b *BeatsMonitor) Prepare() error {
+	if !b.Enabled() {
+		return nil
+	}
+	drops := make([]string, 0, 2)
+	if b.config.C.MonitorLogs {
+		logsDrop := loggingPath("unit", b.operatingSystem)
+		drops = append(drops, filepath.Dir(logsDrop))
+	}
+
+	if b.config.C.MonitorMetrics {
+		metricsDrop := monitoringDrop(endpointPath("unit", b.operatingSystem))
+		drops = append(drops, metricsDrop)
+	}
+
+	for _, drop := range drops {
+		if drop == "" {
+			continue
+		}
+
+		// skip if already exists
+		if _, err := os.Stat(drop); err != nil {
+			if !os.IsNotExist(err) {
+				return err
+			}
+
+			// create
+			if err := os.MkdirAll(drop, 0775); err != nil {
+				return errors.New(err, fmt.Sprintf("failed to create directory %q", drop))
+			}
+
+			uid, gid := os.Geteuid(), os.Getegid()
+			if err := changeOwner(drop, uid, gid); err != nil {
+				return errors.New(err, fmt.Sprintf("failed to change owner of directory %q", drop))
+			}
+		}
+	}
+
+	return nil
+}
+
+// Cleanup removes the monitoring endpoint file created for the unit.
+func (b *BeatsMonitor) Cleanup(unit string) error {
+	if !b.Enabled() {
+		return nil
+	}
+
+	endpoint := monitoringFile(unit, b.operatingSystem)
+	if endpoint == "" {
+		return nil
+	}
+
+	return os.RemoveAll(endpoint)
+}
+
+func (b *BeatsMonitor) initInputs(cfg map[string]interface{}) {
+	_, found := cfg[inputsKey]
+	if found {
+		return
+	}
+
+	inputsCollection := make([]interface{}, 0)
+	cfg[inputsKey] = inputsCollection
+}
+
+func (b *BeatsMonitor) injectMonitoringOutput(source, dest map[string]interface{}, monitoringOutputName string) error {
+	outputsNode, found := source[outputsKey]
+	if !found {
+		return fmt.Errorf("outputs not part of the config")
+	}
+
+	outputs, ok := outputsNode.(map[string]interface{})
+	if !ok {
+		return fmt.Errorf("outputs not a map")
+	}
+
+	outputNode, found := outputs[monitoringOutputName]
+	if !found {
+		return fmt.Errorf("output %q used for monitoring not found", monitoringOutputName)
+	}
+
+	monitoringOutputs := map[string]interface{}{
+		monitoringOutput: outputNode,
+	}
+
+	dest[outputsKey] = monitoringOutputs
+
+	return nil
+}
+
+func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDToBinary map[string]string, monitoringOutput string) error {
+	monitoringNamespace := b.monitoringNamespace()
+	//fixedAgentName := strings.ReplaceAll(agentName, "-", "_")
+	logsDrop := filepath.Dir(loggingPath("unit", b.operatingSystem))
+
+	streams := []interface{}{
+		map[string]interface{}{
+			idKey: "logs-monitoring-agent",
+			"data_stream": map[string]interface{}{
+				"type":      "logs",
+				"dataset":   "elastic_agent",
+				"namespace": monitoringNamespace,
+			},
+			"paths": []interface{}{
+				filepath.Join(logsDrop, agentName+"-*.ndjson"),
+				filepath.Join(logsDrop, agentName+"-watcher-*.ndjson"),
+			},
+			"index": fmt.Sprintf("logs-elastic_agent-%s", monitoringNamespace),
+			"close": map[string]interface{}{
+				"on_state_change": map[string]interface{}{
+					"inactive": "5m",
+				},
+			},
+			"parsers": []interface{}{
+				map[string]interface{}{
+					"ndjson": map[string]interface{}{
+						"overwrite_keys": true,
+						"message_key":    "message",
+					},
+				},
+			},
+			"processors": []interface{}{
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "data_stream",
+						"fields": map[string]interface{}{
+							"type":      "logs",
+							"dataset":   "elastic_agent",
+							"namespace": monitoringNamespace,
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "event",
+						"fields": map[string]interface{}{
+							"dataset": "elastic_agent",
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "elastic_agent",
+						"fields": map[string]interface{}{
+							"id":       b.agentInfo.AgentID(),
+							"version":  b.agentInfo.Version(),
+							"snapshot": b.agentInfo.Snapshot(),
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "agent",
+						"fields": map[string]interface{}{
+							"id": b.agentInfo.AgentID(),
+						},
+					},
+				},
+				map[string]interface{}{
+					"drop_fields": map[string]interface{}{
+						"fields": []interface{}{
+							"ecs.version", //coming from logger, already added by libbeat
+						},
+						"ignore_missing": true,
+					},
+				}},
+		},
+	}
+	for unit, binaryName := range componentIDToBinary {
+		if !isSupportedBinary(binaryName) {
+			continue
+		}
+
+		fixedBinaryName := strings.ReplaceAll(binaryName, "-", "_")
+		name := strings.ReplaceAll(unit, "-", "_") // conform with index naming policy
+		logFile := loggingPath(unit, b.operatingSystem)
+		streams = append(streams, map[string]interface{}{
+			idKey: "logs-monitoring-" + name,
+			"data_stream": map[string]interface{}{
+				"type":      "logs",
+				"dataset":   fmt.Sprintf("elastic_agent.%s", fixedBinaryName),
+				"namespace": monitoringNamespace,
+			},
+			"index": fmt.Sprintf("logs-elastic_agent.%s-%s", fixedBinaryName, monitoringNamespace),
+			"paths": []interface{}{logFile, logFile + "*"},
+			"close": map[string]interface{}{
+				"on_state_change": map[string]interface{}{
+					"inactive": "5m",
+				},
+			},
+			"parsers": []interface{}{
+				map[string]interface{}{
+					"ndjson": map[string]interface{}{
+						"overwrite_keys": true,
+						"message_key":    "message",
+					},
+				},
+			},
+			"processors": []interface{}{
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "data_stream",
+						"fields": map[string]interface{}{
+							"type":      "logs",
+							"dataset":   fmt.Sprintf("elastic_agent.%s", fixedBinaryName),
+							"namespace": monitoringNamespace,
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "event",
+						"fields": map[string]interface{}{
+							"dataset": fmt.Sprintf("elastic_agent.%s", fixedBinaryName),
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "elastic_agent",
+						"fields": map[string]interface{}{
+							"id":       b.agentInfo.AgentID(),
+							"version":  b.agentInfo.Version(),
+							"snapshot": b.agentInfo.Snapshot(),
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "agent",
+						"fields": map[string]interface{}{
+							"id": b.agentInfo.AgentID(),
+						},
+					},
+				},
+				map[string]interface{}{
+					"drop_fields": map[string]interface{}{
+						"fields": []interface{}{
+							"ecs.version", //coming from logger, already added by libbeat
+						},
+						"ignore_missing": true,
+					},
+				},
+			},
+		})
+	}
+
+	inputs := []interface{}{
+		map[string]interface{}{
+			idKey:        "logs-monitoring-agent",
+			"name":       "logs-monitoring-agent",
+			"type":       "filestream",
+			useOutputKey: monitoringOutput,
+			"data_stream": map[string]interface{}{
+				"namespace": monitoringNamespace,
+			},
+			"streams": streams,
+		},
+	}
+	inputsNode, found := cfg[inputsKey]
+	if !found {
+		return fmt.Errorf("no inputs in config")
+	}
+
+	inputsCfg, ok := inputsNode.([]interface{})
+	if !ok {
+		return fmt.Errorf("inputs is not an array")
+	}
+
+	inputsCfg = append(inputsCfg, inputs...)
+	cfg[inputsKey] = inputsCfg
+	return nil
+}
+
+func (b *BeatsMonitor) monitoringNamespace() string {
+	if ns := b.config.C.Namespace; ns != "" {
+		return ns
+	}
+	return defaultMonitoringNamespace
+}
+func (b *BeatsMonitor) injectMetricsInput(cfg map[string]interface{}, componentIDToBinary map[string]string, monitoringOutputName string) error {
+	monitoringNamespace := b.monitoringNamespace()
+	fixedAgentName := strings.ReplaceAll(agentName, "-", "_")
+	beatsStreams := make([]interface{}, 0, len(componentIDToBinary))
+	streams := []interface{}{
+		map[string]interface{}{
+			idKey: "metrics-monitoring-agent",
+			"data_stream": map[string]interface{}{
+				"type":      "metrics",
+				"dataset":   fmt.Sprintf("elastic_agent.%s", fixedAgentName),
+				"namespace": monitoringNamespace,
+			},
+			"metricsets": []interface{}{"json"},
+			"path":       "/stats",
+			"hosts":      []interface{}{HttpPlusAgentMonitoringEndpoint(b.operatingSystem, b.config.C)},
+			"namespace":  "agent",
+			"period":     "10s",
+			"index":      fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace),
+			"processors": []interface{}{
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "data_stream",
+						"fields": map[string]interface{}{
+							"type":      "metrics",
+							"dataset":   fmt.Sprintf("elastic_agent.%s", fixedAgentName),
+							"namespace": monitoringNamespace,
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "event",
+						"fields": map[string]interface{}{
+							"dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName),
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "elastic_agent",
+						"fields": map[string]interface{}{
+							"id":       b.agentInfo.AgentID(),
+							"version":  b.agentInfo.Version(),
+							"snapshot": b.agentInfo.Snapshot(),
+							"process":  "elastic-agent",
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "agent",
+						"fields": map[string]interface{}{
+							"id": b.agentInfo.AgentID(),
+						},
+					},
+				},
+				map[string]interface{}{
+					"copy_fields": map[string]interface{}{
+						"fields":         httpCopyRules(),
+						"ignore_missing": true,
+						"fail_on_error":  false,
+					},
+				},
+				map[string]interface{}{
+					"drop_fields": map[string]interface{}{
+						"fields": []interface{}{
+							"http",
+						},
+						"ignore_missing": true,
+					},
+				},
+			},
+		},
+	}
+	for unit, binaryName := range componentIDToBinary {
+		if !isSupportedBinary(binaryName) {
+			continue
+		}
+
+		endpoints := []interface{}{prefixedEndpoint(endpointPath(unit, b.operatingSystem))}
+		name := strings.ReplaceAll(unit, "-", "_") // conform with index naming policy
+
+		if isSupportedBeatsBinary(binaryName) {
+			beatsStreams = append(beatsStreams, map[string]interface{}{
+				idKey: "metrics-monitoring-" + name,
+				"data_stream": map[string]interface{}{
+					"type":      "metrics",
+					"dataset":   fmt.Sprintf("elastic_agent.%s", name),
+					"namespace": monitoringNamespace,
+				},
+				"metricsets": []interface{}{"stats", "state"},
+				"hosts":      endpoints,
+				"period":     "10s",
+				"index":      fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace),
+				"processors": []interface{}{
+					map[string]interface{}{
+						"add_fields": map[string]interface{}{
+							"target": "data_stream",
+							"fields": map[string]interface{}{
+								"type":      "metrics",
+								"dataset":   fmt.Sprintf("elastic_agent.%s", name),
+								"namespace": monitoringNamespace,
+							},
+						},
+					},
+					map[string]interface{}{
+						"add_fields": map[string]interface{}{
+							"target": "event",
+							"fields": map[string]interface{}{
+								"dataset": fmt.Sprintf("elastic_agent.%s", name),
+							},
+						},
+					},
+					map[string]interface{}{
+						"add_fields": map[string]interface{}{
+							"target": "elastic_agent",
+							"fields": map[string]interface{}{
+								"id":       b.agentInfo.AgentID(),
+								"version":  b.agentInfo.Version(),
+								"snapshot": b.agentInfo.Snapshot(),
+								"process":  binaryName,
+							},
+						},
+					},
+					map[string]interface{}{
+						"add_fields": map[string]interface{}{
+							"target": "agent",
+							"fields": map[string]interface{}{
+								"id": b.agentInfo.AgentID(),
+							},
+						},
+					},
+				},
+			})
+		}
+
+		streams = append(streams, map[string]interface{}{
+			idKey: "metrics-monitoring-" + name + "-1",
+			"data_stream": map[string]interface{}{
+				"type":      "metrics",
+				"dataset":   fmt.Sprintf("elastic_agent.%s", fixedAgentName),
+				"namespace": monitoringNamespace,
+			},
+			"metricsets": []interface{}{"json"},
+			"hosts":      endpoints,
+			"path":       "/stats",
+			"namespace":  "agent",
+			"period":     "10s",
+			"index":      fmt.Sprintf("metrics-elastic_agent.%s-%s", fixedAgentName, monitoringNamespace),
+			"processors": []interface{}{
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "event",
+						"fields": map[string]interface{}{
+							"dataset": fmt.Sprintf("elastic_agent.%s", fixedAgentName),
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "elastic_agent",
+						"fields": map[string]interface{}{
+							"id":       b.agentInfo.AgentID(),
+							"version":  b.agentInfo.Version(),
+							"snapshot": b.agentInfo.Snapshot(),
+							"process":  name,
+						},
+					},
+				},
+				map[string]interface{}{
+					"add_fields": map[string]interface{}{
+						"target": "agent",
+						"fields": map[string]interface{}{
+							"id": b.agentInfo.AgentID(),
+						},
+					},
+				},
+				map[string]interface{}{
+					"copy_fields": map[string]interface{}{
+						"fields":         httpCopyRules(),
+						"ignore_missing": true,
+						"fail_on_error":  false,
+					},
+				},
+				map[string]interface{}{
+					"drop_fields": map[string]interface{}{
+						"fields": []interface{}{
+							"http",
+						},
+						"ignore_missing": true,
+					},
+				},
+			},
+		})
+	}
+
+	inputs := []interface{}{
+		map[string]interface{}{
+			idKey:        "metrics-monitoring-beats",
+			"name":       "metrics-monitoring-beats",
+			"type":       "beat/metrics",
+			useOutputKey: monitoringOutput,
+			"data_stream": map[string]interface{}{
+				"namespace": monitoringNamespace,
+			},
+			"streams": beatsStreams,
+		},
+		map[string]interface{}{
+			idKey:        "metrics-monitoring-agent",
+			"name":       "metrics-monitoring-agent",
+			"type":       "http/metrics",
+			useOutputKey: monitoringOutput,
+			"data_stream": map[string]interface{}{
+				"namespace": monitoringNamespace,
+			},
+			"streams": streams,
+		},
+	}
+
+	inputsNode, found := cfg[inputsKey]
+	if !found {
+		return fmt.Errorf("no inputs in config")
+	}
+
+	inputsCfg, ok := inputsNode.([]interface{})
!ok {
+		return fmt.Errorf("inputs is not an array")
+	}
+
+	inputsCfg = append(inputsCfg, inputs...)
+	cfg[inputsKey] = inputsCfg
+	return nil
+}
+
+func loggingPath(id, operatingSystem string) string {
+	id = strings.ReplaceAll(id, string(filepath.Separator), "-")
+	if operatingSystem == windowsOS {
+		return fmt.Sprintf(logFileFormatWin, paths.Home(), id)
+	}
+
+	return fmt.Sprintf(logFileFormat, paths.Home(), id)
+}
+
+func endpointPath(id, operatingSystem string) (endpointPath string) {
+	id = strings.ReplaceAll(id, string(filepath.Separator), "-")
+	if operatingSystem == windowsOS {
+		return fmt.Sprintf(mbEndpointFileFormatWin, id)
+	}
+	// unix socket path must be less than 104 characters
+	path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), id))
+	if len(path) < 104 {
+		return path
+	}
+	// place in global /tmp (or /var/tmp on Darwin) to ensure that it's small enough to fit; the current path is way too long
+	// for it to be used, but it needs to be unique per Agent (in the case that multiple are running)
+	return fmt.Sprintf(`unix:///tmp/elastic-agent/%x.sock`, sha256.Sum256([]byte(path)))
+}
+
+func prefixedEndpoint(endpoint string) string {
+	if endpoint == "" || strings.HasPrefix(endpoint, httpPlusPrefix) || strings.HasPrefix(endpoint, httpPrefix) {
+		return endpoint
+	}
+
+	return httpPlusPrefix + endpoint
+}
+
+func monitoringFile(id, operatingSystem string) string {
+	endpoint := endpointPath(id, operatingSystem)
+	if endpoint == "" {
+		return ""
+	}
+	if isNpipe(endpoint) {
+		return ""
+	}
+
+	if isWindowsPath(endpoint) {
+		return endpoint
+	}
+
+	u, _ := url.Parse(endpoint)
+	if u == nil || (u.Scheme != "" && u.Scheme != fileSchemePrefix && u.Scheme != unixSchemePrefix) {
+		return ""
+	}
+
+	if u.Scheme == fileSchemePrefix {
+		return strings.TrimPrefix(endpoint, "file://")
+	}
+
+	if u.Scheme == unixSchemePrefix {
+		return strings.TrimPrefix(endpoint, "unix://")
+	}
+	return endpoint
+}
+
+func isNpipe(path string) bool {
+	return strings.HasPrefix(path, "npipe") || strings.HasPrefix(path, `\\.\pipe\`)
+}
+
+func isWindowsPath(path string) bool {
+	if len(path) < 4 {
+		return false
+	}
+	return unicode.IsLetter(rune(path[0])) && path[1] == ':'
+}
+
+func changeOwner(path string, uid, gid int) error {
+	if runtime.GOOS == windowsOS {
+		// on windows it always returns the syscall.EWINDOWS error, wrapped in *PathError
+		return nil
+	}
+
+	return os.Chown(path, uid, gid)
+}
+
+// HttpPlusAgentMonitoringEndpoint provides an agent monitoring endpoint path with a `http+` prefix.
+func HttpPlusAgentMonitoringEndpoint(operatingSystem string, cfg *monitoringCfg.MonitoringConfig) string {
+	return prefixedEndpoint(AgentMonitoringEndpoint(operatingSystem, cfg))
+}
+
+// AgentMonitoringEndpoint provides an agent monitoring endpoint path.
+func AgentMonitoringEndpoint(operatingSystem string, cfg *monitoringCfg.MonitoringConfig) string {
+	if cfg != nil && cfg.Enabled {
+		return fmt.Sprintf(agentMbEndpointHTTP, cfg.HTTP.Host, cfg.HTTP.Port)
+	}
+
+	if operatingSystem == windowsOS {
+		return agentMbEndpointFileFormatWin
+	}
+	// unix socket path must be less than 104 characters
+	path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), agentName))
+	if len(path) < 104 {
+		return path
+	}
+	// place in global /tmp to ensure that it's small enough to fit; the current path is way too long
+	// for it to be used, but it needs to be unique per Agent (in the case that multiple are running)
+	return fmt.Sprintf(`unix:///tmp/elastic-agent/%x.sock`, sha256.Sum256([]byte(path)))
+}
+
+func httpCopyRules() []interface{} {
+	fromToMap := []interface{}{
+		// I should be able to see the CPU usage on the running machine. Am I using too much CPU?
+		map[string]interface{}{
+			"from": "http.agent.beat.cpu",
+			"to":   "system.process.cpu",
+		},
+
+		// I should be able to see the memory usage of Elastic Agent. Is the Elastic Agent using too much memory?
+		map[string]interface{}{
+			"from": "http.agent.beat.memstats.memory_sys",
+			"to":   "system.process.memory.size",
+		},
+
+		// I should be able to see fd usage. Am I keeping too many files open?
+		map[string]interface{}{
+			"from": "http.agent.beat.handles",
+			"to":   "system.process.fd",
+		},
+
+		// Cgroup reporting
+		map[string]interface{}{
+			"from": "http.agent.beat.cgroup",
+			"to":   "system.process.cgroup",
+		},
+
+		// apm-server specific
+		map[string]interface{}{
+			"from": "http.agent.apm-server",
+			"to":   "apm-server",
+		},
+	}
+
+	return fromToMap
+}
+
+func isSupportedBinary(binaryName string) bool {
+	for _, supportedBinary := range supportedComponents {
+		if strings.EqualFold(supportedBinary, binaryName) {
+			return true
+		}
+	}
+	return false
+}
+
+func isSupportedBeatsBinary(binaryName string) bool {
+	for _, supportedBinary := range supportedBeatsComponents {
+		if strings.EqualFold(supportedBinary, binaryName) {
+			return true
+		}
+	}
+	return false
+}
+
+func monitoringDrop(path string) (drop string) {
+	defer func() {
+		if drop != "" {
+			// Dir call changes separator to the one used in OS
+			// '/var/lib' -> '\var\lib\' on windows
+			baseLen := len(filepath.Dir(drop))
+			drop = drop[:baseLen]
+		}
+	}()
+
+	if strings.Contains(path, "localhost") {
+		return ""
+	}
+
+	path = strings.TrimPrefix(path, httpPlusPrefix)
+
+	// npipe is virtual without a drop
+	if isNpipe(path) {
+		return ""
+	}
+
+	if isWindowsPath(path) {
+		return path
+	}
+
+	u, _ := url.Parse(path)
+	if u == nil || (u.Scheme != "" && u.Scheme != fileSchemePrefix && u.Scheme != unixSchemePrefix) {
+		return ""
+	}
+
+	if u.Scheme == fileSchemePrefix {
+		return strings.TrimPrefix(path, "file://")
+	}
+
+	if u.Scheme == unixSchemePrefix {
+		return strings.TrimPrefix(path, "unix://")
+	}
+
+	return path
+}
diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go
index 77e917c9c3c..6d9b884aea4 100644
--- a/internal/pkg/agent/cmd/inspect.go
+++ b/internal/pkg/agent/cmd/inspect.go
@@ -17,7 +17,10 @@ import (
 	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent-libs/service"
 
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/monitoring"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/configuration"
"github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/agent/transpiler" "github.com/elastic/elastic-agent/internal/pkg/agent/vars" @@ -45,6 +48,7 @@ wait that amount of time before using the variables for the configuration. Run: func(c *cobra.Command, args []string) { var opts inspectConfigOpts opts.variables, _ = c.Flags().GetBool("variables") + opts.includeMonitoring, _ = c.Flags().GetBool("monitoring") opts.variablesWait, _ = c.Flags().GetDuration("variables-wait") ctx, cancel := context.WithCancel(context.Background()) @@ -57,6 +61,7 @@ wait that amount of time before using the variables for the configuration. } cmd.Flags().Bool("variables", false, "render configuration with variables substituted") + cmd.Flags().Bool("monitoring", false, "includes monitoring configuration") cmd.Flags().Duration("variables-wait", time.Duration(0), "wait this amount of time for variables before performing substitution") cmd.AddCommand(newInspectComponentsCommandWithArgs(s, streams)) @@ -115,8 +120,9 @@ variables for the configuration. } type inspectConfigOpts struct { - variables bool - variablesWait time.Duration + variables bool + includeMonitoring bool + variablesWait time.Duration } func inspectConfig(ctx context.Context, cfgPath string, opts inspectConfigOpts, streams *cli.IOStreams) error { @@ -135,13 +141,53 @@ func inspectConfig(ctx context.Context, cfgPath string, opts inspectConfigOpts, return err } - if !opts.variables { + if !opts.variables && !opts.includeMonitoring { return printConfig(fullCfg, l, streams) } cfg, err := getConfigWithVariables(ctx, l, cfgPath, opts.variablesWait) if err != nil { return err } + + if opts.includeMonitoring { + // Load the requirements before trying to load the configuration. These should always load + // even if the configuration is wrong. + platform, err := component.LoadPlatformDetail() + if err != nil { + return fmt.Errorf("failed to gather system information: %w", err) + } + specs, err := component.LoadRuntimeSpecs(paths.Components(), platform) + if err != nil { + return fmt.Errorf("failed to detect inputs and outputs: %w", err) + } + + monitorFn, err := getMonitoringFn(cfg) + if err != nil { + return fmt.Errorf("failed to get monitoring: %w", err) + } + _, binaryMapping, err := specs.PolicyToComponents(cfg) + if err != nil { + return fmt.Errorf("failed to get binary mappings: %w", err) + } + monitorCfg, err := monitorFn(cfg, binaryMapping) + if err != nil { + return fmt.Errorf("failed to get monitoring config: %w", err) + } + + if monitorCfg != nil { + rawCfg := config.MustNewConfigFrom(cfg) + + if err := rawCfg.Merge(monitorCfg); err != nil { + return fmt.Errorf("failed to merge monitoring config: %w", err) + } + + cfg, err = rawCfg.ToMapStr() + if err != nil { + return fmt.Errorf("failed to convert monitoring config: %w", err) + } + } + } + return printMapStringConfig(cfg, streams) } @@ -212,8 +258,13 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen return err } + monitorFn, err := getMonitoringFn(m) + if err != nil { + return fmt.Errorf("failed to get monitoring: %w", err) + } + // Compute the components from the computed configuration. 
- comps, err := specs.ToComponents(m) + comps, err := specs.ToComponents(m, monitorFn) if err != nil { return fmt.Errorf("failed to render components: %w", err) } @@ -271,6 +322,26 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen return printComponents(comps, streams) } +func getMonitoringFn(cfg map[string]interface{}) (component.GenerateMonitoringCfgFn, error) { + config, err := config.NewConfigFrom(cfg) + if err != nil { + return nil, err + } + + agentCfg := configuration.DefaultConfiguration() + if err := config.Unpack(agentCfg); err != nil { + return nil, err + } + + agentInfo, err := info.NewAgentInfoWithLog("error", false) + if err != nil { + return nil, fmt.Errorf("could not load agent info: %w", err) + } + + monitor := monitoring.New(agentCfg.Settings.V1MonitoringEnabled, agentCfg.Settings.DownloadConfig.OS(), agentCfg.Settings.MonitoringConfig, agentInfo) + return monitor.MonitoringConfig, nil +} + func getConfigWithVariables(ctx context.Context, l *logger.Logger, cfgPath string, timeout time.Duration) (map[string]interface{}, error) { caps, err := capabilities.Load(paths.AgentCapabilitiesPath(), l) if err != nil { diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index ad8690d90ee..e6f9ec8d0f7 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -19,10 +19,15 @@ import ( apmtransport "go.elastic.co/apm/transport" "gopkg.in/yaml.v2" + monitoringLib "github.com/elastic/elastic-agent-libs/monitoring" + + "github.com/elastic/elastic-agent-libs/api" "github.com/elastic/elastic-agent-libs/service" + "github.com/elastic/elastic-agent-system-metrics/report" "github.com/elastic/elastic-agent/internal/pkg/agent/application" "github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock" "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/monitoring" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/application/reexec" "github.com/elastic/elastic-agent/internal/pkg/agent/application/secret" @@ -38,6 +43,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/component" "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/version" ) const ( @@ -164,6 +170,14 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { return err } + serverStopFn, err := setupMetrics(logger, cfg.Settings.DownloadConfig.OS(), cfg.Settings.MonitoringConfig, tracer) + if err != nil { + return err + } + defer func() { + _ = serverStopFn() + }() + diagHooks := diagnostics.GlobalHooks() diagHooks = append(diagHooks, coord.DiagnosticHooks()...) 
control := server.New(logger.Named("control"), agentInfo, coord, tracer, diagHooks) @@ -327,7 +341,7 @@ func tryDelayEnroll(ctx context.Context, logger *logger.Logger, cfg *configurati enrollPath := paths.AgentEnrollFile() if _, err := os.Stat(enrollPath); err != nil { // no enrollment file exists or failed to stat it; nothing to do - return cfg, nil //nolint:nilerr // there is nothing to do + return cfg, nil } contents, err := ioutil.ReadFile(enrollPath) if err != nil { @@ -432,3 +446,29 @@ func initTracer(agentName, version string, mcfg *monitoringCfg.MonitoringConfig) Transport: ts, }) } + +func setupMetrics( + logger *logger.Logger, + operatingSystem string, + cfg *monitoringCfg.MonitoringConfig, + tracer *apm.Tracer, +) (func() error, error) { + if err := report.SetupMetrics(logger, agentName, version.GetDefaultVersion()); err != nil { + return nil, err + } + + // start server for stats + endpointConfig := api.Config{ + Enabled: true, + Host: monitoring.AgentMonitoringEndpoint(operatingSystem, cfg), + } + + s, err := monitoring.NewServer(logger, endpointConfig, monitoringLib.GetNamespace, tracer) + if err != nil { + return nil, errors.New(err, "could not start the HTTP server for the API") + } + s.Start() + + // return server stopper + return s.Stop, nil +} diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 0a211101c4d..3b509270344 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -21,18 +21,20 @@ type SettingsConfig struct { LoggingConfig *logger.Config `yaml:"logging,omitempty" config:"logging,omitempty" json:"logging,omitempty"` // standalone config - Reload *ReloadConfig `config:"reload" yaml:"reload" json:"reload"` - Path string `config:"path" yaml:"path" json:"path"` + Reload *ReloadConfig `config:"reload" yaml:"reload" json:"reload"` + Path string `config:"path" yaml:"path" json:"path"` + V1MonitoringEnabled bool `config:"v1_monitoring_enabled" yaml:"v1_monitoring_enabled" json:"v1_monitoring_enabled"` } // DefaultSettingsConfig creates a config with pre-set default values. 
func DefaultSettingsConfig() *SettingsConfig {
 	return &SettingsConfig{
-		ProcessConfig:    process.DefaultConfig(),
-		DownloadConfig:   artifact.DefaultConfig(),
-		LoggingConfig:    logger.DefaultLoggingConfig(),
-		MonitoringConfig: monitoringCfg.DefaultConfig(),
-		GRPC:             DefaultGRPCConfig(),
-		Reload:           DefaultReloadConfig(),
+		ProcessConfig:       process.DefaultConfig(),
+		DownloadConfig:      artifact.DefaultConfig(),
+		LoggingConfig:       logger.DefaultLoggingConfig(),
+		MonitoringConfig:    monitoringCfg.DefaultConfig(),
+		GRPC:                DefaultGRPCConfig(),
+		Reload:              DefaultReloadConfig(),
+		V1MonitoringEnabled: true,
 	}
 }
diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go
index df5c11d747c..fd99c3bbb82 100644
--- a/internal/pkg/agent/install/uninstall.go
+++ b/internal/pkg/agent/install/uninstall.go
@@ -167,7 +167,7 @@ func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Confi
 	if err != nil {
 		return nil, errors.New("failed to create a map from config", err)
 	}
-	allComps, err := specs.ToComponents(mm)
+	allComps, err := specs.ToComponents(mm, nil)
 	if err != nil {
 		return nil, fmt.Errorf("failed to render components: %w", err)
 	}
diff --git a/internal/pkg/agent/vars/vars.go b/internal/pkg/agent/vars/vars.go
index 7f0aff1c329..b685583895f 100644
--- a/internal/pkg/agent/vars/vars.go
+++ b/internal/pkg/agent/vars/vars.go
@@ -10,11 +10,12 @@ import (
 	"fmt"
 	"time"
 
+	"golang.org/x/sync/errgroup"
+
 	"github.com/elastic/elastic-agent/internal/pkg/agent/transpiler"
 	"github.com/elastic/elastic-agent/internal/pkg/composable"
 	"github.com/elastic/elastic-agent/internal/pkg/config"
 	"github.com/elastic/elastic-agent/pkg/core/logger"
-	"golang.org/x/sync/errgroup"
 )
 
 func WaitForVariables(ctx context.Context, l *logger.Logger, cfg *config.Config, wait time.Duration) ([]*transpiler.Vars, error) {
diff --git a/internal/pkg/core/monitoring/config/config.go b/internal/pkg/core/monitoring/config/config.go
index cbaee09c9e2..bf5edd9716f 100644
--- a/internal/pkg/core/monitoring/config/config.go
+++ b/internal/pkg/core/monitoring/config/config.go
@@ -52,6 +52,7 @@ func DefaultConfig() *MonitoringConfig {
 		MonitorTraces: false,
 		HTTP: &MonitoringHTTPConfig{
 			Enabled: false,
+			Host:    "localhost",
 			Port:    defaultPort,
 		},
 		Namespace: defaultNamespace,
diff --git a/pkg/component/component.go b/pkg/component/component.go
index 20969e9a11d..f77b27d6fcf 100644
--- a/pkg/component/component.go
+++ b/pkg/component/component.go
@@ -17,6 +17,8 @@ import (
 	"github.com/elastic/elastic-agent/pkg/utils"
 )
 
+// GenerateMonitoringCfgFn generates a monitoring configuration for the given policy, using the
+// provided mapping of component IDs to binary names.
+type GenerateMonitoringCfgFn func(map[string]interface{}, map[string]string) (map[string]interface{}, error)
+
 const (
 	// defaultUnitLogLevel is the default log level that a unit will get if one is not defined.
 	defaultUnitLogLevel = client.UnitLogLevelInfo
@@ -79,19 +81,45 @@ type Component struct {
 }
 
 // ToComponents returns the components that should be running based on the policy and the current runtime specification.
-func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}) ([]Component, error) {
-	outputsMap, err := toIntermediate(policy)
+func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}, monitoringInjector GenerateMonitoringCfgFn) ([]Component, error) {
+	components, binaryMapping, err := r.PolicyToComponents(policy)
 	if err != nil {
 		return nil, err
 	}
+
+	if monitoringInjector != nil {
+		monitoringCfg, err := monitoringInjector(policy, binaryMapping)
+		if err != nil {
+			return nil, fmt.Errorf("failed to inject monitoring: %w", err)
+		}
+
+		if monitoringCfg != nil {
+			// monitoring is enabled
+			monitoringComps, _, err := r.PolicyToComponents(monitoringCfg)
+			if err != nil {
+				return nil, fmt.Errorf("failed to generate monitoring components: %w", err)
+			}
+
+			components = append(components, monitoringComps...)
+		}
+	}
+
+	return components, nil
+}
+
+// PolicyToComponents generates the components that should run for the given policy, along with a
+// mapping of component ID to the binary name that backs it.
+func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Component, map[string]string, error) {
+	outputsMap, err := toIntermediate(policy)
+	if err != nil {
+		return nil, nil, err
+	}
 	if outputsMap == nil {
-		return nil, nil
+		return nil, nil, nil
 	}
 
 	// set the runtime variables that are available in the input specification runtime checks
 	hasRoot, err := utils.HasRoot()
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	vars, err := transpiler.NewVars(map[string]interface{}{
 		"runtime": map[string]interface{}{
@@ -109,10 +137,11 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}) ([]Component,
 		},
 	}, nil)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 
 	var components []Component
+	componentIdsInputMap := make(map[string]string)
 	for outputName, output := range outputsMap {
 		if !output.enabled {
 			// skip; not enabled
@@ -179,10 +208,12 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}) ([]Component,
 					Spec:  inputSpec,
 					Units: units,
 				})
+				componentIdsInputMap[componentID] = inputSpec.BinaryName
 			}
 		}
 	}
-	return components, nil
+
+	return components, componentIdsInputMap, nil
 }
 
 // toIntermediate takes the policy and returns it into an intermediate representation that is easier to map into a set
diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go
index f43dfc244c6..0d9e97d4c94 100644
--- a/pkg/component/component_test.go
+++ b/pkg/component/component_test.go
@@ -3,7 +3,6 @@
 // you may not use this file except in compliance with the Elastic License.
 
 //nolint:dupl // duplicate code is in test cases
-
 package component
 
 import (
@@ -915,7 +914,7 @@ func TestToComponents(t *testing.T) {
 			runtime, err := LoadRuntimeSpecs(filepath.Join("..", "..", "specs"), scenario.Platform, SkipBinaryCheck())
 			require.NoError(t, err)
 
-			result, err := runtime.ToComponents(scenario.Policy)
+			result, err := runtime.ToComponents(scenario.Policy, nil)
 			if scenario.Err != "" {
 				assert.Equal(t, scenario.Err, err.Error())
 			} else {
diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go
index 8c1722d657f..2cabe906b1f 100644
--- a/pkg/component/runtime/command.go
+++ b/pkg/component/runtime/command.go
@@ -35,6 +35,12 @@ const (
 	envAgentComponentInputType = "AGENT_COMPONENT_INPUT_TYPE"
 )
 
+// MonitoringManager is the interface the command runtime uses to hook monitoring into the
+// component lifecycle: enriching the command arguments, preparing resources before start, and
+// cleaning them up on stop.
+type MonitoringManager interface {
+	EnrichArgs(string, string, []string) []string
+	Prepare() error
+	Cleanup(string) error
+}
+
 type procState struct {
 	proc  *process.Info
 	state *os.ProcessState
@@ -43,6 +49,7 @@ type procState struct {
 
 // CommandRuntime provides the command runtime for running a component as a subprocess.
type CommandRuntime struct { current component.Component + monitor MonitoringManager ch chan ComponentState actionCh chan actionMode @@ -58,7 +65,7 @@ type CommandRuntime struct { } // NewCommandRuntime creates a new command runtime for the provided component. -func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { +func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) { if comp.Spec.Spec.Command == nil { return nil, errors.New("must have command defined in specification") } @@ -70,6 +77,7 @@ func NewCommandRuntime(comp component.Component) (ComponentRuntime, error) { compCh: make(chan component.Component), actionState: actionStop, state: newComponentState(&comp), + monitor: monitor, }, nil } @@ -279,7 +287,18 @@ func (c *CommandRuntime) start(comm Communicator) error { if err != nil { return fmt.Errorf("execution of component prevented: %w", err) } - proc, err := process.Start(path, uid, gid, cmdSpec.Args, env, attachOutErr, dirPath(workDir)) + + if err := c.monitor.Prepare(); err != nil { + return err + } + args := c.monitor.EnrichArgs(c.current.ID, c.current.Spec.BinaryName, cmdSpec.Args) + + // differentiate data paths + dataPath := filepath.Join(paths.Home(), "run", c.current.ID) + _ = os.MkdirAll(dataPath, 0755) + args = append(args, "-E", "path.data="+dataPath) + + proc, err := process.Start(path, uid, gid, args, env, attachOutErr, dirPath(workDir)) if err != nil { return err } @@ -303,6 +322,10 @@ func (c *CommandRuntime) stop(ctx context.Context) error { } return nil } + + // cleanup reserved resources related to monitoring + defer c.monitor.Cleanup(c.current.ID) //nolint:errcheck // this is ok + cmdSpec := c.current.Spec.Spec.Command go func(info *process.Info, timeout time.Duration) { t := time.NewTimer(timeout) diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go index b475ddef4ca..d713a9a10c4 100644 --- a/pkg/component/runtime/manager.go +++ b/pkg/component/runtime/manager.go @@ -73,6 +73,7 @@ type Manager struct { listenAddr string agentInfo *info.AgentInfo tracer *apm.Tracer + monitor MonitoringManager netMx sync.RWMutex listener net.Listener @@ -95,7 +96,7 @@ type Manager struct { } // NewManager creates a new manager. 
-func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentInfo, tracer *apm.Tracer) (*Manager, error) { +func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentInfo, tracer *apm.Tracer, monitor MonitoringManager) (*Manager, error) { ca, err := authority.NewCA() if err != nil { return nil, err @@ -110,6 +111,7 @@ func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentI current: make(map[string]*componentRuntimeState), subscriptions: make(map[string][]*Subscription), errCh: make(chan error), + monitor: monitor, } return m, nil } @@ -619,7 +621,7 @@ func (m *Manager) update(components []component.Component, teardown bool) error } else { // new component; create its runtime logger := m.logger.Named(fmt.Sprintf("component.runtime.%s", comp.ID)) - state, err := newComponentRuntimeState(m, logger, comp) + state, err := newComponentRuntimeState(m, logger, m.monitor, comp) if err != nil { return fmt.Errorf("failed to create new component %s: %w", comp.ID, err) } diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index 1475e6fd094..b71a24c35e0 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -3,7 +3,6 @@ // you may not use this file except in compliance with the Elastic License. //nolint:dupl // duplicate code is in test cases - package runtime import ( @@ -29,7 +28,8 @@ import ( ) const ( - exeExt = ".exe" + exeExt = ".exe" + errActionUndefined = "action undefined" ) var ( @@ -50,7 +50,7 @@ func TestManager_SimpleComponentErr(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -152,7 +152,7 @@ func TestManager_FakeInput_StartStop(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -277,7 +277,7 @@ func TestManager_FakeInput_BadUnitToGood(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -448,7 +448,7 @@ func TestManager_FakeInput_GoodUnitToBad(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -603,7 +603,7 @@ func TestManager_FakeInput_Configure(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -729,7 +729,7 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, 
err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -887,7 +887,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1017,7 +1017,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1156,7 +1156,7 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1277,7 +1277,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1340,7 +1340,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { actionCancel() if err == nil { subErrCh <- fmt.Errorf("should have returned an error") - } else if err.Error() != "action undefined" { + } else if err.Error() != errActionUndefined { subErrCh <- fmt.Errorf("should have returned error: action undefined") } else { subErrCh <- nil @@ -1401,7 +1401,7 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1613,7 +1613,7 @@ func TestManager_FakeInput_LogLevel(t *testing.T) { defer cancel() ai, _ := info.NewAgentInfo(true) - m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer) + m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr()) require.NoError(t, err) errCh := make(chan error) go func() { @@ -1691,7 +1691,7 @@ func TestManager_FakeInput_LogLevel(t *testing.T) { actionCancel() if err == nil { subErrCh <- fmt.Errorf("should have returned an error") - } else if err.Error() != "action undefined" { + } else if err.Error() != errActionUndefined { subErrCh <- fmt.Errorf("should have returned error: action undefined") } else { subErrCh <- nil @@ -1830,3 +1830,11 @@ func testBinary(t *testing.T) string { } return binaryPath } + +type testMonitoringManager struct{} + +func newTestMonitoringMgr() *testMonitoringManager { return &testMonitoringManager{} } + +func (*testMonitoringManager) EnrichArgs(_ string, _ string, args []string) []string { return 
args }
+func (*testMonitoringManager) Prepare() error { return nil }
+func (*testMonitoringManager) Cleanup(string) error { return nil }
diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go
index 43731645b5f..58c7d1ed153 100644
--- a/pkg/component/runtime/runtime.go
+++ b/pkg/component/runtime/runtime.go
@@ -54,11 +54,11 @@ type ComponentRuntime interface {
 }
 
 // NewComponentRuntime creates the proper runtime based on the input specification for the component.
-func NewComponentRuntime(comp component.Component) (ComponentRuntime, error) {
+func NewComponentRuntime(comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) {
 	if comp.Err != nil {
 		return NewFailedRuntime(comp)
 	} else if comp.Spec.Spec.Command != nil {
-		return NewCommandRuntime(comp)
+		return NewCommandRuntime(comp, monitor)
 	} else if comp.Spec.Spec.Service != nil {
 		return nil, errors.New("service component runtime not implemented")
 	}
@@ -82,12 +82,12 @@ type componentRuntimeState struct {
 	actions map[string]func(*proto.ActionResponse)
 }
 
-func newComponentRuntimeState(m *Manager, logger *logger.Logger, comp component.Component) (*componentRuntimeState, error) {
+func newComponentRuntimeState(m *Manager, logger *logger.Logger, monitor MonitoringManager, comp component.Component) (*componentRuntimeState, error) {
 	comm, err := newRuntimeComm(logger, m.getListenAddr(), m.ca, m.agentInfo)
 	if err != nil {
 		return nil, err
 	}
-	runtime, err := NewComponentRuntime(comp)
+	runtime, err := NewComponentRuntime(comp, monitor)
 	if err != nil {
 		return nil, err
 	}
diff --git a/specs/metricbeat.spec.yml b/specs/metricbeat.spec.yml
index a744cdb535f..b160a4f29e7 100644
--- a/specs/metricbeat.spec.yml
+++ b/specs/metricbeat.spec.yml
@@ -155,3 +155,9 @@ inputs:
     outputs: *outputs
     command:
       args: *args
+  - name: http/metrics
+    description: "HTTP metrics"
+    platforms: *platforms
+    outputs: *outputs
+    command:
+      args: *args
diff --git a/version/version.go b/version/version.go
index f5101a34efa..60029093c35 100644
--- a/version/version.go
+++ b/version/version.go
@@ -4,4 +4,4 @@
 
 package version
 
-const defaultBeatVersion = "8.5.0"
+const defaultBeatVersion = "8.6.0"

From 06020918dee32333f82b5b23782d450e851bcb29 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Thu, 20 Oct 2022 13:38:18 -0400
Subject: [PATCH 28/49] [v2] Merge main on Oct. 18 (#1557)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* [Automation] Update elastic stack version to 8.4.0-40cff009 for testing (#557) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-5e6770b1 for testing (#564) Co-authored-by: apmmachine * Fix regression and use comma separated values (#560) Fix regression from https://github.com/elastic/elastic-agent/pull/509 * Change in Jenkinsfile will trigger k8s run (#568) * [Automation] Update elastic stack version to 8.4.0-da5a1c6d for testing (#573) Co-authored-by: apmmachine * Add `@metadata.input_id` and `@metadata.stream_id` when injecting streams (#527) These two values are going to be used in the shipper to identify where an event came from in order to apply processors accordingly; a sketch of such a processor appears after this change list. Also, added test cases for the processor to verify the change and updated test cases with the new processor. * Add filemod times to contents of diagnostics collect command (#570) * Add filemod times to contents of diagnostics collect command Add filemod times to the files and directories in the zip archive.
Log files (and sub dirs) will use the modtime returned by the fileinfo for the source. Others will use the timestamp from when the zip is created. * Fix linter * [Automation] Update elastic stack version to 8.4.0-b13123ee for testing (#581) Co-authored-by: apmmachine * Fix Agent upgrade 8.2->8.3 (#578) * Fix Agent upgrade 8.2->8.3 * Improve the upgrade encryption handling. Add .yml files cleanup. * Rollback ActionUpgrade to action_id, add MarkerActionUpgrade adapter struct for marker serialization compatibility * Update containerd (#577) * [Automation] Update elastic stack version to 8.4.0-4fe26f2a for testing (#591) Co-authored-by: apmmachine * Set explicit ExitTimeOut for MacOS agent launchd plist (#594) * Set explicit ExitTimeOut for MacOS agent launchd plist * [Automation] Update elastic stack version to 8.4.0-2e32a640 for testing (#599) Co-authored-by: apmmachine * ci: enable build notifications as GitHub issues (#595) * status identifies failing component, fleet gateway may report degraded, liveness endpoint added (#569) * Add liveness endpoint Add /liveness route to metrics server. This route will report the status from pkg/core/status; a probe sketch for this endpoint appears after this change list. fleet-gateway will now report a degraded state if a checkin fails. This may not propagate to fleet-server, as a failed checkin means communications between the agent and the server are not working. It may also lead to the server reporting degraded for up to 30s (fleet-server polling time) when the agent is able to successfully connect. * linter fix * add nolint directive * Linter fix * Review feedback, add doc strings * Rename noop controller file to _test file * [Automation] Update elastic stack version to 8.4.0-722a7d79 for testing (#607) Co-authored-by: apmmachine * ci: enable flaky test detector (#605) * [Automation] Update elastic stack version to 8.4.0-210dd487 for testing (#620) Co-authored-by: apmmachine * mergify: remove backport automation for non active branches (#615) * chore: use elastic-agent profile to run the E2E tests (#610) * [Automation] Update elastic stack version to 8.4.0-a6aa9f3b for testing (#631) Co-authored-by: apmmachine * add macros pointing to new agent's repo and fix old macro calls (#458) * Add mount of /etc/machine-id for managed Agent in k8s (#530) * Set hostPID=true for managed agent in k8s (#528) * Set hostPID=true for managed agent in k8s * Add comment on hostPID.
* [Automation] Update elastic stack version to 8.4.0-86cc80f3 for testing (#648) Co-authored-by: apmmachine * Update elastic-agent-libs version: includes restriction on default VerificationMode to `full` (#521) * update version * mage fmt update * update dependency * update changelog * redact sensitive information in diagnostics collect command (#566) * Support Cloudbeat regex input type (#638) * support input type with regex * Update supported.go * Changing the regex to support backward compatibility * Disable flaky test download test (#641) * [Automation] Update elastic stack version to 8.4.0-3d206b5d for testing (#656) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-3ad82aa8 for testing (#661) Co-authored-by: apmmachine * jjbb: exclude allowed branches, tags and PRs (#658) cosmetic change in the description and boolean based * Update elastic-agent-project-board.yml (#649) * ci: fix labels that clash with the Orka workers (#659) * [Automation] Update elastic stack version to 8.4.0-03bd6f3f for testing (#668) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-533f1e30 for testing (#675) Co-authored-by: apmmachine * Osquerybeat: Fix osquerybeat is not running with logstash output (#674) * [Automation] Update elastic stack version to 8.4.0-d0a4da44 for testing (#684) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-dd98ded4 for testing (#703) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-164d9a10 for testing (#705) Co-authored-by: apmmachine * Add missing license headers (#711) * [Automation] Update elastic stack version to 8.4.0-00048b66 for testing (#713) Co-authored-by: apmmachine * Allow - in eql variable names (#710) * fix to allow dashes in variable names in EQL expressions extend eql to allow the '-' char to appear in variable names, i.e., ${data.some-var} and additional test cases to eql, the transpiler, and the k8s provider to verify this works. Note that the bug was caused by the EQL limitation; the other test cases were added when attempting to find it. * Regenerate grammar with antlr 4.7.1, add CHANGELOG * Fix linter issue * Fix typo * Fix transpiler to allow : in dynamic variables. (#680) Fix transpiler regex to allow ':' characters in dynamic variables so that users can input "${dynamic.lookup|'fallback.here'}". Co-authored-by: Aleksandr Maus * Fix for the filebeat spec file picking up packetbeat inputs (#700) * Reproduce filebeat picking up packetbeat inputs * Filebeat: filter inputs as first input transform. Move input filtering to be the first input transformation that occurs in the filebeat spec file. Fixes https://github.com/elastic/elastic-agent/issues/427. * Update changelog. * [Automation] Update elastic stack version to 8.4.0-3cd57abb for testing (#724) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-a324b98b for testing (#727) Co-authored-by: apmmachine * ci: run on MacOS12 (#696) * [Automation] Update elastic stack version to 8.4.0-31315ca3 for testing (#732) Co-authored-by: apmmachine * fix typo on package command (#734) This commit fixes the typo in the package command on the README.md. * Allow / to be used in variable names (#718) * Allow the / character to be used in variable names. Allow / to be used in variable names from dynamic providers and eql expressions. Ensure that k8s providers can provide variables with slashes in their names.
* run antlr4 * Fix tests * Fix Elastic Agent non-fleet broken upgrade between 8.3.x releases (#701) * Fix Elastic Agent non-fleet broken upgrade between 8.3.x releases * Migrates vault directory on linux and windows to the top directory of the agent, so it can be shared without needing the upgrade handler call, like for example with side-by-side install/upgrade from .rpm/.deb * Extended vault to allow read-only open, useful when the vault at a particular location needs to be only read, not created. * Correct the typo in the log messages * Update lint flagged function comment with 'unused', was flagged with 'deadcode' on the previous run * Address code review feedback * Add missing import for linux utz * Change vault path from Top() to Config(); this is a better location, next to fleet.enc, based on the install/upgrade testing with .rpm/.deb installs * Fix the missing state migration for .rpm/.deb upgrade. The post install script now performs the migration and creates the symlink after that. * Fix typo in the postinstall script * Update the vault migration code, add the agent configuration match check with the agent secret * [Automation] Update elastic stack version to 8.4.0-31269fd2 for testing (#746) Co-authored-by: apmmachine * wrap errors and fix some docs typo and convention (#743) * automate the ironbank docker context generation (#679) * Update README.md Adding M1 variable to export to be able to build AMD images * fix flaky (#730) * Add filestream ID on standalone kubernetes manifest (#742) This commit adds unique IDs for the filestream inputs used by the Kubernetes integration in the Elastic-Agent standalone Kubernetes configuration/manifest file. * Alter github action to run on different OSs (#769) Alter the linter action to run on different OSs instead of on linux with the $GOOS env var. * [Automation] Update elastic stack version to 8.4.0-d058e92f for testing (#771) Co-authored-by: apmmachine * elastic-agent manifests: add comments; add cloudnative team as a codeowner for the k8s manifests (#708) * managed elastic-agent: add comments; add cloudnative team as a codeowner for the k8s manifests Signed-off-by: Tetiana Kravchenko * add comments to the standalone elastic-agent, similar to the documentation we have https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html Signed-off-by: Tetiana Kravchenko * Apply suggestions from code review Co-authored-by: Michael Katsoulis Co-authored-by: Andrew Gizas * remove comment for FLEET_ENROLLMENT_TOKEN; use Needed everywhere instead of Required Signed-off-by: Tetiana Kravchenko * rephrase regarding accessing kube-state-metrics when using third party tools, like kube-rbac-proxy Signed-off-by: Tetiana Kravchenko * run make check Signed-off-by: Tetiana Kravchenko * keep manifests in sync to pass ci check Signed-off-by: Tetiana Kravchenko * add info on where to find FLEET_URL and FLEET_ENROLLMENT_TOKEN Signed-off-by: Tetiana Kravchenko * add links to elastic-agent documentation Signed-off-by: Tetiana Kravchenko * update comment on FLEET_ENROLLMENT_TOKEN Signed-off-by: Tetiana Kravchenko Co-authored-by: Michael Katsoulis Co-authored-by: Andrew Gizas * [Elastic-Agent] Added source uri reloading (#686) * Update will cleanup unneeded artifacts. (#752) * Update will cleanup unneeded artifacts. The update process will clean up unneeded artifacts. When an update starts, all artifacts that do not have the current version number in their name will be removed. If artifact retrieval fails, downloaded artifacts are removed.
On a successful upgrade, all contents of the downloads dir will be removed. * Clean up linter warnings * Wrap errors * cleanup tests * Fix passed version * Use os.RemoveAll * ci: propagate e2e-testing errors (#695) * [Release] add-backport-next (#784) * Update main to 8.5.0 (#793) * [Automation] Update go release version to 1.17.12 (#726) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.4.0-60171339 for testing (#799) Co-authored-by: apmmachine * update dependency elastic/go-structform from v0.0.9 to v0.0.10 (#802) Signed-off-by: Florian Lehner * Fix unpacking of artifact config (#776) Fix unpacking of artifact config (#776) * [Automation] Update elastic stack version to 8.5.0-c54c3404 for testing (#826) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-7dbc10f8 for testing (#833) Co-authored-by: apmmachine * Fix RPM/DEB clean install (#816) * Fix RPM/DEB clean install * Improve the post install script * Do not try to copy the state files if the agent directory is the same; this causes the error. * Check the existence of the symlink instead of the file it is pointing to for the state file migration. * Update check for symlink existence for the cases where the symlink points to a non-existent file * fix path for auto generated spec file (#859) Signed-off-by: Florian Lehner * Reload downloader client on config change (#848) Reload downloader client on config change (#848) * Bundle elastic-agent.app for MacOS, needed to be able to enable the … (#714) * Bundle elastic-agent.app for MacOS, needed to be able to enable the Full Disk Access * Calm down the linter * Fix pathing for windows unit test * crossbuild: add fix to set ulimit for debian images (#856) Signed-off-by: Florian Lehner * [Heartbeat] Cleanup docker install / always add playwright deps (#764) This is the agent counterpart to elastic/beats#32122 Refactors Dockerfile handling of synthetics deps to rely on playwright install-deps rather than us manually keeping up to date with those. This should fix issues with newer playwrights needing additional deps. This also cleans up the Dockerfile a good amount, and fixes indentation. Finally, this removes the unused Dockerfile.elastic-agent.tmpl file since agent is now its own repo. It also cleans up some other metadata that no longer does anything. No changelog is specified because no user facing changes are present. * [Automation] Update elastic stack version to 8.5.0-41aadc32 for testing (#889) Co-authored-by: apmmachine * Fix/panic with composable renderer (#823) * Fix a panic with wg passed to the composable object In the code to retrieve the variables from the configuration files we need to pass an execution callback; this callback will be called in a goroutine. This callback can be executed multiple times until the composable renderer is stopped. There was a problem in the code that caused the callback to be called multiple times, which drove the waitgroup's internal counter to a negative value. This commit changes the behavior: it starts the composable renderer and gives it a callback; when the callback receives the variables, it stops the composable's Run method using the context. This ensures that the callback will be called a single time and that the variables are correctly retrieved. Fixes: #806 * [Automation] Update go release version to 1.18.5 (#832) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-60a4c029 for testing (#899) Co-authored-by: apmmachine * Add control-plane toleration to Agent K8S manifests.
(#864) * Add toleration to elastic-agent Kubernetes manifests. The toleration with key node-role.kubernetes.io/control-plane is set to replace the deprecated toleration with key node-role.kubernetes.io/master, which will be removed by Kubernetes v1.25 * Remove outdated "master" node terminology. * install mage with go install (#936) * Cloudnative ci automation (#837) This commit provides the relevant Jenkins CI automation to open pull requests to the kibana GitHub repository in order to keep the Cloud-Native teams' manifests in sync with the manifests that are used in the Fleet UI. For full information check #706. Updated .ci/Jenkins file that is triggered upon PR requests of /elastic-agent/deploy/kubernetes/* changes. Updated Makefile to add functionality needed to create the extra files for the new PRs to the kibana remote repository * Reduce memory footprint by reordering struct elements (#804) * Reduce memory footprint by reordering struct elements * rename struct element for linter Signed-off-by: Florian Lehner Signed-off-by: Florian Lehner * [Automation] Update elastic stack version to 8.5.0-6b9f92c0 for testing (#948) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-0616acda for testing (#963) Co-authored-by: apmmachine * Clarify that this repo is not only docs (#969) * Add Filebeat lumberjack input to spec (#959) Make the lumberjack input available from Agent. Relates: https://github.com/elastic/beats/pull/32175 * [Automation] Update elastic stack version to 8.5.0-dd6f2bb0 for testing (#978) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-feb644de for testing (#988) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-7783a03c for testing (#1004) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-17b8a62d for testing (#1014) Co-authored-by: apmmachine * update ironbank image product name (#1009) This is required to automate the creation of the ironbank merge requests, as the ubireleaser is using this field to compute the elastic-agent artifact url. For example, it is now trying to retrieve https://artifacts.elastic.co/downloads/beats/elastic-agent-8.4.0-linux-x86_64.tar.gz instead of https://artifacts.elastic.co/downloads/beats/elastic-agent/elastic-agent-8.4.0-linux-x86_64.tar.gz * ci: add extended support for windows (#683) * [Automation] Update elastic stack version to 8.5.0-9aed3b11 for testing (#1030) Co-authored-by: apmmachine * Cloudnative ci automation (#1035) * Updating Jenkinsfile and Makefile to open PR * Adding needed token-id * [Automation] Update elastic stack version to 8.5.0-fedc3e60 for testing (#1054) Co-authored-by: apmmachine * Testing PR creation for 706 (#1049) * Fix lookup issues with inputs.d fragment yml (#840) * Fix lookup issues with inputs.d fragment yml The Elastic Agent was looking next to the binary for the `inputs.d` folder; instead it should look in the `Home` folder where the Elastic Agent symlink is located.
Fixes: #663 * Changelog * Fix input.d path, tie to the agent Config() directory * Update CHANGELOG to reflect that the agent configuration directory is used to locate the inputs.d directory Co-authored-by: Aleksandr Maus * [Automation] Update elastic stack version to 8.5.0-b5001a6d for testing (#1064) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-1bd77fc1 for testing (#1082) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-167dfc80 for testing (#1091) Co-authored-by: apmmachine * Adding support for v1.25.0 k8s (#1044) * Adding support for v1.25.0 k8s * [Automation] Update elastic stack version to 8.5.0-6b7dda2d for testing (#1101) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.5.0-4140365c for testing (#1114) Co-authored-by: apmmachine * Remove experimental warning log in upgrade command (#1106) * Update go.mod to Go 1.18, update notice. (#1120) * Remove the fleet reporter (#1130) * Remove the fleet reporter Remove the fleet-reporter so that checkins no longer deliver the event list. * add CHANGELOG fix tests * [Automation] Update elastic stack version to 8.5.0-589a4a10 for testing (#1147) Co-authored-by: apmmachine * Avoid reporting `Unhealthy` on fleet connectivity issues (#1152) Avoid reporting `Unhealthy` on fleet connectivity issues (#1152) * ci: enable MacOS M1 stages (#1123) * [Automation] Update go release version to 1.18.6 (#1143) * [Automation] Update elastic stack version to 8.5.0-37418cf3 for testing (#1165) Co-authored-by: apmmachine * Remove mage notice in favour of make notice (#1108) The current implementation of mage notice is not working because it was never finalised; the fact that both it and `make notice` exist only generates confusion. This commit removes the `mage notice` and documents that `make notice` should be used instead for the time being. In the long run we want to use the implementation on `elastic-agent-libs`, however it is not working at the moment. Closes #1107 Co-authored-by: Craig MacKenzie * ci: run e2e-testing at the end (#1169) * ci: move macos to github actions (#1175) * [Automation] Update elastic stack version to 8.5.0-fcf3d4c2 for testing (#1183) Co-authored-by: apmmachine * Add support for hints' based autodiscovery in kubernetes provider (#698) * ci: increase timeout (#1190) * Fixing condition for PR creation (#1188) * Fix leftover log level (#1194) * [automation] Publish kubernetes templates for elastic-agent (#1192) Co-authored-by: apmmachine * ci: force GO_VERSION (#1204) * Fix whitespaces in vault_darwin.c (#1206) * Update kubernetes templates for elastic-agent [templates.d] (#1231) * Use at least warning level for all status logs (#1218) * Update k8s manifests to leverage hints (#1202) * Add Go 1.18 upgrade to breaking changes section. (#1216) * Add Go 1.18 upgrade to breaking changes section. * Fix the PR number in the changelog. * [Release] add-backport-next (#1254) * Bump version to 8.6.0. (#1259) * [Automation] Update elastic stack version to 8.5.0-7dc445a0 for testing (#1248) Co-authored-by: apmmachine * Fix: Endpoint collision between monitoring and regular beats (#1034) Fix: Endpoint collision between monitoring and regular beats (#1034) * internal/pkg/agent/cmd: don't format error message with nil errors (#1240) The failure conditions allowed a nil error to be formatted into the message; when the failure is a non-accepted HTTP status code and the underlying error is nil, the error value is now omitted. A short sketch of this pitfall appears after this change list.
Co-authored-by: Craig MacKenzie * [Automation] Update elastic stack version to 8.6.0-21651da3 for testing (#1290) Co-authored-by: apmmachine * Fixed: source uri reload for download/verify components (#1252) Fixed: source uri reload for download/verify components (#1252) * Expand status reporter/controller interfaces to allow local reporters (#1285) * Expand status reporter/controller interfaces to allow local reporters Add a local reporter map to the status controller. These reporters are not used when updating status with fleet-server; they are only used to gather local state information - specifically if the agent is degraded because checkin with fleet-server has failed. This bypasses the bug that was introduced with the liveness endpoint where the agent could checkin (to fleet-server) with a degraded status because a previous checkin failed. Local reporters are used to generate a separate status. This status is used in the liveness endpoint. * fix linter * Improve logging for agent upgrades. (#1287) * [Automation] Update elastic stack version to 8.6.0-326f84b0 for testing (#1318) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.6.0-df00693f for testing (#1334) Co-authored-by: apmmachine * Add success log message after previous checkin failures (#1327) * Fix status reporter initialization (#1341) * [Automation] Update elastic stack version to 8.6.0-a2f4f140 for testing (#1362) Co-authored-by: apmmachine * Added status message to CheckinRequest (#1369) * Added status message to CheckinRequest * added changelog * updated test * added omitempty * Fix failures when using npipe monitoring endpoints (#1371) * [Automation] Update elastic stack version to 8.6.0-158a13db for testing (#1379) Co-authored-by: apmmachine * Mount /etc directory in Kubernetes DaemonSet manifests. (#1382) Changes made to files like `/etc/passwd` using Linux tools like `useradd` are not reflected in the mounted file on the Agent, because the tool replaces the file instead of changing it in-place. Mounting the parent directory solves this problem. * [Automation] Update elastic stack version to 8.6.0-aea1c645 for testing (#1405) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.6.0-0fca2953 for testing (#1412) Co-authored-by: apmmachine * ci: 7.17 is not available for the daily run (#1417) * [Automation] Update elastic stack version to 8.6.0-e4c15f15 for testing (#1425) Co-authored-by: apmmachine * [backport main] Fix: Agent failed to upgrade from 8.4.2 to 8.5.0 BC1 for MAC 12 agent using agent binary. (#1401) [backport main] Fix: Agent failed to upgrade from 8.4.2 to 8.5.0 BC1 for MAC 12 agent using agent binary. (#1401) * Fix docker provider add_fields processors (#1420) The Docker provider was using a wrong key when defining the `add_fields` processor; this caused Filebeat not to start the input and to stay in an unhealthy state. This commit fixes it.
Fixes https://github.com/elastic/beats/issues/29030 * [Automation] Update elastic stack version to 8.6.0-d939cfde for testing (#1436) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.6.0-7c9f25a9 for testing (#1446) Co-authored-by: apmmachine * Enable integration only when datastreams are not defined (#1456) * Add not dedoted k8s pod labels in autodiscover provider to be used for templating, exactly like annotations (#1398) * [Automation] Update elastic stack version to 8.6.0-c49fac70 for testing (#1464) Co-authored-by: apmmachine * Add storageclass permissions in agent clusterrole (#1470) * Add storageclass permissions in agent clusterrole * Remove QA-labels automation (#1455) * [Automation] Update go release version to 1.18.7 (#1444) Co-authored-by: apmmachine * [Automation] Update elastic stack version to 8.6.0-5a8d757d for testing (#1480) Co-authored-by: apmmachine * Improve logging around agent checkins. (#1477) Improve logging around agent checkins. - Log transient checkin errors at Info. - Upgrade to an Error log after 2 repeated failures. - Log the wait time for the next retry. - Only update local state after repeated failures. * [Automation] Update elastic stack version to 8.6.0-40086bc7 for testing (#1496) Co-authored-by: apmmachine * Fixing makefile check (#1490) * Fixing makefile check * action: validate changelog fragment (#1488) * Align managed with standalone role (#1500) * Fix k8s template link versioning (#1504) * Aligning manifests (#1507) * Align managed with standalone role * Fixing missing Label * [Automation] Update elastic stack version to 8.6.0-233dc5d4 for testing (#1515) Co-authored-by: apmmachine * Convert CHANGELOG.next to fragments (#1244) * [Automation] Update elastic stack version to 8.6.0-54a302f0 for testing (#1531) Co-authored-by: apmmachine * Update the linter configuration. (#1478) Sync the configuration with the one used in Beats, which has disabled the majority of the least useful linters already. * Elastic agent counterpart of https://github.com/elastic/beats/pull/33362 (#1528) Always use the stack_release label for npm i. No changelog necessary since there are no user-visible changes. This lets us ensure we've carefully reviewed and labeled the version of the @elastic/synthetics NPM library that's bundled in docker images * [Automation] Update elastic stack version to 8.6.0-cae815eb for testing (#1545) Co-authored-by: apmmachine * Fix admin permission check on localized windows (#1552) Fix admin permission check on localized windows (#1552) * Fixes from merge of main. * Update heartbeat specification to only support elasticsearch. * Fix bad merge in dockerfile.
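As referenced in the #527 entry above, here is a minimal sketch of what an injected `add_fields` processor carrying the stream identity could look like, written in the same `map[string]interface{}` style the monitoring code earlier in this patch uses. The helper name and example IDs are hypothetical; only the `@metadata.input_id` and `@metadata.stream_id` field names come from the commit description.

package main

import "fmt"

// injectStreamMetadata builds an add_fields processor that tags events with the
// originating input and stream IDs so the shipper can tell where an event came
// from. This is an illustrative sketch, not the agent's actual helper.
func injectStreamMetadata(inputID, streamID string) map[string]interface{} {
	return map[string]interface{}{
		"add_fields": map[string]interface{}{
			"target": "@metadata",
			"fields": map[string]interface{}{
				"input_id":  inputID,
				"stream_id": streamID,
			},
		},
	}
}

func main() {
	// Example IDs are made up for illustration.
	fmt.Println(injectStreamMetadata("logfile-system", "logfile-system-1"))
}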
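Likewise, for the /liveness route from #569, a sketch of how an external check could probe it, assuming only that the route is served over HTTP by the agent's monitoring server; the base URL (host and port) below is an assumption, not a documented default.

package main

import (
	"fmt"
	"net/http"
)

// probeLiveness queries the /liveness route described in #569. A non-200
// response (or a transport error) is treated as not live. Sketch only; the
// real endpoint location depends on the agent's monitoring configuration.
func probeLiveness(baseURL string) error {
	resp, err := http.Get(baseURL + "/liveness")
	if err != nil {
		return fmt.Errorf("liveness probe failed: %w", err)
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("agent reports unhealthy: status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	// Hypothetical local address; substitute the configured monitoring endpoint.
	if err := probeLiveness("http://localhost:6791"); err != nil {
		fmt.Println(err)
	}
}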
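And for the nil-error formatting fix from #1240, a minimal sketch of the pitfall with illustrative names: formatting a nil error renders a trailing "<nil>" in the message, so the error is wrapped only when it is non-nil.

package main

import (
	"fmt"
	"net/http"
)

// checkStatus mirrors the shape of the #1240 fix: when the failure is a
// non-accepted HTTP status code and the underlying error is nil, the error
// value is omitted from the formatted message.
func checkStatus(resp *http.Response, err error) error {
	if err != nil {
		return fmt.Errorf("request failed: %w", err)
	}
	if resp.StatusCode != http.StatusOK {
		// err is nil here; including it would render as "... : <nil>".
		return fmt.Errorf("request failed with status %d", resp.StatusCode)
	}
	return nil
}

func main() {
	fmt.Println(checkStatus(&http.Response{StatusCode: http.StatusBadGateway}, nil))
}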
Signed-off-by: Florian Lehner Co-authored-by: apmmachine <58790750+apmmachine@users.noreply.github.com> Co-authored-by: apmmachine Co-authored-by: Pier-Hugues Pellerin Co-authored-by: Denis Rechkunov Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Co-authored-by: Aleksandr Maus Co-authored-by: Victor Martinez Co-authored-by: Manuel de la Peña Co-authored-by: Anderson Queiroz Co-authored-by: Daniel Araujo Almeida Co-authored-by: Mariana Dima Co-authored-by: ofiriro3 Co-authored-by: Julien Lind Co-authored-by: Craig MacKenzie Co-authored-by: Tiago Queiroz Co-authored-by: Pierre HILBERT Co-authored-by: Tetiana Kravchenko Co-authored-by: Michael Katsoulis Co-authored-by: Andrew Gizas Co-authored-by: Michal Pristas Co-authored-by: Elastic Machine Co-authored-by: Florian Lehner Co-authored-by: Andrew Cholakian Co-authored-by: Yash Tewari Co-authored-by: Quentin Pradet Co-authored-by: Andrew Kroh Co-authored-by: Julien Mailleret <8582351+jmlrt@users.noreply.github.com> Co-authored-by: Josh Dover <1813008+joshdover@users.noreply.github.com> Co-authored-by: Chris Mark Co-authored-by: apmmachine Co-authored-by: Dan Kortschak <90160302+efd6@users.noreply.github.com> Co-authored-by: Julia Bardi <90178898+juliaElastic@users.noreply.github.com> Co-authored-by: Edoardo Tenani <526307+endorama@users.noreply.github.com> --- .ci/Jenkinsfile | 183 +- .ci/schedule-daily.groovy | 2 +- .github/workflows/changelog.yml | 17 + .github/workflows/golangci-lint.yml | 8 +- .github/workflows/macos.yml | 25 + .github/workflows/qa-labels.yml | 93 - .go-version | 2 +- .golangci.yml | 110 +- .mergify.yml | 13 + CHANGELOG.next.asciidoc | 200 - Dockerfile | 2 +- Makefile | 2 +- NOTICE.txt | 4 +- README.md | 29 +- ...ion-when-installing-the-Elastic-Agent.yaml | 3 + ...SHA-1-are-now-rejected-See-the-Go-118.yaml | 3 + ...rjack-input-type-to-the-Filebeat-spec.yaml | 3 + ...-autodiscovery-in-kubernetes-provider.yaml | 3 + ...ource-URI-when-downloading-components.yaml | 3 + ...nly-events-so-that-degraded-fleet-che.yaml | 4 + ...30732-Improve-logging-during-upgrades.yaml | 3 + ...ssage-after-previous-checkin-failures.yaml | 3 + ...989867-fix-docker-provider-processors.yaml | 31 + ...5517984-improve-checkin-error-logging.yaml | 5 + ...4342-use-stack-version-npm-synthetics.yaml | 31 + ...permission-check-on-localized-windows.yaml | 31 + deploy/kubernetes/Makefile | 67 +- deploy/kubernetes/creator_k8s_manifest.sh | 58 + .../elastic-agent-managed-kubernetes.yaml | 48 +- .../elastic-agent-managed-daemonset.yaml | 44 +- .../elastic-agent-managed-role.yaml | 4 + .../elastic-agent-standalone-kubernetes.yaml | 76 +- ...-agent-standalone-daemonset-configmap.yaml | 4 +- .../elastic-agent-standalone-daemonset.yaml | 68 +- .../elastic-agent-standalone-role.yaml | 4 + .../templates.d/activemq.yml | 96 + .../templates.d/apache.yml | 134 + .../templates.d/cassandra.yml | 327 + .../templates.d/cef.yml | 51 + .../templates.d/checkpoint.yml | 62 + .../templates.d/cockroachdb.yml | 44 + .../templates.d/crowdstrike.yml | 79 + .../templates.d/cyberarkpas.yml | 57 + .../templates.d/elasticsearch.yml | 288 + .../templates.d/endpoint.yml | 22 + .../templates.d/fireeye.yml | 59 + .../templates.d/haproxy.yml | 68 + .../templates.d/hashicorp_vault.yml | 73 + .../templates.d/hid_bravura_monitor.yml | 42 + .../templates.d/iis.yml | 71 + .../templates.d/infoblox_nios.yml | 63 + .../templates.d/iptables.yml | 54 + .../templates.d/kafka.yml | 61 + .../templates.d/keycloak.yml | 23 + .../templates.d/kibana.yml | 112 + 
.../templates.d/log.yml | 18 + .../templates.d/logstash.yml | 75 + .../templates.d/mattermost.yml | 22 + .../templates.d/microsoft_sqlserver.yml | 127 + .../templates.d/mimecast.yml | 381 + .../templates.d/modsecurity.yml | 28 + .../templates.d/mongodb.yml | 73 + .../templates.d/mysql.yml | 82 + .../templates.d/mysql_enterprise.yml | 18 + .../templates.d/nats.yml | 82 + .../templates.d/netflow.yml | 47 + .../templates.d/nginx.yml | 142 + .../templates.d/nginx_ingress_controller.yml | 53 + .../templates.d/oracle.yml | 82 + .../templates.d/panw.yml | 94 + .../templates.d/panw_cortex_xdr.yml | 90 + .../templates.d/pfsense.yml | 62 + .../templates.d/postgresql.yml | 68 + .../templates.d/prometheus.yml | 90 + .../templates.d/qnap_nas.yml | 60 + .../templates.d/rabbitmq.yml | 79 + .../templates.d/redis.yml | 84 + .../templates.d/santa.yml | 23 + .../templates.d/security_detection_engine.yml | 22 + .../templates.d/sentinel_one.yml | 217 + .../templates.d/snort.yml | 53 + .../templates.d/snyk.yml | 139 + .../templates.d/stan.yml | 56 + .../templates.d/suricata.yml | 24 + .../templates.d/symantec_endpoint.yml | 67 + .../templates.d/synthetics.yml | 148 + .../templates.d/tcp.yml | 32 + .../templates.d/tomcat.yml | 8296 +++++++++++++++++ .../templates.d/traefik.yml | 37 + .../templates.d/udp.yml | 33 + .../templates.d/zeek.yml | 2271 +++++ .../templates.d/zookeeper.yml | 54 + dev-tools/mage/crossbuild.go | 9 + dev-tools/packaging/files/darwin/PkgInfo | 1 + dev-tools/packaging/packages.yml | 63 +- .../templates/darwin/Info.plist.tmpl | 20 + .../templates/darwin/elastic-agent.tmpl | 11 + .../docker/Dockerfile.elastic-agent.tmpl | 478 +- .../templates/docker/Dockerfile.tmpl | 2 +- .../ironbank/hardening_manifest.yaml.tmpl | 2 +- .../templates/linux/postinstall.sh.tmpl | 38 +- go.mod | 3 +- go.sum | 13 +- internal/pkg/agent/application/application.go | 4 +- .../gateway/fleet/fleet_gateway.go | 60 +- internal/pkg/agent/application/info/state.go | 35 +- .../pkg/agent/application/paths/common.go | 50 +- .../agent/application/paths/common_test.go | 93 + internal/pkg/agent/application/paths/files.go | 8 + .../application/upgrade/artifact/config.go | 137 + .../upgrade/artifact/config_test.go | 248 + .../artifact/download/composed/downloader.go | 15 + .../artifact/download/composed/verifier.go | 17 +- .../artifact/download/http/downloader.go | 30 +- .../artifact/download/http/verifier.go | 18 + .../upgrade/artifact/download/reloadable.go | 14 + .../artifact/download/snapshot/downloader.go | 55 +- .../artifact/download/snapshot/verifier.go | 35 +- .../pkg/agent/application/upgrade/cleanup.go | 10 +- .../agent/application/upgrade/cleanup_test.go | 16 +- .../pkg/agent/application/upgrade/rollback.go | 20 +- .../application/upgrade/service_darwin.go | 11 +- .../application/upgrade/step_download.go | 4 + .../agent/application/upgrade/step_mark.go | 12 +- .../agent/application/upgrade/step_relink.go | 21 +- .../agent/application/upgrade/step_unpack.go | 32 +- .../pkg/agent/application/upgrade/upgrade.go | 47 +- internal/pkg/agent/cmd/container.go | 18 +- internal/pkg/agent/cmd/upgrade.go | 2 - internal/pkg/agent/cmd/watch.go | 25 +- .../agent/control/server/listener_windows.go | 47 +- internal/pkg/agent/install/install.go | 56 +- internal/pkg/agent/vars/vars.go | 2 +- internal/pkg/agent/vault/vault_darwin.c | 10 +- internal/pkg/composable/context.go | 6 +- internal/pkg/composable/controller.go | 6 +- internal/pkg/composable/controller_test.go | 2 +- internal/pkg/composable/dynamic.go | 25 +- 
.../pkg/composable/providers/agent/agent.go | 2 +- .../composable/providers/agent/agent_test.go | 2 +- .../pkg/composable/providers/docker/docker.go | 4 +- .../providers/docker/docker_test.go | 2 +- internal/pkg/composable/providers/env/env.go | 2 +- .../pkg/composable/providers/env/env_test.go | 2 +- .../pkg/composable/providers/host/host.go | 2 +- .../composable/providers/host/host_test.go | 4 +- .../composable/providers/kubernetes/config.go | 6 + .../composable/providers/kubernetes/hints.go | 260 + .../providers/kubernetes/hints_test.go | 368 + .../providers/kubernetes/kubernetes.go | 23 +- .../composable/providers/kubernetes/node.go | 3 +- .../providers/kubernetes/node_test.go | 5 - .../composable/providers/kubernetes/pod.go | 94 +- .../providers/kubernetes/pod_test.go | 44 +- .../providers/kubernetes/service.go | 3 +- .../providers/kubernetes/service_test.go | 5 - .../kubernetesleaderelection/config.go | 6 +- .../kubernetes_leaderelection.go | 2 +- .../kubernetessecrets/kubernetes_secrets.go | 2 +- .../kubernetes_secrets_test.go | 4 +- .../pkg/composable/providers/local/local.go | 2 +- .../composable/providers/local/local_test.go | 2 +- .../providers/localdynamic/localdynamic.go | 2 +- .../localdynamic/localdynamic_test.go | 2 +- .../pkg/composable/providers/path/path.go | 4 +- .../composable/providers/path/path_test.go | 2 +- internal/pkg/composable/testing/dynamic.go | 2 + internal/pkg/core/backoff/backoff.go | 5 + internal/pkg/core/backoff/backoff_test.go | 50 +- internal/pkg/core/backoff/equal_jitter.go | 17 +- internal/pkg/core/backoff/exponential.go | 17 +- internal/pkg/crypto/io.go | 8 +- .../pkg/fleetapi/acker/retrier/retrier.go | 15 +- internal/pkg/fleetapi/checkin_cmd.go | 34 +- internal/pkg/fleetapi/checkin_cmd_test.go | 20 +- magefile.go | 46 +- specs/heartbeat.spec.yml | 91 +- testing/environments/snapshot.yml | 4 +- version/docs/version.asciidoc | 2 +- 179 files changed, 18107 insertions(+), 1195 deletions(-) create mode 100644 .github/workflows/changelog.yml create mode 100644 .github/workflows/macos.yml delete mode 100644 .github/workflows/qa-labels.yml delete mode 100644 CHANGELOG.next.asciidoc create mode 100644 changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml create mode 100644 changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml create mode 100644 changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml create mode 100644 changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml create mode 100644 changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml create mode 100644 changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml create mode 100644 changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml create mode 100644 changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml create mode 100644 changelog/fragments/1664989867-fix-docker-provider-processors.yaml create mode 100644 changelog/fragments/1665517984-improve-checkin-error-logging.yaml create mode 100644 changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml create mode 100644 changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml create mode 100755 deploy/kubernetes/creator_k8s_manifest.sh create mode 100644 
deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml create mode 100644 
deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml create mode 100644 deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml create mode 100644 dev-tools/packaging/files/darwin/PkgInfo create mode 100644 dev-tools/packaging/templates/darwin/Info.plist.tmpl create mode 100644 dev-tools/packaging/templates/darwin/elastic-agent.tmpl create mode 100644 internal/pkg/agent/application/paths/common_test.go create mode 100644 internal/pkg/agent/application/upgrade/artifact/config_test.go create mode 100644 internal/pkg/agent/application/upgrade/artifact/download/reloadable.go create mode 100644 internal/pkg/composable/providers/kubernetes/hints.go create mode 100644 internal/pkg/composable/providers/kubernetes/hints_test.go diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 78078f79358..c374bfeb0ef 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -19,7 +19,7 @@ pipeline { DEVELOPER_MODE=true } options { - timeout(time: 2, unit: 'HOURS') + timeout(time: 3, unit: 'HOURS') buildDiscarder(logRotator(numToKeepStr: '20', artifactNumToKeepStr: '20', daysToKeepStr: '30')) timestamps() ansiColor('xterm') @@ -39,6 +39,14 @@ pipeline { // disabled by default, but required for merge: // opt-in with 'ci:end-to-end' tag on PR booleanParam(name: 'end_to_end_tests_ci', defaultValue: false, description: 'Enable End-to-End tests') + + // disabled by default, but required for merge: + // opt-in with 'ci:extended-windows' tag on PR + booleanParam(name: 'extended_windows_ci', defaultValue: false, description: 'Enable Extended Windows tests') + + // disabled by default, but required for merge: + // opt-in with 'ci:extended-m1' tag on PR + booleanParam(name: 'extended_m1_ci', defaultValue: false, description: 'Enable M1 tests') } stages { stage('Checkout') { @@ -51,6 +59,10 @@ pipeline { setEnvVar('ONLY_DOCS', isGitRegionMatch(patterns: [ '.*\\.(asciidoc|md)' ], shouldMatchAll: true).toString()) setEnvVar('PACKAGING_CHANGES', isGitRegionMatch(patterns: [ '(^dev-tools/packaging/.*|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) setEnvVar('K8S_CHANGES', isGitRegionMatch(patterns: [ '(^deploy/kubernetes/.*|^version/docs/version.asciidoc|.ci/Jenkinsfile)' ], shouldMatchAll: false).toString()) + 
setEnvVar('EXT_WINDOWS_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) + setEnvVar('EXT_M1_CHANGES', isGitRegionMatch(patterns: [ '.ci/Jenkinsfile' ], shouldMatchAll: false).toString()) + // set the GO_VERSION env variable with the go version to be used in withMageEnv + setEnvVar('GO_VERSION', readFile(file: '.go-version')?.trim()) } } } @@ -79,7 +91,8 @@ pipeline { axes { axis { name 'PLATFORM' - values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable', 'macos12 && x86_64' + // Orka workers are not healthy (memory and connectivity issues) + values 'ubuntu-20.04 && immutable', 'aws && aarch64', 'windows-2016 && windows-immutable', 'windows-2022 && windows-immutable' //, 'macos12 && x86_64' } } stages { @@ -147,7 +160,7 @@ pipeline { } } steps { - runK8s(k8sVersion: 'v1.23.0', kindVersion: 'v0.11.1', context: "K8s-${PLATFORM}") + runK8s(k8sVersion: 'v1.25.0-beta.0', kindVersion: 'v0.14.0', context: "K8s-${PLATFORM}") } } stage('Package') { @@ -219,7 +232,7 @@ pipeline { axes { axis { name 'K8S_VERSION' - values "v1.24.0", "v1.23.6", "v1.22.9", "v1.21.12" + values "v1.25.0","v1.24.3", "v1.23.6", "v1.22.9" } } stages { @@ -238,6 +251,153 @@ pipeline { } } } + stage('Sync K8s') { //This stage opens a PR to kibana Repository in order to sync k8s manifests + when { + // Only on main branch + // Enable if k8s related changes. + allOf { + branch 'main' // Only runs for branch main + expression { return env.K8S_CHANGES == "true" } // If k8s changes + } + } + failFast false + agent {label 'ubuntu-20.04 && immutable'} + options { skipDefaultCheckout() } + stages { + stage('OpenKibanaPR') { + steps { + withGhEnv(version: '2.4.0') { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + dir("${BASE_DIR}/deploy/kubernetes"){ + sh(label: '[File Creation] Create-Needed-Manifest', script: """ + WITHOUTCONFIG=true make generate-k8s + ./creator_k8s_manifest.sh . """) + sh(label: '[Clone] Kibana-Repository', script: """ + make ci-clone-kibana-repository + cp Makefile ./kibana + cd kibana + make ci-create-kubernetes-templates-pull-request """) + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + } + } + } + } + } + stage('extended windows') { + when { + // Always when running builds on branches/tags + // Enable if extended windows support related changes. 
+ beforeAgent true + anyOf { + not { changeRequest() } + expression { return isExtendedWindowsEnabled() && env.ONLY_DOCS == "false"} + } + } + failFast false + matrix { + agent {label "${PLATFORM} && windows-immutable"} + options { skipDefaultCheckout() } + axes { + axis { + name 'PLATFORM' + values 'windows-8', 'windows-10', 'windows-11' + } + } + stages { + stage('build'){ + options { skipDefaultCheckout() } + steps { + withGithubNotify(context: "Build-${PLATFORM}") { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + withMageEnv(){ + dir("${BASE_DIR}"){ + cmd(label: 'Go build', script: 'mage build') + } + } + } + } + } + stage('Test') { + options { skipDefaultCheckout() } + steps { + withGithubNotify(context: "Test-${PLATFORM}") { + withMageEnv(){ + dir("${BASE_DIR}"){ + withEnv(["TEST_COVERAGE=${isCodeCoverageEnabled()}"]) { + cmd(label: 'Go unitTest', script: 'mage unitTest') + } + } + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + whenTrue(isCodeCoverageEnabled()) { + coverageReport(baseDir: "**/build", reportFiles: 'TEST-go-unit.html', coverageFiles: 'TEST-go-unit-cov.xml') + } + } + } + } + } + } + } + stage('m1') { + agent { label 'orka && darwin && aarch64' } + options { skipDefaultCheckout() } + when { + // Always when running builds on branches/tags + // Enable if extended M1 support related changes. + beforeAgent true + anyOf { + not { changeRequest() } + expression { return isExtendedM1Enabled() && env.ONLY_DOCS == "false"} + } + } + stages { + stage('build'){ + steps { + withGithubNotify(context: "Build-darwin-aarch64") { + deleteDir() + unstashV2(name: 'source', bucket: "${JOB_GCS_BUCKET}", credentialsId: "${JOB_GCS_CREDENTIALS}") + withMageEnv(){ + dir("${BASE_DIR}"){ + cmd(label: 'Go build', script: 'mage build') + } + } + } + } + } + stage('Test') { + steps { + withGithubNotify(context: "Test-darwin-aarch64") { + withMageEnv(){ + dir("${BASE_DIR}"){ + withEnv(["TEST_COVERAGE=${isCodeCoverageEnabled()}"]) { + cmd(label: 'Go unitTest', script: 'mage unitTest') + } + } + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/TEST-*.xml") + whenTrue(isCodeCoverageEnabled()) { + coverageReport(baseDir: "**/build", reportFiles: 'TEST-go-unit.html', coverageFiles: 'TEST-go-unit-cov.xml') + } + } + } + } + } + } stage('e2e tests') { when { // Always when running builds on branches/tags @@ -250,7 +410,6 @@ pipeline { } } steps { - // TODO: what's the testMatrixFile to be used if any runE2E(testMatrixFile: '.ci/.e2e-tests-for-elastic-agent.yaml', beatVersion: "${env.BEAT_VERSION}-SNAPSHOT", elasticAgentVersion: "${env.BEAT_VERSION}-SNAPSHOT", @@ -374,3 +533,17 @@ def isE2eEnabled() { def isPackageEnabled() { return env.PACKAGING_CHANGES == "true" || env.GITHUB_COMMENT?.contains('package') || matchesPrLabel(label: 'ci:package') } + +/** +* Wrapper to know if the build should enable the windows extended support +*/ +def isExtendedWindowsEnabled() { + return env.EXT_WINDOWS_CHANGES == "true" || params.extended_windows_ci || env.GITHUB_COMMENT?.contains('extended windows') || matchesPrLabel(label: 'ci:extended-windows') +} + +/** +* Wrapper to know if the build should enable the M1 extended support +*/ +def isExtendedM1Enabled() { + return env.EXT_M1_CHANGES == "true" || params.extended_m1_ci || env.GITHUB_COMMENT?.contains('extended m1') || matchesPrLabel(label: 'ci:extended-m1') 
+} diff --git a/.ci/schedule-daily.groovy b/.ci/schedule-daily.groovy index 5c1d7134858..adc1ec0f02e 100644 --- a/.ci/schedule-daily.groovy +++ b/.ci/schedule-daily.groovy @@ -20,7 +20,7 @@ pipeline { stages { stage('Nighly beats builds') { steps { - runBuilds(quietPeriodFactor: 2000, branches: ['main', '8.', '8.', '7.']) + runBuilds(quietPeriodFactor: 2000, branches: ['main', '8.', '8.']) } } } diff --git a/.github/workflows/changelog.yml b/.github/workflows/changelog.yml new file mode 100644 index 00000000000..d0f29a0fd25 --- /dev/null +++ b/.github/workflows/changelog.yml @@ -0,0 +1,17 @@ +name: Changelog +on: [pull_request] + +jobs: + fragments: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: check pr-has-fragment + run: | + GOBIN=$PWD/bin go install github.com/elastic/elastic-agent-changelog-tool@latest + ./bin/elastic-agent-changelog-tool pr-has-fragment --repo ${{ github.event.repository.name }} ${{github.event.number}} diff --git a/.github/workflows/golangci-lint.yml b/.github/workflows/golangci-lint.yml index 8079fe1c673..62d4006737c 100644 --- a/.github/workflows/golangci-lint.yml +++ b/.github/workflows/golangci-lint.yml @@ -18,22 +18,22 @@ jobs: name: lint runs-on: ${{ matrix.os }} steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 # Uses Go version from the repository. - name: Read .go-version file id: goversion run: echo "::set-output name=version::$(cat .go-version)" - - uses: actions/setup-go@v2 + - uses: actions/setup-go@v3 with: go-version: "${{ steps.goversion.outputs.version }}" - name: golangci-lint - uses: golangci/golangci-lint-action@v2 + uses: golangci/golangci-lint-action@v3 with: # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version - version: v1.45.2 + version: v1.47.2 # Give the job more time to execute. # Regarding `--whole-files`, the linter is supposed to support linting of changed a patch only but, diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml new file mode 100644 index 00000000000..bf3e5eed775 --- /dev/null +++ b/.github/workflows/macos.yml @@ -0,0 +1,25 @@ +name: macos + +on: + pull_request: + push: + branches: + - main + - 8.* + +jobs: + macos: + runs-on: macos-latest + steps: + - uses: actions/checkout@v3 + - name: Fetch Go version from .go-version + run: echo "GO_VERSION=$(cat .go-version)" >> $GITHUB_ENV + - uses: actions/setup-go@v3 + with: + go-version: ${{ env.GO_VERSION }} + - name: Install dependencies + run: go install github.com/magefile/mage + - name: Run build + run: mage build + - name: Run test + run: mage unitTest diff --git a/.github/workflows/qa-labels.yml b/.github/workflows/qa-labels.yml deleted file mode 100644 index bbbd4439847..00000000000 --- a/.github/workflows/qa-labels.yml +++ /dev/null @@ -1,93 +0,0 @@ -name: Add QA labels to Elastic Agent issues -on: - # pull_request_target allows running actions on PRs from forks with a read/write GITHUB_TOKEN, but it will not allow - # running workflows defined in the PRs itself, only workflows already merged into the target branch. This avoids - # potential vulnerabilities that could allow someone to open a PR and retrieve secrets. - # It's important that this workflow never runs any checkout actions which could be used to circumvent this protection. 
- # See these links for more information: - # - https://github.blog/2020-08-03-github-actions-improvements-for-fork-and-pull-request-workflows/ - # - https://nathandavison.com/blog/github-actions-and-the-threat-of-malicious-pull-requests - pull_request_target: - types: - - closed - -jobs: - fetch_issues_to_label: - runs-on: ubuntu-latest - # Only run on PRs that were merged for the Elastic Agent teams - if: | - github.event.pull_request.merged_at && - ( - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent') || - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent-Data-Plane') || - contains(github.event.pull_request.labels.*.name, 'Team:Elastic-Agent-Control-Plane') - ) - outputs: - issue_ids: ${{ steps.issues_to_label.outputs.value }} - label_ids: ${{ steps.label_ids.outputs.value }} - steps: - - uses: octokit/graphql-action@v2.x - id: closing_issues - with: - query: | - query closingIssueNumbersQuery($prnumber: Int!) { - repository(owner: "elastic", name: "elastic-agent") { - pullRequest(number: $prnumber) { - closingIssuesReferences(first: 10) { - nodes { - id - labels(first: 20) { - nodes { - id - name - } - } - } - } - } - } - } - prnumber: ${{ github.event.number }} - token: ${{ secrets.GITHUB_TOKEN }} - - uses: sergeysova/jq-action@v2 - id: issues_to_label - with: - # Map to the issues' node id - cmd: echo $CLOSING_ISSUES | jq -c '.repository.pullRequest.closingIssuesReferences.nodes | map(.id)' - multiline: true - env: - CLOSING_ISSUES: ${{ steps.closing_issues.outputs.data }} - - uses: sergeysova/jq-action@v2 - id: label_ids - with: - # Get list of version labels on pull request and map to label's node id, append 'QA:Ready For Testing' id ("LA_kwDOGgEmJc7mkkl9]") - cmd: echo $PR_LABELS | jq -c 'map(select(.name | test("v[0-9]+\\.[0-9]+\\.[0-9]+")) | .node_id) + ["LA_kwDOGgEmJc7mkkl9]' - multiline: true - env: - PR_LABELS: ${{ toJSON(github.event.pull_request.labels) }} - - label_issues: - needs: fetch_issues_to_label - runs-on: ubuntu-latest - # For each issue closed by the PR run this job - if: | - fromJSON(needs.fetch_issues_to_label.outputs.issue_ids).length > 0 && - fromJSON(needs.fetch_issues_to_label.outputs.label_ids).length > 0 - strategy: - matrix: - issueNodeId: ${{ fromJSON(needs.fetch_issues_to_label.outputs.issue_ids) }} - labelId: ${{ fromJSON(needs.fetch_issues_to_label.outputs.label_ids) }} - name: Label issue ${{ matrix.issueNodeId }} - steps: - - uses: octokit/graphql-action@v2.x - id: add_labels_to_closed_issue - with: - query: | - mutation add_label($issueid:ID!, $labelids:[String!]!) { - addLabelsToLabelable(input: {labelableId: $issueid, labelIds: $labelids}) { - clientMutationId - } - } - issueid: ${{ matrix.issueNodeId }} - labelids: ${{ matrix.labelId }} - token: ${{ secrets.GITHUB_TOKEN }} diff --git a/.go-version b/.go-version index ada2e4fce87..d6f3a382b34 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.17.10 +1.18.7 diff --git a/.golangci.yml b/.golangci.yml index 956b4b4b573..96e131c8ade 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -12,46 +12,37 @@ issues: # Set to 0 to disable. # Default: 50 max-issues-per-linter: 0 + exclude-rules: + # Exclude package name contains '-' issue because we have at least one package with + # it on its name. 
+ - text: "ST1003:" + linters: + - stylecheck + # From mage we are printing to the console ourselves + - path: (.*magefile.go|.*dev-tools/mage/.*) + linters: + - forbidigo output: sort-results: true -# Uncomment and add a path if needed to exclude -# skip-dirs: -# - some/path -# skip-files: -# - ".*\\.my\\.go$" -# - lib/bad.go - # Find the whole list here https://golangci-lint.run/usage/linters/ linters: disable-all: true enable: - - deadcode # finds unused code - errcheck # checking for unchecked errors in go programs - errorlint # errorlint is a linter for that can be used to find code that will cause problems with the error wrapping scheme introduced in Go 1.13. - - goconst # finds repeated strings that could be replaced by a constant - - dupl # tool for code clone detection - forbidigo # forbids identifiers matched by reg exps - # 'replace' is used in go.mod for many dependencies that come from libbeat. We should work to remove those, - # so we can re-enable this linter. - # - gomoddirectives # manage the use of 'replace', 'retract', and 'excludes' directives in go.mod. - - gomodguard - gosimple # linter for Go source code that specializes in simplifying a code - misspell # finds commonly misspelled English words in comments - nakedret # finds naked returns in functions greater than a specified function length - - prealloc # finds slice declarations that could potentially be preallocated - nolintlint # reports ill-formed or insufficient nolint directives - staticcheck # Staticcheck is a go vet on steroids, applying a ton of static analysis checks - stylecheck # a replacement for golint - - unparam # reports unused function parameters - unused # checks Go code for unused constants, variables, functions and types - - govet # Vet examines Go source code and reports suspicious constructs, such as Printf calls whose arguments do not align with the format string - ineffassign # detects when assignments to existing variables are not used - - structcheck # finds unused struct fields - typecheck # Like the front-end of a Go compiler, parses and type-checks Go code - - varcheck # Finds unused global variables and constants - asciicheck # simple linter to check that your code does not contain non-ASCII identifiers - bodyclose # checks whether HTTP response body is closed successfully - durationcheck # check for two durations multiplied together @@ -63,14 +54,20 @@ linters: - noctx # noctx finds sending http request without context.Context - unconvert # Remove unnecessary type conversions - wastedassign # wastedassign finds wasted assignment statements. - # - godox # tool for detection of FIXME, TODO and other comment keywords + - gomodguard # check for blocked dependencies # all available settings of specific linters linters-settings: errcheck: # report about not checking of errors in type assertions: `a := b.(MyStruct)`; - # default is false: such cases aren't reported by default. - check-type-assertions: true + check-type-assertions: false + # report about assignment of errors to blank identifier: `num, _ := strconv.Atoi(numStr)`. + check-blank: false + # List of functions to exclude from checking, where each entry is a single function to exclude. + # See https://github.com/kisielk/errcheck#excluding-functions for details. + exclude-functions: + - (mapstr.M).Delete # Only returns ErrKeyNotFound, can safely be ignored. + - (mapstr.M).Put # Can only fail on type conversions, usually safe to ignore. errorlint: # Check whether fmt.Errorf uses the %w verb for formatting errors.
See the readme for caveats @@ -80,16 +77,6 @@ linters-settings: # Check for plain error comparisons comparison: true - goconst: - # minimal length of string constant, 3 by default - min-len: 3 - # minimal occurrences count to trigger, 3 by default - min-occurrences: 2 - - dupl: - # tokens count to trigger issue, 150 by default - threshold: 100 - forbidigo: # Forbid the following identifiers forbid: @@ -97,68 +84,59 @@ linters-settings: # Exclude godoc examples from forbidigo checks. Default is true. exclude_godoc_examples: true - gomoddirectives: - # Allow local `replace` directives. Default is false. - replace-local: false + goimports: + local-prefixes: github.com/elastic gomodguard: blocked: # List of blocked modules. modules: - - github.com/elastic/beats/v7: - reason: "There must be no Beats dependency, use elastic-agent-libs instead." - + # Blocked module. + - github.com/pkg/errors: + # Recommended modules that should be used instead. (Optional) + recommendations: + - errors + - fmt + reason: "This package is deprecated, use `fmt.Errorf` with `%w` instead" gosimple: # Select the Go version to target. The default is '1.13'. - go: "1.17" - - misspell: - # Correct spellings using locale preferences for US or UK. - # Default is to use a neutral variety of English. - # Setting locale to US will correct the British spelling of 'colour' to 'color'. - # locale: US - # ignore-words: - # - IdP + go: "1.18.7" nakedret: # make an issue if func has more lines of code than this setting and it has naked returns; default is 30 max-func-lines: 0 - prealloc: - # Report preallocation suggestions only on simple loops that have no returns/breaks/continues/gotos in them. - # True by default. - simple: true - range-loops: true # Report preallocation suggestions on range loops, true by default - for-loops: false # Report preallocation suggestions on for loops, false by default - nolintlint: # Enable to ensure that nolint directives are all used. Default is true. allow-unused: false # Disable to ensure that nolint directives don't have a leading space. Default is true. - allow-leading-space: true + allow-leading-space: false # Exclude following linters from requiring an explanation. Default is []. allow-no-explanation: [] # Enable to require an explanation of nonzero length after each nolint directive. Default is false. require-explanation: true # Enable to require nolint directives to mention the specific linter being suppressed. Default is false. - require-specific: true + require-specific: false staticcheck: # Select the Go version to target. The default is '1.13'. - go: "1.17" + go: "1.18.7" + checks: ["all"] stylecheck: # Select the Go version to target. The default is '1.13'. - go: "1.17" - - unparam: - # Inspect exported functions, default is false. Set to true if no external program/library imports your code. - # XXX: if you enable this setting, unparam will report a lot of false-positives in text editors: - # if it's called for subdir of a project it can't find external interfaces. All text editor integrations - # with golangci-lint call it on a directory with the changed file. - check-exported: false + go: "1.18.7" + checks: ["all"] unused: # Select the Go version to target. The default is '1.13'. - go: "1.17" + go: "1.18.7" + + gosec: + excludes: + - G306 # Expect WriteFile permissions to be 0600 or less + - G404 # Use of weak random number generator + - G401 # Detect the usage of DES, RC4, MD5 or SHA1: Used in non-crypto contexts. 
+ - G501 # Import blocklist: crypto/md5: Used in non-crypto contexts. + - G505 # Import blocklist: crypto/sha1: Used in non-crypto contexts. diff --git a/.mergify.yml b/.mergify.yml index 3fe46362854..528df9b498b 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -220,3 +220,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.5 branch + conditions: + - merged + - label=backport-v8.5.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.5" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc deleted file mode 100644 index acdf4efc087..00000000000 --- a/CHANGELOG.next.asciidoc +++ /dev/null @@ -1,200 +0,0 @@ -// Use these for links to issue and pulls. Note issues and pulls redirect one to -// each other on Github, so don't worry too much on using the right prefix. -:issue-beats: https://github.com/elastic/beats/issues/ -:pull-beats: https://github.com/elastic/beats/pull/ - -:issue: https://github.com/elastic/elastic-agent/issues/ -:pull: https://github.com/elastic/elastic-agent/pull/ - -=== Elastic Agent version HEAD - -==== Breaking changes - -- Docker container is not run as root by default. {pull-beats}[21213] -- Read Fleet connection information from `fleet.*` instead of `fleet.kibana.*`. {pull-beats}[24713] -- Beats build for 32Bit Windows or Linux system will refuse to run on a 64bit system. {pull-beats}[25186] -- Remove the `--kibana-url` from `install` and `enroll` command. {pull-beats}[25529] -- Default to port 80 and 443 for Kibana and Fleet Server connections. {pull-beats}[25723] -- Remove deprecated/undocumented IncludeCreatorMetadata setting from kubernetes metadata config options {pull-beats}[28006] -- The `/processes/` endpoint proxies to the subprocess's monitoring endpoint, instead of querying its `/stats` endpoint {pull-beats}[28165] -- Remove username/password for fleet-server authentication. {pull-beats}[29458] - -==== Bugfixes -- Fix rename *ConfigChange to *PolicyChange to align on changes in the UI. {pull-beats}[20779] -- Thread safe sorted set {pull-beats}[21290] -- Copy Action store on upgrade {pull-beats}[21298] -- Include inputs in action store actions {pull-beats}[21298] -- Fix issue where inputs without processors defined would panic {pull-beats}[21628] -- Prevent reporting ecs version twice {pull-beats}[21616] -- Partial extracted beat result in failure to spawn beat {issue-beats}[21718] -- Use symlink path for reexecutions {pull-beats}[21835] -- Use ML_SYSTEM to detect if agent is running as a service {pull-beats}[21884] -- Use local temp instead of system one {pull-beats}[21883] -- Rename monitoring index from `elastic.agent` to `elastic_agent` {pull-beats}[21932] -- Fix issue with named pipes on Windows 7 {pull-beats}[21931] -- Fix missing elastic_agent event data {pull-beats}[21994] -- Ensure shell wrapper path exists before writing wrapper on install {pull-beats}[22144] -- Fix deb/rpm packaging for Elastic Agent {pull-beats}[22153] -- Fix composable input processor promotion to fix duplicates {pull-beats}[22344] -- Fix sysv init files for deb/rpm installation {pull-beats}[22543] -- Fix shell wrapper for deb/rpm packaging {pull-beats}[23038] -- Fixed parsing of npipe URI {pull-beats}[22978] -- Select default agent policy if no enrollment token provided. 
{pull-beats}[23973] -- Remove artifacts on transient download errors {pull-beats}[23235] -- Support for linux/arm64 {pull-beats}[23479] -- Skip top level files when unziping archive during upgrade {pull-beats}[23456] -- Do not take ownership of Endpoint log path {pull-beats}[23444] -- Fixed fetching DBus service PID {pull-beats}[23496] -- Fix issue of missing log messages from filebeat monitor {pull-beats}[23514] -- Increase checkin grace period to 30 seconds {pull-beats}[23568] -- Fix libbeat from reporting back degraded on config update {pull-beats}[23537] -- Rewrite check if agent is running with admin rights on Windows {pull-beats}[23970] -- Fix issues with dynamic inputs and conditions {pull-beats}[23886] -- Fix bad substitution of API key. {pull-beats}[24036] -- Fix docker enrollment issue related to Fleet Server change. {pull-beats}[24155] -- Improve log on failure of Endpoint Security installation. {pull-beats}[24429] -- Verify communication to Kibana before updating Fleet client. {pull-beats}[24489] -- Fix nil pointer when null is generated as list item. {issue-beats}[23734] -- Add support for filestream input. {pull-beats}[24820] -- Add check for URL set when cert and cert key. {pull-beats}[24904] -- Fix install command for Fleet Server bootstrap, remove need for --enrollment-token when using --fleet-server {pull-beats}[24981] -- Respect host configuration for exposed processes endpoint {pull-beats}[25114] -- Set --inscure in container when FLEET_SERVER_ENABLE and FLEET_INSECURE set {pull-beats}[25137] -- Fixed: limit for retries to Kibana configurable {issue-beats}[25063] -- Fix issue with status and inspect inside of container {pull-beats}[25204] -- Remove FLEET_SERVER_POLICY_NAME env variable as it was not used {pull-beats}[25149] -- Reduce log level for listener cleanup to debug {pull-beats} -- Passing in policy id to container command works {pull-beats}[25352] -- Reduce log level for listener cleanup to debug {pull-beats}[25274] -- Delay the restart of application when a status report of failure is given {pull-beats}[25339] -- Don't log when upgrade capability doesn't apply {pull-beats}[25386] -- Fixed issue when unversioned home is set and invoked watcher failing with ENOENT {issue-beats}[25371] -- Fixed Elastic Agent: expecting Dict and received *transpiler.Key for '0' {issue-beats}[24453] -- Fix AckBatch to do nothing when no actions passed {pull-beats}[25562] -- Add error log entry when listener creation fails {issue-beats}[23482] -- Handle case where policy doesn't contain Fleet connection information {pull-beats}[25707] -- Fix fleet-server.yml spec to not overwrite existing keys {pull-beats}[25741] -- Agent sends wrong log level to Endpoint {issue-beats}[25583] -- Fix startup with failing configuration {pull-beats}[26057] -- Change timestamp in elatic-agent-json.log to use UTC {issue-beats}[25391] -- Fix add support for Logstash output. {pull-beats}[24305] -- Do not log Elasticsearch configuration for monitoring output when running with debug. {pull-beats}[26583] -- Fix issue where proxy enrollment options broke enrollment command. {pull-beats}[26749] -- Remove symlink.prev from previously failed upgrade {pull-beats}[26785] -- Fix apm-server supported outputs not being in sync with supported output types. {pull-beats}[26885] -- Set permissions during installation {pull-beats}[26665] -- Disable monitoring during fleet-server bootstrapping. 
{pull-beats}[27222] -- Fix issue with atomic extract running in K8s {pull-beats}[27396] -- Fix issue with install directory in state path in K8s {pull-beats}[27396] -- Disable monitoring during fleet-server bootstrapping. {pull-beats}[27222] -- Change output.elasticsearch.proxy_disabled flag to output.elasticsearch.proxy_disable so fleet uses it. {issue-beats}[27670] {pull-beats}[27671] -- Add validation for certificate flags to ensure they are absolute paths. {pull-beats}[27779] -- Migrate state on upgrade {pull-beats}[27825] -- Add "_monitoring" suffix to monitoring instance names to remove ambiguity with the status command. {issue-beats}[25449] -- Ignore ErrNotExists when fixing permissions. {issue-beats}[27836] {pull-beats}[27846] -- Snapshot artifact lookup will use agent.download proxy settings. {issue-beats}[27903] {pull-beats}[27904] -- Fix lazy acker to only add new actions to the batch. {pull-beats}[27981] -- Allow HTTP metrics to run in bootstrap mode. Add ability to adjust timeouts for Fleet Server. {pull-beats}[28260] -- Fix agent configuration overwritten by default fleet config. {pull-beats}[29297] -- Allow agent containers to use basic auth to create a service token. {pull-beats}[29651] -- Fix issue where a failing artifact verification does not remove the bad artifact. {pull-beats}[30281] -- Reduce Elastic Agent shut down time by stopping processes concurrently {pull-beats}[29650] -- Move `context cancelled` error from fleet gateway into debug level. {pull}187[187] -- Update library containerd to 1.5.10. {pull}186[186] -- Add fleet-server to output of elastic-agent inspect output command (and diagnostic bundle). {pull}243[243] -- Update API calls that the agent makes to Kibana when running the container command. {pull}253[253] -- diagnostics collect log names are fixed on Windows machines, command will ignore failures. AgentID is included in diagnostics(and diagnostics collect) output. {issue}81[81] {issue}92[92] {issue}190[190] {pull}262[262] -- Collects stdout and stderr of applications run as a process and logs them. {issue}[88] -- Remove VerificationMode option to empty string. Default value is `full`. {issue}[184] -- diagnostics collect file mod times are set. {pull}570[570] -- Allow ':' characters in dynamic variables {issue}624[624] {pull}680[680] -- Allow the - char to appear as part of variable names in eql expressions. {issue}709[709] {pull}710[710] -- Allow the / char in variable names in eql and transpiler. {issue}715[715] {pull}718[718] -- Fix data duplication for standalone agent on Kubernetes using the default manifest {issue-beats}31512[31512] {pull}742[742] -- Agent updates will clean up unneeded artifacts. {issue}693[693] {issue}694[694] {pull}752[752] - -==== New features - -- Prepare packaging for endpoint and asc files {pull-beats}[20186] -- Improved version CLI {pull-beats}[20359] -- Enroll CLI now restarts running daemon {pull-beats}[20359] -- Add restart CLI cmd {pull-beats}[20359] -- Add new `synthetics/*` inputs to run Heartbeat {pull-beats}[20387] -- Users of the Docker image can now pass `FLEET_ENROLL_INSECURE=1` to include the `--insecure` flag with the `elastic-agent enroll` command {issue-beats}[20312] {pull-beats}[20713] -- Add `docker` composable dynamic provider. {pull-beats}[20842] -- Add support for dynamic inputs with providers and `{{variable|"default"}}` substitution. 
{pull-beats}[20839] -- Add support for EQL based condition on inputs {pull-beats}[20994] -- Send `fleet.host.id` to Endpoint Security {pull-beats}[21042] -- Add `install` and `uninstall` subcommands {pull-beats}[21206] -- Use new form of fleet API paths {pull-beats}[21478] -- Add `kubernetes` composable dynamic provider. {pull-beats}[21480] -- Send updating state {pull-beats}[21461] -- Add `elastic.agent.id` and `elastic.agent.version` to published events from filebeat and metricbeat {pull-beats}[21543] -- Add `upgrade` subcommand to perform upgrade of installed Elastic Agent {pull-beats}[21425] -- Update `fleet.yml` and Kibana hosts when a policy change updates the Kibana hosts {pull-beats}[21599] -- Update `install` command to perform enroll before starting Elastic Agent {pull-beats}[21772] -- Update `fleet.kibana.path` from a POLICY_CHANGE {pull-beats}[21804] -- Removed `install-service.ps1` and `uninstall-service.ps1` from Windows .zip packaging {pull-beats}[21694] -- Add `priority` to `AddOrUpdate` on dynamic composable input providers communication channel {pull-beats}[22352] -- Ship `endpoint-security` logs to elasticsearch {pull-beats}[22526] -- Log level reloadable from fleet {pull-beats}[22690] -- Push log level downstream {pull-beats}[22815] -- Add metrics collection for Agent {pull-beats}[22793] -- Add support for Fleet Server {pull-beats}[23736] -- Add support for enrollment with local bootstrap of Fleet Server {pull-beats}[23865] -- Add TLS support for Fleet Server {pull-beats}[24142] -- Add support for Fleet Server running under Elastic Agent {pull-beats}[24220] -- Add CA support to Elastic Agent docker image {pull-beats}[24486] -- Add k8s secrets provider for Agent {pull-beats}[24789] -- Add STATE_PATH, CONFIG_PATH, LOGS_PATH to Elastic Agent docker image {pull-beats}[24817] -- Add status subcommand {pull-beats}[24856] -- Add leader_election provider for k8s {pull-beats}[24267] -- Add --fleet-server-service-token and FLEET_SERVER_SERVICE_TOKEN options {pull-beats}[25083] -- Keep http and logging config during enroll {pull-beats}[25132] -- Log output of container to $LOGS_PATH/elastic-agent-start.log when LOGS_PATH set {pull-beats}[25150] -- Use `filestream` input for internal log collection. {pull-beats}[25660] -- Enable agent to send custom headers to kibana/ES {pull-beats}[26275] -- Set `agent.id` to the Fleet Agent ID in events published from inputs backed by Beats. {issue-beats}[21121] {pull-beats}[26394] {pull-beats}[26548] -- Add proxy support to artifact downloader and communication with fleet server. {pull-beats}[25219] -- Add proxy support to enroll command. {pull-beats}[26514] -- Enable configuring monitoring namespace {issue-beats}[26439] -- Communicate with Fleet Server over HTTP2. {pull-beats}[26474] -- Pass logging.metrics.enabled to beats to stop beats from adding metrics into their logs. {issue-beats}[26758] {pull-beats}[26828] -- Support Node and Service autodiscovery in kubernetes dynamic provider. {pull-beats}[26801] -- Increase Agent's mem limits in k8s. {pull-beats}[27153] -- Add new --enroll-delay option for install and enroll commands. {pull-beats}[27118] -- Add link to troubleshooting guide on fatal exits. {issue-beats}[26367] {pull-beats}[27236] -- Agent now adapts the beats queue size based on output settings. {issue-beats}[26638] {pull-beats}[27429] -- Support ephemeral containers in Kubernetes dynamic provider. {issue-beats}[#27020] {pull-beats}[27707] -- Add complete k8s metadata through composable provider. 
{pull-beats}[27691] -- Add diagnostics command to gather beat metadata. {pull-beats}[28265] -- Add diagnostics collect command to gather beat metadata, config, policy, and logs and bundle it into an archive. {pull-beats}[28461] -- Add `KIBANA_FLEET_SERVICE_TOKEN` to Elastic Agent container. {pull-beats}[28096] -- Enable pprof endpoints for beats processes. Allow pprof endpoints for elastic-agent if enabled. {pull-beats}[28983] -- Add `--pprof` flag to `elastic-agent diagnostics` and an `elastic-agent pprof` command to allow operators to gather pprof data from the agent and beats running under it. {pull-beats}[28798] -- Allow pprof endpoints for elastic-agent or beats if enabled. {pull-beats}[28983] {pull-beats}[29155] -- Add --fleet-server-es-ca-trusted-fingerprint flag to allow agent/fleet-server to work with elasticsearch clusters using self signed certs. {pull-beats}[29128] -- Discover changes in Kubernetes nodes metadata as soon as they happen. {pull-beats}[23139] -- Add results of inspect output command into archive produced by diagnostics collect. {pull-beats}[29902] -- Add support for loading input configuration from external configuration files in standalone mode. You can load inputs from YAML configuration files under the folder `{path.config}/inputs.d`. {pull-beats}[30087] -- Install command will skip install/uninstall steps when installation via package is detected on Linux distros. {pull-beats}[30289] -- Update docker/distribution dependency library to fix a security issues concerning OCI Manifest Type Confusion Issue. {pull-beats}[30462] -- Add action_input_type for the .fleet-actions-results {pull-beats}[30562] -- Add support for enabling the metrics buffer endpoint in the elastic-agent and beats it runs. diagnostics collect command will gather metrics-buffer data if enabled. {pull-beats}[30471] -- Update ack response schema and processing, add retrier for acks {pull}200[200] -- Enhance error messages and logs for process start {pull}225[225] -- Changed the default policy selection logic. When the agent has no policy id or name defined, it will fall back to defaults (defined by $FLEET_SERVER_POLICY_ID and $FLEET_DEFAULT_TOKEN_POLICY_NAME environment variables respectively). {issue-beats}[29774] {pull}226[226] -- Add Elastic APM instrumentation {pull}180[180] -- Agent can be built for `darwin/arm64`. When it's built for both `darwin/arm64` and `darwin/adm64` a universal binary is also built and packaged. {pull}203[203] -- Add support for Cloudbeat. {pull}179[179] -- Fix download verification in snapshot builds. {issue}252[252] -- Add support for kubernetes cronjobs {pull}279[279] -- Increase the download artifact timeout to 10mins and add log download statistics. {pull}308[308] -- Save the agent configuration and the state encrypted on the disk. {issue}535[535] {pull}398[398] -- Bump node.js version for heartbeat/synthetics to 16.15.0 -- Support scheduled actions and cancellation of pending actions. {issue}393[393] {pull}419[419] -- Add `@metadata.input_id` and `@metadata.stream_id` when applying the inject stream processor {pull}527[527] -- Add liveness endpoint, allow fleet-gateway component to report degraded state, add update time and messages to status output. {issue}390[390] {pull}569[569] -- Redact sensitive information on diagnostics collect command. {issue}[241] {pull}[566] -- Fix incorrectly creating a filebeat redis input when a policy contains a packetbeat redis input. 
{issue}[427] {pull}[700] -- Allow upgrade actions to be retried on failure with action queue scheduling. {issue}778[778] {pull}1219[1219] diff --git a/Dockerfile b/Dockerfile index 709dcbc7bef..fd56ef5e2ff 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -ARG GO_VERSION=1.17.10 +ARG GO_VERSION=1.18.7 FROM circleci/golang:${GO_VERSION} diff --git a/Makefile b/Makefile index 37022ff7d7d..19eca744b78 100644 --- a/Makefile +++ b/Makefile @@ -13,7 +13,7 @@ export MAGE_IMPORT_PATH mage: ifndef MAGE_PRESENT @echo Installing mage $(MAGE_VERSION). - @go get -ldflags="-X $(MAGE_IMPORT_PATH)/mage.gitTag=$(MAGE_VERSION)" ${MAGE_IMPORT_PATH}@$(MAGE_VERSION) + @go install ${MAGE_IMPORT_PATH}@$(MAGE_VERSION) @-mage -clean endif @true diff --git a/NOTICE.txt b/NOTICE.txt index ad7c25aaad6..f23805c5d87 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -617,11 +617,11 @@ you may not use this file except in compliance with the Elastic License. -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-autodiscover -Version: v0.0.0-20220404145827-89887023c1ab +Version: v0.2.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.0.0-20220404145827-89887023c1ab/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-autodiscover@v0.2.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/README.md b/README.md index 2c0dbe31f69..bd0ae71c5fc 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,6 @@ -# Elastic Agent developer docs +# Elastic Agent + +## Developer docs The source files for the general Elastic Agent documentation are currently stored in the [observability-docs](https://github.com/elastic/observability-docs) repo. The following docs are only focused on getting developers started building code for Elastic Agent. @@ -9,6 +11,14 @@ Prerequisites: - installed [mage](https://github.com/magefile/mage) - [Docker](https://docs.docker.com/get-docker/) - [X-pack](https://github.com/elastic/beats/tree/main/x-pack) to pre-exist in the parent folder of the local Git repository checkout +- [elastic-agent-changelog-tool](https://github.com/elastic/elastic-agent-changelog-tool) to add changelog fragments for changelog generation + +If you are on a Mac with M1 chip, don't forget to export some docker variable to be able to build for AMD +``` +export DOCKER_BUILDKIT=0 +export COMPOSE_DOCKER_CLI_BUILD=0 +export DOCKER_DEFAULT_PLATFORM=linux/amd64 +``` If you are on a Mac with M1 chip, don't forget to export some docker variable to be able to build for AMD ``` @@ -101,3 +111,20 @@ kubectl apply -f elastic-agent-${ELASTIC_AGENT_MODE}-kubernetes.yaml ``` kubectl -n kube-system get pods -l app=elastic-agent ``` + +## Updating dependencies/PRs +Even though we prefer `mage` for our automation, we still have some +rules implemented in our `Makefile`, and CI will use the +`Makefile`. CI will run `make check-ci`, so make sure to run it +locally before submitting any PRs to get quicker feedback instead +of waiting for a CI failure. + +### Generating the `NOTICE.txt` when updating/adding dependencies +To do so, just run `make notice`; this is also part of `make +check-ci` and is the same check our CI will do.
+ +At some point we will migrate it to mage (see discussion on +https://github.com/elastic/elastic-agent/pull/1108 and on +https://github.com/elastic/elastic-agent/issues/1107). However, until +we have the mage automation sorted out, it has been removed to avoid +confusion. diff --git a/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml b/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml new file mode 100644 index 00000000000..19844fe2dfc --- /dev/null +++ b/changelog/fragments/1660139385-Fix-a-panic-caused-by-a-race-condition-when-installing-the-Elastic-Agent.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Fix a panic caused by a race condition when installing the Elastic Agent. +pr: https://github.com/elastic/elastic-agent/pull/823 diff --git a/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml b/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml new file mode 100644 index 00000000000..f7b6ce903d3 --- /dev/null +++ b/changelog/fragments/1660158319-Upgrade-to-Go-118-Certificates-signed-with-SHA-1-are-now-rejected-See-the-Go-118.yaml @@ -0,0 +1,3 @@ +kind: breaking-change +summary: Upgrade to Go 1.18. Certificates signed with SHA-1 are now rejected. See the Go 1.18 https://tip.golang.org/doc/go1.18#sha1[release notes] for details. +pr: https://github.com/elastic/elastic-agent/pull/832 diff --git a/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml b/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml new file mode 100644 index 00000000000..9110968e91f --- /dev/null +++ b/changelog/fragments/1661188787-Add-lumberjack-input-type-to-the-Filebeat-spec.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Add `lumberjack` input type to the Filebeat spec. +pr: https://github.com/elastic/elastic-agent/pull/959 diff --git a/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml b/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml new file mode 100644 index 00000000000..04e84669955 --- /dev/null +++ b/changelog/fragments/1663143487-Add-support-for-hints-based-autodiscovery-in-kubernetes-provider.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Add support for hints-based autodiscovery in the kubernetes provider.
+pr: https://github.com/elastic/elastic-agent/pull/698 diff --git a/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml b/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml new file mode 100644 index 00000000000..b5712f4c193 --- /dev/null +++ b/changelog/fragments/1664177394-Fix-unintended-reset-of-source-URI-when-downloading-components.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Fix unintended reset of source URI when downloading components +pr: https://github.com/elastic/elastic-agent/pull/1252 diff --git a/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml b/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml new file mode 100644 index 00000000000..a94f5b66751 --- /dev/null +++ b/changelog/fragments/1664212969-Create-separate-status-reporter-for-local-only-events-so-that-degraded-fleet-che.yaml @@ -0,0 +1,4 @@ +kind: bug-fix +summary: Create separate status reporter for local-only events so that degraded fleet-checkins no longer affect health on successful fleet-checkins. +issue: https://github.com/elastic/elastic-agent/issues/1157 +pr: https://github.com/elastic/elastic-agent/pull/1285 diff --git a/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml b/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml new file mode 100644 index 00000000000..15f81e7d5ad --- /dev/null +++ b/changelog/fragments/1664230732-Improve-logging-during-upgrades.yaml @@ -0,0 +1,3 @@ +kind: feature +summary: Improve logging during upgrades. +pr: https://github.com/elastic/elastic-agent/pull/1287 diff --git a/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml b/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml new file mode 100644 index 00000000000..3e4ac3d91a5 --- /dev/null +++ b/changelog/fragments/1664360554-Add-success-log-message-after-previous-checkin-failures.yaml @@ -0,0 +1,3 @@ +kind: bug-fix +summary: Add success log message after previous checkin failures +pr: https://github.com/elastic/elastic-agent/pull/1327 diff --git a/changelog/fragments/1664989867-fix-docker-provider-processors.yaml b/changelog/fragments/1664989867-fix-docker-provider-processors.yaml new file mode 100644 index 00000000000..c7c87152479 --- /dev/null +++ b/changelog/fragments/1664989867-fix-docker-provider-processors.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; an 80ish-character-long description of the change. +summary: Fix docker provider add_fields processors + +# Long description; in case the summary is not enough to describe the change +# this field accommodates a description without length limits.
+#description: + +# Affected component; a word indicating the component this changeset affects. +component: providers + +# PR number; optional; the PR number that added the changeset. +# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +#pr: 1234 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present, it is automatically filled by the tooling with the issue linked to the PR number. +#issue: 1234 diff --git a/changelog/fragments/1665517984-improve-checkin-error-logging.yaml b/changelog/fragments/1665517984-improve-checkin-error-logging.yaml new file mode 100644 index 00000000000..7bf2777d9d5 --- /dev/null +++ b/changelog/fragments/1665517984-improve-checkin-error-logging.yaml @@ -0,0 +1,5 @@ +kind: enhancement +summary: Improve logging of Fleet check-in errors. +description: Improve logging of Fleet check-in errors and only report the local state as degraded after two consecutive failed check-ins. +pr: 1477 +issue: 1154 diff --git a/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml b/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml new file mode 100644 index 00000000000..a928c800d1e --- /dev/null +++ b/changelog/fragments/1665784342-use-stack-version-npm-synthetics.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; an 80ish-character-long description of the change. +summary: Use the stack version when installing @elastic/synthetics via npm + +# Long description; in case the summary is not enough to describe the change +# this field accommodates a description without length limits. +description: Always npm i the stack_release version of @elastic/synthetics + +# Affected component; a word indicating the component this changeset affects. +component: synthetics-integration + +# PR number; optional; the PR number that added the changeset. +# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1528 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present, it is automatically filled by the tooling with the issue linked to the PR number. +#issue: 1234
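The commented template above documents every field of a changelog fragment; for reference, a fully filled-in fragment could be created like this (a minimal sketch, all values hypothetical):

```
# Write a hypothetical fragment by hand; file names follow the
# <unix-timestamp>-<slug>.yaml pattern used by the fragments in this patch.
cat > changelog/fragments/1666000000-fix-example-crash.yaml <<'EOF'
kind: bug-fix
summary: Fix a hypothetical crash on startup
description: Optional longer description of the hypothetical change.
component: example-component
pr: 1234
issue: 5678
EOF
```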
diff --git a/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml b/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml new file mode 100644 index 00000000000..93d5999f1b0 --- /dev/null +++ b/changelog/fragments/1666088774-Fix-admin-permission-check-on-localized-windows.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: bug-fix + +# Change summary; an 80ish-character-long description of the change. +summary: Fix admin permission check on localized Windows + +# Long description; in case the summary is not enough to describe the change +# this field accommodates a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added. +# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1552 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
+issue: 857 diff --git a/deploy/kubernetes/Makefile b/deploy/kubernetes/Makefile index 35745dcec31..98e216142b7 100644 --- a/deploy/kubernetes/Makefile +++ b/deploy/kubernetes/Makefile @@ -1,8 +1,14 @@ ALL=elastic-agent-standalone elastic-agent-managed BEAT_VERSION=$(shell head -n 1 ../../version/docs/version.asciidoc | cut -c 17- ) +BRANCH_VERSION=$(shell sed -n '2p' ../../version/docs/version.asciidoc | cut -c 14- ) -.PHONY: generate-k8s $(ALL) +#variables needed for ci-create-kubernetes-templates-pull-request +ELASTIC_AGENT_REPO=kibana +ELASTIC_AGENT_REPO_PATH=x-pack/plugins/fleet/server/services/ +FILE_REPO=elastic_agent_manifest.ts +ELASTIC_AGENT_BRANCH=update-k8s-templates-$(shell date "+%Y%m%d%H%M%S") +.PHONY: generate-k8s $(ALL) generate-k8s: $(ALL) test: generate-k8s @@ -15,9 +21,66 @@ clean: @for f in $(ALL); do rm -f "$$f-kubernetes.yaml"; done $(ALL): +ifdef WITHOUTCONFIG + @echo "Generating $@-kubernetes-without-configmap.yaml" + @rm -f $@-kubernetes-without-configmap.yaml + @for f in $(shell ls $@/*.yaml | grep -v daemonset-configmap); do \ + sed -e "s/%VERSION%/VERSION/g" -e "s/%BRANCH%/${BRANCH_VERSION}/g" $$f >> $@-kubernetes-without-configmap.yaml; \ + echo --- >> $@-kubernetes-without-configmap.yaml; \ + done +else @echo "Generating $@-kubernetes.yaml" @rm -f $@-kubernetes.yaml @for f in $(shell ls $@/*.yaml); do \ - sed "s/%VERSION%/${BEAT_VERSION}/g" $$f >> $@-kubernetes.yaml; \ + sed -e "s/%VERSION%/${BEAT_VERSION}/g" -e "s/%BRANCH%/${BRANCH_VERSION}/g" $$f >> $@-kubernetes.yaml; \ echo --- >> $@-kubernetes.yaml; \ done +endif + +CHDIR_SHELL := $(SHELL) +define chdir + $(eval _D=$(firstword $(1) $(@D))) + $(info $(MAKE): cd $(_D)) $(eval SHELL = cd $(_D); $(CHDIR_SHELL)) +endef + +## ci-clone-kibana-repository : Clone Kibana Repository and copy new files for the PR +.PHONY: ci-clone-kibana-repository +ci-clone-kibana-repository: + git clone git@github.com:elastic/kibana.git + cp $(FILE_REPO) $(ELASTIC_AGENT_REPO)/$(ELASTIC_AGENT_REPO_PATH) + +## ci-create-kubernetes-templates-pull-request : Create the pull request for the kubernetes templates +$(eval HASDIFF =$(shell sh -c "git status | grep $(FILE_REPO) | wc -l")) +.PHONY: ci-create-kubernetes-templates-pull-request +ci-create-kubernetes-templates-pull-request: +ifeq ($(HASDIFF),1) + echo "INFO: Create branch to update k8s templates" + git config user.name obscloudnativemonitoring + git config user.email obs-cloudnative-monitoring@elastic.co + git checkout -b $(ELASTIC_AGENT_BRANCH) + echo "INFO: add files if any" + git add $(ELASTIC_AGENT_REPO_PATH)$(FILE_REPO) + echo "INFO: commit changes if any" + git diff --staged --quiet || git commit -m "[Automated PR] Publish kubernetes templates for elastic-agent" + echo "INFO: show remote details" + git remote -v + +ifeq ($(DRY_RUN),TRUE) + echo "INFO: skip pushing branch" +else + echo "INFO: push branch" + @git push --set-upstream origin $(ELASTIC_AGENT_BRANCH) + echo "INFO: create pull request" + @gh pr create \ + --title "Update kubernetes templates for elastic-agent" \ + --body "Automated by ${BUILD_URL}" \ + --label automation \ + --base main \ + --head $(ELASTIC_AGENT_BRANCH) \ + --reviewer elastic/obs-cloudnative-monitoring +endif + +else + echo "No differences found with kibana git repository" +endif + diff --git a/deploy/kubernetes/creator_k8s_manifest.sh b/deploy/kubernetes/creator_k8s_manifest.sh new file mode 100755 index 00000000000..245f43dcb3d --- /dev/null +++ b/deploy/kubernetes/creator_k8s_manifest.sh @@ -0,0 +1,58 @@ +#!/bin/bash +#### +# Bash Script that 
creates the needed https://github.com/elastic/kibana/blob/main/x-pack/plugins/fleet/server/services/elastic_agent_manifest.ts +# The script takes as an argument the path of elastic-agent manifests +# E.g. ./creator_k8s_manifest.sh deploy/kubernetes +#### + + +STANDALONE=elastic-agent-standalone-kubernetes-without-configmap.yaml +MANAGED=elastic-agent-managed-kubernetes-without-configmap.yaml +OUTPUT_FILE=elastic_agent_manifest.ts + +#Check if arguments were provided +((!$#)) && echo "No arguments provided! Please provide the path of the elastic-agent files" && exit 1 +MANIFEST_PATH=$1 + +#Check if file elastic-agent-standalone-kubernetes-without-configmap.yaml exists +if [ ! -f "$MANIFEST_PATH/$STANDALONE" ]; then + echo "$MANIFEST_PATH/$STANDALONE does not exist" + exit 1 +fi + +#Check if file elastic-agent-managed-kubernetes-without-configmap.yaml exists +if [ ! -f "$MANIFEST_PATH/$MANAGED" ]; then + echo "$MANIFEST_PATH/$MANAGED does not exist" + exit 1 +fi + +#Start creation of output file +cat << EOF > $OUTPUT_FILE +/* +* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +* or more contributor license agreements. Licensed under the Elastic License +* 2.0; you may not use this file except in compliance with the Elastic License +* 2.0. +*/ + +export const elasticAgentStandaloneManifest = \`--- +EOF + +cat $MANIFEST_PATH/$STANDALONE >> $OUTPUT_FILE +echo "\`;" >> $OUTPUT_FILE + +cat << EOF >> $OUTPUT_FILE + +export const elasticAgentManagedManifest = \`--- +EOF + +cat $MANIFEST_PATH/$MANAGED >> $OUTPUT_FILE +echo -n "\`;" >> $OUTPUT_FILE + +#Replace all occurrences of elastic-agent-standalone +sed -i -e 's/elastic-agent-standalone/elastic-agent/g' $OUTPUT_FILE + +#Remove ES_HOST entry from file +sed -i -e '/# The Elasticsearch host to communicate with/d' $OUTPUT_FILE +sed -i -e '/ES_HOST/d' $OUTPUT_FILE +sed -i -e '/value: ""/d' $OUTPUT_FILE \ No newline at end of file diff --git a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml index 1e2403f47a2..3a41910c51a 100644 --- a/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-managed-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -15,9 +15,11 @@ spec: labels: app: elastic-agent spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes.
+ # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent @@ -43,7 +45,7 @@ spec: # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN - value: "" + value: "token-id" - name: KIBANA_HOST value: "http://kibana:5601" # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet @@ -81,21 +83,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true - name: etc-mid mountPath: /etc/machine-id readOnly: true @@ -112,26 +105,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. + - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd # Mount /etc/machine-id from the host to determine host ID # Needed for Elastic Security integration - name: etc-mid @@ -245,6 +227,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml index c3c679efa36..e1b85082ac3 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-daemonset.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-managed-by-fleet.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -15,9 +15,11 @@ spec: labels: app: elastic-agent spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. 
+ # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent @@ -43,7 +45,7 @@ spec: # Elasticsearch API key used to enroll Elastic Agents in Fleet (https://www.elastic.co/guide/en/fleet/current/fleet-enrollment-tokens.html#fleet-enrollment-tokens) # If FLEET_ENROLLMENT_TOKEN is empty then KIBANA_HOST, KIBANA_FLEET_USERNAME, KIBANA_FLEET_PASSWORD are needed - name: FLEET_ENROLLMENT_TOKEN - value: "" + value: "token-id" - name: KIBANA_HOST value: "http://kibana:5601" # The basic authentication username used to connect to Kibana and retrieve a service_token to enable Fleet @@ -81,21 +83,12 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true - name: etc-mid mountPath: /etc/machine-id readOnly: true @@ -112,26 +105,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. + - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd # Mount /etc/machine-id from the host to determine host ID # Needed for Elastic Security integration - name: etc-mid diff --git a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml index 0d961215f4e..778a4ba5520 100644 --- a/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml +++ b/deploy/kubernetes/elastic-agent-managed/elastic-agent-managed-role.yaml @@ -63,6 +63,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml index 0984f0dc8ac..373282a4c1b 100644 --- a/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +++ b/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -25,6 +25,8 @@ data: providers.kubernetes: node: ${NODE_NAME} scope: node + #Uncomment to enable hints' support + #hints.enabled: true inputs: - name: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true @@ 
-624,6 +626,7 @@ data: # period: 10s # condition: ${kubernetes.labels.app} == 'redis' --- +# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -640,19 +643,34 @@ spec: labels: app: elastic-agent-standalone spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. + # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent-standalone hostNetwork: true dnsPolicy: ClusterFirstWithHostNet + # Uncomment if using hints feature + #initContainers: + # - name: k8s-templates-downloader + # image: busybox:1.28 + # command: ['sh'] + # args: + # - -c + # - >- + # mkdir -p /etc/elastic-agent/inputs.d && + # wget -O - https://github.com/elastic/elastic-agent/archive/main.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # volumeMounts: + # - name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone image: docker.elastic.co/beats/elastic-agent:8.3.0 args: [ - "-c", "/etc/agent.yml", + "-c", "/etc/elastic-agent/agent.yml", "-e", ] env: @@ -662,7 +680,7 @@ spec: value: "elastic" # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD - value: "" + value: "changeme" # The Elasticsearch host to communicate with - name: ES_HOST value: "" @@ -674,6 +692,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: STATE_PATH + value: "/etc/elastic-agent" securityContext: runAsUser: 0 resources: @@ -684,9 +704,12 @@ spec: memory: 400Mi volumeMounts: - name: datastreams - mountPath: /etc/agent.yml + mountPath: /etc/elastic-agent/agent.yml readOnly: true subPath: agent.yml + # Uncomment if using hints feature + #- name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d - name: proc mountPath: /hostfs/proc readOnly: true @@ -699,26 +722,20 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true volumes: - name: datastreams configMap: defaultMode: 0640 name: agent-node-datastreams + # Uncomment if using hints feature + #- name: external-inputs + # emptyDir: {} - name: proc hostPath: path: /proc @@ -731,26 +748,15 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. 
+ - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd --- apiVersion: rbac.authorization.k8s.io/v1 kind: ClusterRoleBinding @@ -858,6 +864,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml index 7048bf22adb..1a52302826d 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset-configmap.yaml @@ -1,4 +1,4 @@ -# For more information refer to https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html +# For more information https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: v1 kind: ConfigMap metadata: @@ -25,6 +25,8 @@ data: providers.kubernetes: node: ${NODE_NAME} scope: node + #Uncomment to enable hints' support + #hints.enabled: true inputs: - name: kubernetes-cluster-metrics condition: ${kubernetes_leaderelection.leader} == true diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml index 0bf131ec8ea..d40291d2ed1 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-daemonset.yaml @@ -1,3 +1,4 @@ +# For more information refer https://www.elastic.co/guide/en/fleet/current/running-on-kubernetes-standalone.html apiVersion: apps/v1 kind: DaemonSet metadata: @@ -14,19 +15,34 @@ spec: labels: app: elastic-agent-standalone spec: - # Tolerations are needed to run Elastic Agent on Kubernetes master nodes. - # Agents running on master nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes + # Tolerations are needed to run Elastic Agent on Kubernetes control-plane nodes. 
+ # Agents running on control-plane nodes collect metrics from the control plane components (scheduler, controller manager) of Kubernetes tolerations: + - key: node-role.kubernetes.io/control-plane + effect: NoSchedule - key: node-role.kubernetes.io/master effect: NoSchedule serviceAccountName: elastic-agent-standalone hostNetwork: true dnsPolicy: ClusterFirstWithHostNet + # Uncomment if using hints feature + #initContainers: + # - name: k8s-templates-downloader + # image: busybox:1.28 + # command: ['sh'] + # args: + # - -c + # - >- + # mkdir -p /etc/elastic-agent/inputs.d && + # wget -O - https://github.com/elastic/elastic-agent/archive/%BRANCH%.tar.gz | tar xz -C /etc/elastic-agent/inputs.d --strip=5 "elastic-agent-main/deploy/kubernetes/elastic-agent-standalone/templates.d" + # volumeMounts: + # - name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d containers: - name: elastic-agent-standalone image: docker.elastic.co/beats/elastic-agent:%VERSION% args: [ - "-c", "/etc/agent.yml", + "-c", "/etc/elastic-agent/agent.yml", "-e", ] env: @@ -36,7 +52,7 @@ spec: value: "elastic" # The basic authentication password used to connect to Elasticsearch - name: ES_PASSWORD - value: "" + value: "changeme" # The Elasticsearch host to communicate with - name: ES_HOST value: "" @@ -48,6 +64,8 @@ spec: valueFrom: fieldRef: fieldPath: metadata.name + - name: STATE_PATH + value: "/etc/elastic-agent" securityContext: runAsUser: 0 resources: @@ -58,9 +76,12 @@ spec: memory: 400Mi volumeMounts: - name: datastreams - mountPath: /etc/agent.yml + mountPath: /etc/elastic-agent/agent.yml readOnly: true subPath: agent.yml + # Uncomment if using hints feature + #- name: external-inputs + # mountPath: /etc/elastic-agent/inputs.d - name: proc mountPath: /hostfs/proc readOnly: true @@ -73,26 +94,20 @@ spec: - name: varlog mountPath: /var/log readOnly: true - - name: etc-kubernetes - mountPath: /hostfs/etc/kubernetes + - name: etc-full + mountPath: /hostfs/etc readOnly: true - name: var-lib mountPath: /hostfs/var/lib readOnly: true - - name: passwd - mountPath: /hostfs/etc/passwd - readOnly: true - - name: group - mountPath: /hostfs/etc/group - readOnly: true - - name: etcsysmd - mountPath: /hostfs/etc/systemd - readOnly: true volumes: - name: datastreams configMap: defaultMode: 0640 name: agent-node-datastreams + # Uncomment if using hints feature + #- name: external-inputs + # emptyDir: {} - name: proc hostPath: path: /proc @@ -105,23 +120,12 @@ spec: - name: varlog hostPath: path: /var/log - # Needed for cloudbeat - - name: etc-kubernetes + # The following volumes are needed for Cloud Security Posture integration (cloudbeat) + # If you are not using this integration, then these volumes and the corresponding + # mounts can be removed. 
+ - name: etc-full hostPath: - path: /etc/kubernetes - # Needed for cloudbeat + path: /etc - name: var-lib hostPath: path: /var/lib - # Needed for cloudbeat - - name: passwd - hostPath: - path: /etc/passwd - # Needed for cloudbeat - - name: group - hostPath: - path: /etc/group - # Needed for cloudbeat - - name: etcsysmd - hostPath: - path: /etc/systemd diff --git a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml index 8a644f3aadf..a0cd80b456a 100644 --- a/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml +++ b/deploy/kubernetes/elastic-agent-standalone/elastic-agent-standalone-role.yaml @@ -63,6 +63,10 @@ rules: resources: - podsecuritypolicies verbs: ["get", "list", "watch"] + - apiGroups: [ "storage.k8s.io" ] + resources: + - storageclasses + verbs: [ "get", "list", "watch" ] --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml new file mode 100644 index 00000000000..007060a5ac0 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/activemq.yml @@ -0,0 +1,96 @@ +inputs: + - name: filestream-activemq + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.activemq.audit.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-audit + - condition: ${kubernetes.hints.activemq.log.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^\d{4}-\d{2}-\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.activemq.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - activemq-log + data_stream.namespace: default + - name: activemq/metrics-activemq + type: activemq/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.activemq.broker.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.broker + type: metrics + hosts: + - ${kubernetes.hints.activemq.broker.host|'localhost:8161'} + metricsets: + - broker + password: ${kubernetes.hints.activemq.broker.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.broker.period|'10s'} + tags: + - forwarded + - activemq-broker + username: ${kubernetes.hints.activemq.broker.username|'admin'} + - condition: ${kubernetes.hints.activemq.queue.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.queue + type: metrics + hosts: + - ${kubernetes.hints.activemq.queue.host|'localhost:8161'} + metricsets: + - queue + password: ${kubernetes.hints.activemq.queue.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.queue.period|'10s'} + tags: + - forwarded + - activemq-queue + username: 
${kubernetes.hints.activemq.queue.username|'admin'} + - condition: ${kubernetes.hints.activemq.topic.enabled} == true or ${kubernetes.hints.activemq.enabled} == true + data_stream: + dataset: activemq.topic + type: metrics + hosts: + - ${kubernetes.hints.activemq.topic.host|'localhost:8161'} + metricsets: + - topic + password: ${kubernetes.hints.activemq.topic.password|'admin'} + path: /api/jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.activemq.topic.period|'10s'} + tags: + - forwarded + - activemq-topic + username: ${kubernetes.hints.activemq.topic.username|'admin'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml new file mode 100644 index 00000000000..a6e461a5363 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/apache.yml @@ -0,0 +1,134 @@ +inputs: + - name: filestream-apache + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.access + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.error + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.apache.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - apache-error + data_stream.namespace: default + - name: httpjson-apache + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.apache.access.enabled} == true and ${kubernetes.hints.apache.enabled} == true + config_version: "2" + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: apache.access + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="access*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - apache-access + - condition: ${kubernetes.hints.apache.error.enabled} == true and ${kubernetes.hints.apache.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: apache.error + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype=apache:error OR sourcetype=apache_error | streamstats max(_indextime) AS max_indextime + - set: + target: 
url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - apache-error + data_stream.namespace: default + - name: apache/metrics-apache + type: apache/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.apache.status.enabled} == true or ${kubernetes.hints.apache.enabled} == true + data_stream: + dataset: apache.status + type: metrics + hosts: + - ${kubernetes.hints.apache.status.host|'http://127.0.0.1'} + metricsets: + - status + period: ${kubernetes.hints.apache.status.period|'30s'} + server_status_path: /server-status + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml new file mode 100644 index 00000000000..bce4edf635c --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cassandra.yml @@ -0,0 +1,327 @@ +inputs: + - name: filestream-cassandra + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cassandra.log.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true + data_stream: + dataset: cassandra.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^([A-Z]) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cassandra.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - cassandra-systemlogs + data_stream.namespace: default + - name: jolokia/metrics-cassandra + type: jolokia/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.cassandra.metrics.enabled} == true or ${kubernetes.hints.cassandra.enabled} == true + data_stream: + dataset: cassandra.metrics + type: metrics + hosts: + - ${kubernetes.hints.cassandra.metrics.host|'localhost:8778'} + jmx.mappings: + - attributes: + - attr: ReleaseVersion + field: system.version + - attr: ClusterName + field: system.cluster + - attr: LiveNodes + field: system.live_nodes + - attr: UnreachableNodes + field: system.unreachable_nodes + - attr: LeavingNodes + field: system.leaving_nodes + - attr: JoiningNodes + field: system.joining_nodes + - attr: MovingNodes + field: system.moving_nodes + mbean: org.apache.cassandra.db:type=StorageService + - attributes: + - attr: Datacenter + field: system.data_center + - attr: Rack + field: system.rack + mbean: org.apache.cassandra.db:type=EndpointSnitchInfo + - attributes: + - attr: Count + field: storage.total_hint_in_progress + mbean: org.apache.cassandra.metrics:name=TotalHintsInProgress,type=Storage + - attributes: + - attr: Count + field: storage.total_hints + mbean: org.apache.cassandra.metrics:name=TotalHints,type=Storage + - attributes: + - attr: Count + field: storage.exceptions + mbean: org.apache.cassandra.metrics:name=Exceptions,type=Storage + - attributes: + - attr: Count + field: storage.load + mbean: org.apache.cassandra.metrics:name=Load,type=Storage + - attributes: + - attr: OneMinuteRate + field: 
hits.succeeded_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsSucceeded + - attributes: + - attr: OneMinuteRate + field: hits.failed_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsFailed + - attributes: + - attr: OneMinuteRate + field: hits.timed_out_per_second + mbean: org.apache.cassandra.metrics:type=HintsService,name=HintsTimedOut + - attributes: + - attr: CollectionTime + field: gc.concurrent_mark_sweep.collection_time + - attr: CollectionCount + field: gc.concurrent_mark_sweep.collection_count + mbean: java.lang:type=GarbageCollector,name=ConcurrentMarkSweep + - attributes: + - attr: CollectionTime + field: gc.par_new.collection_time + - attr: CollectionCount + field: gc.par_new.collection_count + mbean: java.lang:type=GarbageCollector,name=ParNew + - attributes: + - attr: HeapMemoryUsage + field: memory.heap_usage + - attr: NonHeapMemoryUsage + field: memory.other_usage + mbean: java.lang:type=Memory + - attributes: + - attr: Value + field: task.complete + mbean: org.apache.cassandra.metrics:name=CompletedTasks,type=CommitLog + - attributes: + - attr: Value + field: task.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,type=CommitLog + - attributes: + - attr: Value + field: task.total_commitlog_size + mbean: org.apache.cassandra.metrics:name=TotalCommitLogSize,type=CommitLog + - attributes: + - attr: Count + field: client_request.write.timeouts + - attr: OneMinuteRate + field: client_request.write.timeoutsms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Timeouts,scope=Write + - attributes: + - attr: Count + field: client_request.write.unavailables + - attr: OneMinuteRate + field: client_request.write.unavailablesms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Unavailables,scope=Write + - attributes: + - attr: Count + field: client_request.write.count + - attr: OneMinuteRate + field: client_request.write.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=Write + - attributes: + - attr: Count + field: client_request.write.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=Write + - attributes: + - attr: Count + field: client_request.read.timeouts + - attr: OneMinuteRate + field: client_request.read.timeoutsms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Timeouts,scope=Read + - attributes: + - attr: Count + field: client_request.read.unavailables + - attr: OneMinuteRate + field: client_request.read.unavailablesms + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Unavailables,scope=Read + - attributes: + - attr: Count + field: client_request.read.count + - attr: OneMinuteRate + field: client_request.read.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=Read + - attributes: + - attr: Count + field: client_request.read.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=Read + - attributes: + - attr: OneMinuteRate + field: client_request.range_slice.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=RangeSlice + - attributes: + - attr: Count + field: client_request.range_slice.total_latency + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=TotalLatency,scope=RangeSlice + - attributes: + - attr: OneMinuteRate + field: client_request.caswrite.one_minute_rate + mbean: 
org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=CASWrite + - attributes: + - attr: OneMinuteRate + field: client_request.casread.one_minute_rate + mbean: org.apache.cassandra.metrics:type=ClientRequest,name=Latency,scope=CASRead + - attributes: + - attr: Value + field: client.connected_native_clients + mbean: org.apache.cassandra.metrics:type=Client,name=connectedNativeClients + - attributes: + - attr: Value + field: compaction.completed + mbean: org.apache.cassandra.metrics:name=CompletedTasks,type=Compaction + - attributes: + - attr: Value + field: compaction.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,type=Compaction + - attributes: + - attr: Value + field: table.live_ss_table_count + mbean: org.apache.cassandra.metrics:type=Table,name=LiveSSTableCount + - attributes: + - attr: Value + field: table.live_disk_space_used + mbean: org.apache.cassandra.metrics:type=Table,name=LiveDiskSpaceUsed + - attributes: + - attr: Value + field: table.all_memtables_heap_size + mbean: org.apache.cassandra.metrics:type=Table,name=AllMemtablesHeapSize + - attributes: + - attr: Value + field: table.all_memtables_off_heap_size + mbean: org.apache.cassandra.metrics:type=Table,name=AllMemtablesOffHeapSize + - attributes: + - attr: OneMinuteRate + field: cache.key_cache.requests.one_minute_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Requests + - attributes: + - attr: Value + field: cache.key_cache.capacity + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=Capacity + - attributes: + - attr: Value + field: cache.key_cache.one_minute_hit_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=KeyCache,name=OneMinuteHitRate + - attributes: + - attr: OneMinuteRate + field: cache.row_cache.requests.one_minute_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Requests + - attributes: + - attr: Value + field: cache.row_cache.capacity + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=Capacity + - attributes: + - attr: Value + field: cache.row_cache.one_minute_hit_rate + mbean: org.apache.cassandra.metrics:type=Cache,scope=RowCache,name=OneMinuteHitRate + - attributes: + - attr: Value + field: thread_pools.counter_mutation_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=CounterMutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.counter_mutation_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=CounterMutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.mutation_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=MutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.mutation_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=MutationStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_repair_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=ReadRepairStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_repair_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=ReadRepairStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.read_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=ReadStage,type=ThreadPools 
+ - attributes: + - attr: Value + field: thread_pools.read_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=ReadStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.request_response_stage.request.active + mbean: org.apache.cassandra.metrics:name=ActiveTasks,path=request,scope=RequestResponseStage,type=ThreadPools + - attributes: + - attr: Value + field: thread_pools.request_response_stage.request.pending + mbean: org.apache.cassandra.metrics:name=PendingTasks,path=request,scope=RequestResponseStage,type=ThreadPools + - attributes: + - attr: Value + field: column_family.total_disk_space_used + mbean: org.apache.cassandra.metrics:name=TotalDiskSpaceUsed,type=ColumnFamily + - attributes: + - attr: Count + field: dropped_message.batch_remove + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_REMOVE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.batch_store + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=BATCH_STORE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.counter_mutation + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=COUNTER_MUTATION,name=Dropped + - attributes: + - attr: Count + field: dropped_message.hint + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=HINT,name=Dropped + - attributes: + - attr: Count + field: dropped_message.mutation + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=MUTATION,name=Dropped + - attributes: + - attr: Count + field: dropped_message.paged_range + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=PAGED_RANGE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.range_slice + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=RANGE_SLICE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.read + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=READ,name=Dropped + - attributes: + - attr: Count + field: dropped_message.read_repair + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=READ_REPAIR,name=Dropped + - attributes: + - attr: Count + field: dropped_message.request_response + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=REQUEST_RESPONSE,name=Dropped + - attributes: + - attr: Count + field: dropped_message.trace + mbean: org.apache.cassandra.metrics:type=DroppedMessage,scope=_TRACE,name=Dropped + metricsets: + - jmx + namespace: metrics + password: ${kubernetes.hints.cassandra.metrics.password|'admin'} + path: /jolokia/?ignoreErrors=true&canonicalNaming=false + period: ${kubernetes.hints.cassandra.metrics.period|'10s'} + username: ${kubernetes.hints.cassandra.metrics.username|'admin'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml new file mode 100644 index 00000000000..524cb6159f3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cef.yml @@ -0,0 +1,51 @@ +inputs: + - name: filestream-cef + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cef.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - rename: + fields: + - 
from: message + to: event.original + - decode_cef: + field: event.original + prospector: + scanner: + symlinks: true + tags: + - cef + - forwarded + data_stream.namespace: default + - name: udp-cef + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cef.log.enabled} == true or ${kubernetes.hints.cef.enabled} == true + data_stream: + dataset: cef.log + type: logs + host: localhost:9003 + processors: + - rename: + fields: + - from: message + to: event.original + - decode_cef: + field: event.original + tags: + - cef + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml new file mode 100644 index 00000000000..c8d49475fb3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/checkpoint.yml @@ -0,0 +1,62 @@ +inputs: + - name: filestream-checkpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.checkpoint.firewall.stream|'all'} + paths: null + processors: + - add_locale: null + - add_fields: + fields: + internal_zones: + - trust + target: _temp_ + - add_fields: + fields: + external_zones: + - untrust + target: _temp_ + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: tcp-checkpoint + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - forwarded + data_stream.namespace: default + - name: udp-checkpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.checkpoint.firewall.enabled} == true or ${kubernetes.hints.checkpoint.enabled} == true + data_stream: + dataset: checkpoint.firewall + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml new file mode 100644 index 00000000000..3e55b02794d --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cockroachdb.yml @@ -0,0 +1,44 @@ +inputs: + - name: prometheus/metrics-cockroachdb + type: prometheus/metrics + use_output: default + streams: + - bearer_token_file: null + condition: ${kubernetes.hints.cockroachdb.status.enabled} == true or ${kubernetes.hints.cockroachdb.enabled} == true + data_stream: + dataset: cockroachdb.status + type: metrics + hosts: + - ${kubernetes.hints.cockroachdb.status.host|'localhost:8080'} + metrics_filters.exclude: null + metrics_filters.include: null + metrics_path: /_status/vars + metricsets: + - collector + password: null + period: ${kubernetes.hints.cockroachdb.status.period|'10s'} + ssl.certificate_authorities: null + use_types: true + username: null + data_stream.namespace: default + - name: filestream-cockroachdb + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cockroachdb.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs 
+ type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml new file mode 100644 index 00000000000..95a2730c18b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/crowdstrike.yml @@ -0,0 +1,79 @@ +inputs: + - name: filestream-crowdstrike + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.crowdstrike.falcon.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.falcon + type: logs + exclude_files: + - .gz$ + multiline.match: after + multiline.max_lines: 5000 + multiline.negate: true + multiline.pattern: ^{ + multiline.timeout: 10 + parsers: + - container: + format: auto + stream: ${kubernetes.hints.crowdstrike.falcon.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - crowdstrike-falcon + - condition: ${kubernetes.hints.crowdstrike.fdr.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.fdr + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.crowdstrike.fdr.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - crowdstrike-fdr + data_stream.namespace: default + - name: aws-s3-crowdstrike + type: aws-s3 + use_output: default + streams: + - condition: ${kubernetes.hints.crowdstrike.fdr.enabled} == true or ${kubernetes.hints.crowdstrike.enabled} == true + data_stream: + dataset: crowdstrike.fdr + type: logs + queue_url: null + sqs.notification_parsing_script.source: | + function parse(n) { + var m = JSON.parse(n); + var evts = []; + var files = m.files; + var bucket = m.bucket; + if (!Array.isArray(files) || (files.length == 0) || bucket == null || bucket == "") { + return evts; + } + files.forEach(function(f){ + var evt = new S3EventV2(); + evt.SetS3BucketName(bucket); + evt.SetS3ObjectKey(f.path); + evts.push(evt); + }); + return evts; + } + tags: + - forwarded + - crowdstrike-fdr + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml new file mode 100644 index 00000000000..fc8f72c6206 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/cyberarkpas.yml @@ -0,0 +1,57 @@ +inputs: + - name: tcp-cyberarkpas + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + host: localhost:9301 + processors: + - add_locale: null + tags: + - cyberarkpas-audit + - forwarded + tcp: null + data_stream.namespace: default + - name: udp-cyberarkpas + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true or ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + host: localhost:9301 + 
processors: + - add_locale: null + tags: + - cyberarkpas-audit + - forwarded + udp: null + data_stream.namespace: default + - name: filestream-cyberarkpas + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.cyberarkpas.audit.enabled} == true and ${kubernetes.hints.cyberarkpas.enabled} == true + data_stream: + dataset: cyberarkpas.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.cyberarkpas.audit.stream|'all'} + paths: null + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - cyberarkpas-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml new file mode 100644 index 00000000000..49503b63346 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/elasticsearch.yml @@ -0,0 +1,288 @@ +inputs: + - name: filestream-elasticsearch + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.elasticsearch.audit.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + - else: + - script: + id: elasticsearch_audit + lang: javascript + source: | + var requestRegex = new RegExp("request_body=\\\[(.*)\\\]$"); function process(event) { + var message = event.Get("message"); + if (message !== null) { + var matches = message.match(requestRegex); + if (matches && matches.length > 1) { + event.Put("_request", matches[1]); + } + } + } + if: + regexp: + message: ^{ + then: + - decode_json_fields: + fields: + - message + target: _json + - rename: + fields: + - from: _json.request.body + to: _request + ignore_missing: true + - drop_fields: + fields: + - _json + - detect_mime_type: + field: _request + target: http.request.mime_type + - drop_fields: + fields: + - _request + ignore_missing: true + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.deprecation.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.deprecation + type: logs + exclude_files: + - .gz$ + - _slowlog.log$ + - _access.log$ + multiline: + match: after + negate: true + pattern: ^(\[[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.deprecation.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.gc.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.gc + type: logs + exclude_files: + - .gz$ + exclude_lines: + - '^(OpenJDK|Java HotSpot).* Server VM ' + - '^CommandLine flags: ' + - '^Memory: ' + - ^{ + multiline: + match: after + negate: true + pattern: ^(\[?[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.gc.stream|'all'} + paths: + - 
/var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.server.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.server + type: logs + exclude_files: + - .gz$ + - _slowlog.log$ + - _access.log$ + - _deprecation.log$ + multiline: + match: after + negate: true + pattern: ^(\[[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.server.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.elasticsearch.slowlog.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.slowlog + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^(\[?[0-9]{4}-[0-9]{2}-[0-9]{2}|{) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.elasticsearch.slowlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: elasticsearch/metrics-elasticsearch + type: elasticsearch/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.elasticsearch.ccr.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.ccr + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.ccr.host|'http://localhost:9200'} + metricsets: + - ccr + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.cluster_stats.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.cluster_stats + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.cluster_stats.host|'http://localhost:9200'} + metricsets: + - cluster_stats + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.enrich.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.enrich + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.enrich.host|'http://localhost:9200'} + metricsets: + - enrich + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.index + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index.host|'http://localhost:9200'} + metricsets: + - index + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index_recovery.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.index_recovery + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index_recovery.host|'http://localhost:9200'} + metricsets: + - index_recovery + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.index_summary.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + 
dataset: elasticsearch.stack_monitoring.index_summary + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.index_summary.host|'http://localhost:9200'} + metricsets: + - index_summary + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.ml_job.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.ml_job + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.ml_job.host|'http://localhost:9200'} + metricsets: + - ml_job + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.node.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.node + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.node.host|'http://localhost:9200'} + metricsets: + - node + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.node_stats.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.node_stats + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.node_stats.host|'http://localhost:9200'} + metricsets: + - node_stats + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.pending_tasks.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.pending_tasks + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.pending_tasks.host|'http://localhost:9200'} + metricsets: + - pending_tasks + period: null + scope: node + - condition: ${kubernetes.hints.elasticsearch.shard.enabled} == true or ${kubernetes.hints.elasticsearch.enabled} == true + data_stream: + dataset: elasticsearch.stack_monitoring.shard + type: metrics + hosts: + - ${kubernetes.hints.elasticsearch.shard.host|'http://localhost:9200'} + metricsets: + - shard + period: null + scope: node + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml new file mode 100644 index 00000000000..178a6098f99 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/endpoint.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-endpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.endpoint.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml new file mode 100644 index 00000000000..44b8074cb5a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/fireeye.yml @@ -0,0 +1,59 @@ +inputs: + - name: filestream-fireeye + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: fireeye.nx + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.fireeye.nx.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - 
add_locale: null + prospector: + scanner: + symlinks: true + tags: + - fireeye-nx + data_stream.namespace: default + - name: udp-fireeye + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: fireeye.nx + type: logs + fields_under_root: true + host: localhost:9523 + processors: + - add_locale: null + tags: + - fireeye-nx + - forwarded + udp: null + data_stream.namespace: default + - name: tcp-fireeye + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.fireeye.nx.enabled} == true or ${kubernetes.hints.fireeye.enabled} == true + data_stream: + dataset: fireeye.nx + type: logs + fields_under_root: true + host: localhost:9523 + processors: + - add_locale: null + tags: + - fireeye-nx + - forwarded + tcp: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml new file mode 100644 index 00000000000..cff5d5821aa --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/haproxy.yml @@ -0,0 +1,68 @@ +inputs: + - name: syslog-haproxy + type: syslog + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.log + type: logs + processors: + - add_locale: null + protocol.udp: + host: localhost:9001 + tags: + - forwarded + - haproxy-log + data_stream.namespace: default + - name: haproxy/metrics-haproxy + type: haproxy/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.info.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.info + type: metrics + hosts: + - ${kubernetes.hints.haproxy.info.host|'tcp://127.0.0.1:14567'} + metricsets: + - info + password: ${kubernetes.hints.haproxy.info.password|'admin'} + period: ${kubernetes.hints.haproxy.info.period|'10s'} + username: ${kubernetes.hints.haproxy.info.username|'admin'} + - condition: ${kubernetes.hints.haproxy.stat.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.stat + type: metrics + hosts: + - ${kubernetes.hints.haproxy.stat.host|'tcp://127.0.0.1:14567'} + metricsets: + - stat + password: ${kubernetes.hints.haproxy.stat.password|'admin'} + period: ${kubernetes.hints.haproxy.stat.period|'10s'} + username: ${kubernetes.hints.haproxy.stat.username|'admin'} + data_stream.namespace: default + - name: filestream-haproxy + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.haproxy.log.enabled} == true or ${kubernetes.hints.haproxy.enabled} == true + data_stream: + dataset: haproxy.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.haproxy.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - haproxy-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml new file mode 100644 index 00000000000..19892110b74 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hashicorp_vault.yml @@ -0,0 +1,73 @@ +inputs: + - name: filestream-hashicorp_vault + 
type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.hashicorp_vault.audit.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.hashicorp_vault.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - hashicorp-vault-audit + - condition: ${kubernetes.hints.hashicorp_vault.log.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.hashicorp_vault.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - hashicorp-vault-log + data_stream.namespace: default + - name: tcp-hashicorp_vault + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.hashicorp_vault.audit.enabled} == true and ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.audit + type: logs + host: localhost:9007 + max_message_size: 1 MiB + tags: + - hashicorp-vault-audit + - forwarded + data_stream.namespace: default + - name: prometheus/metrics-hashicorp_vault + type: prometheus/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.hashicorp_vault.metrics.enabled} == true or ${kubernetes.hints.hashicorp_vault.enabled} == true + data_stream: + dataset: hashicorp_vault.metrics + type: metrics + hosts: + - ${kubernetes.hints.hashicorp_vault.metrics.host|'http://localhost:8200'} + metrics_path: /v1/sys/metrics + metricsets: + - collector + period: ${kubernetes.hints.hashicorp_vault.metrics.period|'30s'} + query: + format: prometheus + rate_counters: true + use_types: true + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml new file mode 100644 index 00000000000..28d8f782d69 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/hid_bravura_monitor.yml @@ -0,0 +1,42 @@ +inputs: + - name: filestream-hid_bravura_monitor + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.hid_bravura_monitor.log.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true + data_stream: + dataset: hid_bravura_monitor.log + type: logs + line_terminator: carriage_return_line_feed + parsers: + - multiline: + match: after + negate: true + pattern: ^[[:cntrl:]] + type: pattern + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_fields: + fields: + event.timezone: UTC + hid_bravura_monitor.environment: PRODUCTION + hid_bravura_monitor.instancename: default + hid_bravura_monitor.instancetype: Privilege-Identity-Password + hid_bravura_monitor.node: 0.0.0.0 + target: "" + prospector.scanner.exclude_files: + - .gz$ + tags: null + data_stream.namespace: default + - name: winlog-hid_bravura_monitor + type: winlog + use_output: default + streams: + - condition: ${kubernetes.hints.hid_bravura_monitor.winlog.enabled} == true or ${kubernetes.hints.hid_bravura_monitor.enabled} == true + data_stream: + dataset: hid_bravura_monitor.winlog + type: logs + name: Hitachi-Hitachi ID 
Systems-Hitachi ID Suite/Operational + tags: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml new file mode 100644 index 00000000000..44162f4ac6b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iis.yml @@ -0,0 +1,71 @@ +inputs: + - name: filestream-iis + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.iis.access.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.access + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^# + parsers: + - container: + format: auto + stream: ${kubernetes.hints.iis.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - iis-access + - condition: ${kubernetes.hints.iis.error.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.error + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^# + parsers: + - container: + format: auto + stream: ${kubernetes.hints.iis.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - iis-error + data_stream.namespace: default + - name: iis/metrics-iis + type: iis/metrics + use_output: default + streams: + - application_pool.name: null + condition: ${kubernetes.hints.iis.application_pool.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.application_pool + type: metrics + metricsets: + - application_pool + period: ${kubernetes.hints.iis.application_pool.period|'10s'} + - condition: ${kubernetes.hints.iis.webserver.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.webserver + type: metrics + metricsets: + - webserver + period: ${kubernetes.hints.iis.webserver.period|'10s'} + - condition: ${kubernetes.hints.iis.website.enabled} == true or ${kubernetes.hints.iis.enabled} == true + data_stream: + dataset: iis.website + type: metrics + metricsets: + - website + period: ${kubernetes.hints.iis.website.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml new file mode 100644 index 00000000000..d260fead6a6 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/infoblox_nios.yml @@ -0,0 +1,63 @@ +inputs: + - name: filestream-infoblox_nios + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true + data_stream: + dataset: infoblox_nios.log + type: logs + exclude_files: + - .gz$ + fields: + _conf: + tz_offset: local + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.infoblox_nios.log.stream|'all'} + paths: null + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + - infoblox_nios-log + data_stream.namespace: default + - name: tcp-infoblox_nios + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true + data_stream: + dataset: infoblox_nios.log + type: logs + fields: + _conf: + tz_offset: local + fields_under_root: true + host: 
localhost:9027 + tags: + - forwarded + - infoblox_nios-log + data_stream.namespace: default + - name: udp-infoblox_nios + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.infoblox_nios.log.enabled} == true or ${kubernetes.hints.infoblox_nios.enabled} == true + data_stream: + dataset: infoblox_nios.log + type: logs + fields: + _conf: + tz_offset: local + fields_under_root: true + host: localhost:9028 + tags: + - forwarded + - infoblox_nios-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml new file mode 100644 index 00000000000..02d1d8330d3 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/iptables.yml @@ -0,0 +1,54 @@ +inputs: + - name: udp-iptables + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + tags: + - iptables-log + - forwarded + data_stream.namespace: default + - name: filestream-iptables + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true and ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.iptables.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - iptables-log + - forwarded + data_stream.namespace: default + - name: journald-iptables + type: journald + use_output: default + streams: + - condition: ${kubernetes.hints.iptables.log.enabled} == true or ${kubernetes.hints.iptables.enabled} == true + data_stream: + dataset: iptables.log + type: logs + include_matches: + - _TRANSPORT=kernel + tags: + - iptables-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml new file mode 100644 index 00000000000..b79eebbcfb0 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kafka.yml @@ -0,0 +1,61 @@ +inputs: + - name: filestream-kafka + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.kafka.log.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^\[ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kafka.log.stream|'all'} + paths: + - /opt/kafka*/var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - kafka-log + data_stream.namespace: default + - name: kafka/metrics-kafka + type: kafka/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.kafka.broker.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.broker + type: metrics + hosts: + - localhost:8778 + metricsets: + - broker + period: ${kubernetes.hints.kafka.broker.period|'10s'} + - condition: ${kubernetes.hints.kafka.consumergroup.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: 
kafka.consumergroup + type: metrics + hosts: + - ${kubernetes.hints.kafka.consumergroup.host|'localhost:9092'} + metricsets: + - consumergroup + period: ${kubernetes.hints.kafka.consumergroup.period|'10s'} + - condition: ${kubernetes.hints.kafka.partition.enabled} == true or ${kubernetes.hints.kafka.enabled} == true + data_stream: + dataset: kafka.partition + type: metrics + hosts: + - ${kubernetes.hints.kafka.partition.host|'localhost:9092'} + metricsets: + - partition + period: ${kubernetes.hints.kafka.partition.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml new file mode 100644 index 00000000000..794d014d41c --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/keycloak.yml @@ -0,0 +1,23 @@ +inputs: + - name: filestream-keycloak + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.keycloak.log.enabled} == true or ${kubernetes.hints.keycloak.enabled} == true + data_stream: + dataset: keycloak.log + type: logs + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + only_user_events: false + tz_offset: local + target: _tmp + prospector.scanner.exclude_files: + - \.gz$ + tags: + - keycloak-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml new file mode 100644 index 00000000000..1c27b4830ab --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/kibana.yml @@ -0,0 +1,112 @@ +inputs: + - name: filestream-kibana + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.kibana.audit.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kibana.audit.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + - decode_json_fields: + fields: + - message + target: kibana._audit_temp + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.kibana.log.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.log + type: logs + exclude_files: + - .gz$ + json.add_error_key: true + json.keys_under_root: false + parsers: + - container: + format: auto + stream: ${kubernetes.hints.kibana.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: kibana/metrics-kibana + type: kibana/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.kibana.cluster_actions.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.cluster_actions + type: metrics + hosts: + - ${kubernetes.hints.kibana.cluster_actions.host|'http://localhost:5601'} + metricsets: + - cluster_actions + period: null + - condition: ${kubernetes.hints.kibana.cluster_rules.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.cluster_rules + 
type: metrics + hosts: + - ${kubernetes.hints.kibana.cluster_rules.host|'http://localhost:5601'} + metricsets: + - cluster_rules + period: null + - condition: ${kubernetes.hints.kibana.node_actions.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.node_actions + type: metrics + hosts: + - ${kubernetes.hints.kibana.node_actions.host|'http://localhost:5601'} + metricsets: + - node_actions + period: null + - condition: ${kubernetes.hints.kibana.node_rules.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.node_rules + type: metrics + hosts: + - ${kubernetes.hints.kibana.node_rules.host|'http://localhost:5601'} + metricsets: + - node_rules + period: null + - condition: ${kubernetes.hints.kibana.stats.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.stats + type: metrics + hosts: + - ${kubernetes.hints.kibana.stats.host|'http://localhost:5601'} + metricsets: + - stats + period: null + - condition: ${kubernetes.hints.kibana.status.enabled} == true or ${kubernetes.hints.kibana.enabled} == true + data_stream: + dataset: kibana.stack_monitoring.status + type: metrics + hosts: + - ${kubernetes.hints.kibana.status.host|'http://localhost:5601'} + metricsets: + - status + period: null + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml new file mode 100644 index 00000000000..b4627a13814 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/log.yml @@ -0,0 +1,18 @@ +inputs: + - name: filestream-log + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.log.log.enabled} == true or ${kubernetes.hints.log.enabled} == true + data_stream: + dataset: log.log + type: logs + parsers: + - container: + format: auto + stream: ${kubernetes.hints.log.log.stream|'all'} + paths: null + prospector: + scanner: + symlinks: true + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml new file mode 100644 index 00000000000..6ba62de3274 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/logstash.yml @@ -0,0 +1,75 @@ +inputs: + - name: filestream-logstash + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.logstash.log.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^((\[[0-9]{4}-[0-9]{2}-[0-9]{2}[^\]]+\])|({.+})) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.logstash.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.logstash.slowlog.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.slowlog + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.logstash.slowlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - 
add_locale.when.not.regexp.message: ^{ + - add_fields: + fields: + ecs.version: 1.10.0 + target: "" + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: logstash/metrics-logstash + type: logstash/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.logstash.node.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node + type: metrics + hosts: + - ${kubernetes.hints.logstash.node.host|'http://localhost:9600'} + metricsets: + - node + period: ${kubernetes.hints.logstash.node.period|'10s'} + - condition: ${kubernetes.hints.logstash.node_stats.enabled} == true or ${kubernetes.hints.logstash.enabled} == true + data_stream: + dataset: logstash.stack_monitoring.node_stats + type: metrics + hosts: + - ${kubernetes.hints.logstash.node_stats.host|'http://localhost:9600'} + metricsets: + - node_stats + period: ${kubernetes.hints.logstash.node_stats.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml new file mode 100644 index 00000000000..de5c8932af1 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mattermost.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-mattermost + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mattermost.audit.enabled} == true or ${kubernetes.hints.mattermost.enabled} == true + data_stream: + dataset: mattermost.audit + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mattermost.audit.stream|'all'} + paths: null + prospector: + scanner: + symlinks: true + tags: + - mattermost-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml new file mode 100644 index 00000000000..5ac70293051 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/microsoft_sqlserver.yml @@ -0,0 +1,127 @@ +inputs: + - name: winlog-microsoft_sqlserver + type: winlog + use_output: default + streams: + - condition: ${kubernetes.hints.microsoft_sqlserver.audit.enabled} == true or ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.audit + type: logs + event_id: 33205 + ignore_older: 72h + name: Security + data_stream.namespace: default + - name: filestream-microsoft_sqlserver + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.microsoft_sqlserver.log.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: ^\d{4}-\d{2}-\d{2} + parsers: + - container: + format: auto + stream: ${kubernetes.hints.microsoft_sqlserver.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - mssql-logs + data_stream.namespace: default + - name: sql/metrics-microsoft_sqlserver + type: sql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.microsoft_sqlserver.performance.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.performance + type: metrics + 
driver: mssql + dynamic_counter_name: Memory Grants Pending + hosts: + - sqlserver://${kubernetes.hints.microsoft_sqlserver.performance.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.performance.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.performance.host|'localhost'}:1433 + metricsets: + - query + period: ${kubernetes.hints.microsoft_sqlserver.performance.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT cntr_value As 'user_connections' FROM sys.dm_os_performance_counters WHERE counter_name= 'User Connections' + response_format: table + - query: SELECT cntr_value As 'active_temp_tables' FROM sys.dm_os_performance_counters WHERE counter_name = 'Active Temp Tables' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 'buffer_cache_hit_ratio' FROM sys.dm_os_performance_counters WHERE counter_name = 'Buffer cache hit ratio' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'page_splits_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Page splits/sec' + response_format: table + - query: SELECT cntr_value As 'lock_waits_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Lock Waits/sec' AND instance_name = '_Total' + response_format: table + - query: SELECT cntr_value As 'compilations_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'SQL Compilations/sec' + response_format: table + - query: SELECT cntr_value As 'batch_requests_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Batch Requests/sec' + response_format: table + - query: SELECT cntr_value As 'buffer_checkpoint_pages_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Checkpoint pages/sec' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'buffer_database_pages' FROM sys.dm_os_performance_counters WHERE counter_name = 'Database pages' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'buffer_page_life_expectancy' FROM sys.dm_os_performance_counters WHERE counter_name = 'Page life expectancy' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'buffer_target_pages' FROM sys.dm_os_performance_counters WHERE counter_name = 'Target pages' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'connection_reset_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Connection Reset/sec' AND object_name like '%Buffer Manager%' + response_format: table + - query: SELECT cntr_value As 'logins_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Logins/sec' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 'logouts_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'Logouts/sec' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 'transactions' FROM sys.dm_os_performance_counters WHERE counter_name = 'Transactions' AND object_name like '%General Statistics%' + response_format: table + - query: SELECT cntr_value As 're_compilations_per_sec' FROM sys.dm_os_performance_counters WHERE counter_name = 'SQL Re-Compilations/sec' + response_format: table + - query: SELECT counter_name As 'dynamic_counter.name', cntr_value As 'dynamic_counter.value' FROM sys.dm_os_performance_counters WHERE 
counter_name= 'Memory Grants Pending' + response_format: table + - condition: ${kubernetes.hints.microsoft_sqlserver.transaction_log.enabled} == true and ${kubernetes.hints.microsoft_sqlserver.enabled} == true + data_stream: + dataset: microsoft_sqlserver.transaction_log + type: metrics + driver: mssql + hosts: + - sqlserver://${kubernetes.hints.microsoft_sqlserver.transaction_log.username|'domain\username'}:${kubernetes.hints.microsoft_sqlserver.transaction_log.password|'verysecurepassword'}@${kubernetes.hints.microsoft_sqlserver.transaction_log.host|'localhost'}:1433 + metricsets: + - query + period: ${kubernetes.hints.microsoft_sqlserver.transaction_log.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=1; + response_format: table + - query: SELECT 'master' As database_name, database_id,total_log_size_mb,active_log_size_mb,log_backup_time,log_since_last_log_backup_mb,log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(1) master + response_format: table + - query: SELECT 'master' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage master + response_format: table + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=2; + response_format: table + - query: SELECT 'tempdb' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(2) tempdb + response_format: table + - query: SELECT 'tempdb' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage tempdb + response_format: table + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=3; + response_format: table + - query: SELECT 'model' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(3) model + response_format: table + - query: SELECT 'model' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage model + response_format: table + - query: SELECT name As 'database_name', database_id FROM sys.databases WHERE database_id=4; + response_format: table + - query: SELECT 'msdb' As 'database_name', database_id,total_log_size_mb,active_log_size_mb As active_log_size,log_backup_time,log_since_last_log_backup_mb, log_since_last_checkpoint_mb,log_recovery_size_mb FROM sys.dm_db_log_stats(4) msdb + response_format: table + - query: SELECT 'msdb' As 'database_name', total_log_size_in_bytes As total_log_size_bytes, used_log_space_in_bytes As used_log_space_bytes, used_log_space_in_percent As used_log_space_pct, log_space_in_bytes_since_last_backup FROM sys.dm_db_log_space_usage msdb + response_format: table + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml new file mode 100644 index 00000000000..23139e47852 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mimecast.yml @@ -0,0 +1,381 @@ +inputs: + - name: httpjson-mimecast + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.mimecast.audit_events.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.eventTime]]' + data_stream: + dataset: mimecast.audit_events + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + target: body.meta.pagination.pageSize + value: 500 + - set: + default: '[{"endDateTime": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "startDateTime":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"endDateTime": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "startDateTime":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/audit/get-audit-events:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/audit/get-audit-events + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + target: body.data + tags: + - forwarded + - mimecast-audit-events + - condition: ${kubernetes.hints.mimecast.dlp_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.dlp_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.eventTime]]' + request.method: POST + request.transforms: + - set: + default: '[{"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.eventTime]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/dlp/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/dlp/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.dlpLogs + target: body.data + tags: + - forwarded + - mimecast-dlp-logs + - condition: ${kubernetes.hints.mimecast.siem_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_token: + value: '[[.last_response.header.Get "mc-siem-token"]]' + data_stream: + dataset: mimecast.siem_logs + type: logs + interval: 5m + request.method: POST + request.transforms: + - 
set: + default: '[{"type":"MTA","fileFormat":"json", "compress":true}]' + target: body.data + value: '[{"type":"MTA","fileFormat":"json", "compress":true, "token": "[[.cursor.next_token]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/audit/get-siem-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + - set: + target: header.Accept + value: '*/*' + request.url: https://eu-api.mimecast.com/api/audit/get-siem-logs + response.decode_as: application/zip + response.pagination: + - set: + target: body.data + value: '[{"type":"MTA","fileFormat":"json", "compress":true, "token": "[[.last_response.header.Get "mc-siem-token"]]"}]' + value_type: json + response.split: + target: body.data + transforms: + - set: + target: body.Content-Disposition + value: '[[.last_response.header.Get "Content-Disposition"]]' + tags: + - forwarded + - mimecast-siem-logs + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_customer.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.created]]' + data_stream: + dataset: mimecast.threat_intel_malware_customer + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (.cursor.next_date) "2006-01-02T15:04:05+0700"]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/threat-intel/get-feed:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/threat-intel/get-feed + response.decode_as: application/json + response.pagination: + - set: + target: body.data + value: '[{"feedType": "malware_customer","fileType": "stix","compress": false,"token": "[[.last_response.header.Get "x-mc-threat-feed-next-token"]]","end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[.cursor.next_date]]"}]' + value_type: json + response.split: + target: body.objects + transforms: + - set: + target: body.Content-Disposition + value: '[[.last_response.header.Get "Content-Disposition"]]' + tags: + - forwarded + - mimecast-threat-intel-feed-malware-customer + - condition: ${kubernetes.hints.mimecast.threat_intel_malware_grid.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.created]]' + data_stream: + dataset: mimecast.threat_intel_malware_grid + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"end": "[[formatDate (now) 
"2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[formatDate (.cursor.next_date) "2006-01-02T15:04:05+0700"]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/threat-intel/get-feed:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/threat-intel/get-feed + response.decode_as: application/json + response.pagination: + - set: + target: body.data + value: '[{"feedType": "malware_grid","fileType": "stix","compress": false,"token": "[[.last_response.header.Get "x-mc-threat-feed-next-token"]]","end": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "start":"[[.cursor.next_date]]"}]' + value_type: json + response.split: + target: body.objects + transforms: + - set: + target: body.Content-Disposition + value: '[[.last_response.header.Get "Content-Disposition"]]' + tags: + - forwarded + - mimecast-threat-intel-feed-malware-grid + - condition: ${kubernetes.hints.mimecast.ttp_ap_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.ttp_ap_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.date]]' + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false, "route": "all", "result":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false, "route": "all", "result":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/attachment/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/attachment/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.attachmentLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-ap + - condition: ${kubernetes.hints.mimecast.ttp_ip_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: null + data_stream: + dataset: mimecast.ttp_ip_logs + type: logs + interval: 5m + next_date: + value: '[[.first_event.eventTime]]' + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false,"to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false,"to": "[[formatDate (now) 
"2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/impersonation/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/impersonation/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.impersonationLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-ip + - condition: ${kubernetes.hints.mimecast.ttp_url_logs.enabled} == true or ${kubernetes.hints.mimecast.enabled} == true + config_version: "2" + cursor: + next_date: + value: '[[.first_event.date]]' + data_stream: + dataset: mimecast.ttp_url_logs + type: logs + interval: 5m + request.method: POST + request.transforms: + - set: + default: '[{"oldestFirst": false,"scanResult": "all","route":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[formatDate (now (parseDuration "-5m")) "2006-01-02T15:04:05+0700"]]"}]' + target: body.data + value: '[{"oldestFirst": false,"scanResult": "all","route":"all","to": "[[formatDate (now) "2006-01-02T15:04:05+0700"]]", "from":"[[.cursor.next_date]]"}]' + value_type: json + - set: + target: header.x-mc-app-id + value: null + - set: + target: header.x-mc-date + value: '[[formatDate (now) "RFC1123"]]' + - set: + target: header.x-mc-req-id + value: '[[uuid]]' + - set: + fail_on_template_error: true + target: header.Authorization + value: MC :[[hmacBase64 "sha1" (base64Decode "") (sprintf "%s:%s:/api/ttp/url/get-logs:" (.header.Get "x-mc-date") (.header.Get "x-mc-req-id"))]] + request.url: https://eu-api.mimecast.com/api/ttp/url/get-logs + response.decode_as: application/json + response.pagination: + - set: + fail_on_template_error: true + target: body.meta.pagination.pageToken + value: '[[.last_response.body.meta.pagination.next]]' + response.split: + split: + target: body.clickLogs + target: body.data + tags: + - forwarded + - mimecast-ttp-url + data_stream.namespace: default + - name: filestream-mimecast + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mimecast.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml new file mode 100644 index 00000000000..cc9e109d5ed --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/modsecurity.yml @@ -0,0 +1,28 @@ +inputs: + - name: filestream-modsecurity + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.modsecurity.auditlog.enabled} == true or ${kubernetes.hints.modsecurity.enabled} == true + data_stream: + dataset: modsecurity.auditlog + type: logs + exclude_files: + - .gz$ + fields: + 
tz_offset: null + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.modsecurity.auditlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - modsec-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml new file mode 100644 index 00000000000..ece2d4439eb --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mongodb.yml @@ -0,0 +1,73 @@ +inputs: + - name: filestream-mongodb + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mongodb.log.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mongodb.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - mongodb-logs + data_stream.namespace: default + - name: mongodb/metrics-mongodb + type: mongodb/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.mongodb.collstats.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.collstats + type: metrics + hosts: + - ${kubernetes.hints.mongodb.collstats.host|'localhost:27017'} + metricsets: + - collstats + period: ${kubernetes.hints.mongodb.collstats.period|'10s'} + - condition: ${kubernetes.hints.mongodb.dbstats.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.dbstats + type: metrics + hosts: + - ${kubernetes.hints.mongodb.dbstats.host|'localhost:27017'} + metricsets: + - dbstats + period: ${kubernetes.hints.mongodb.dbstats.period|'10s'} + - condition: ${kubernetes.hints.mongodb.metrics.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.metrics + type: metrics + hosts: + - ${kubernetes.hints.mongodb.metrics.host|'localhost:27017'} + metricsets: + - metrics + period: ${kubernetes.hints.mongodb.metrics.period|'10s'} + - condition: ${kubernetes.hints.mongodb.replstatus.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.replstatus + type: metrics + hosts: + - ${kubernetes.hints.mongodb.replstatus.host|'localhost:27017'} + metricsets: + - replstatus + period: ${kubernetes.hints.mongodb.replstatus.period|'10s'} + - condition: ${kubernetes.hints.mongodb.status.enabled} == true or ${kubernetes.hints.mongodb.enabled} == true + data_stream: + dataset: mongodb.status + type: metrics + hosts: + - ${kubernetes.hints.mongodb.status.host|'localhost:27017'} + metricsets: + - status + period: ${kubernetes.hints.mongodb.status.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml new file mode 100644 index 00000000000..234caeeb40c --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql.yml @@ -0,0 +1,82 @@ +inputs: + - name: filestream-mysql + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mysql.error.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.error + type: logs + exclude_files: 
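+        # Editorial note (assumption): rotated MySQL error logs are typically
+        # gzip-compressed, so the exclude pattern below keeps them out of the
+        # filestream input.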
+ - .gz$ + multiline: + match: after + negate: true + pattern: ^([0-9]{4}-[0-9]{2}-[0-9]{2}|[0-9]{6}) + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mysql.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + - condition: ${kubernetes.hints.mysql.slowlog.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.slowlog + type: logs + exclude_files: + - .gz$ + exclude_lines: + - '^[\/\w\.]+, Version: .* started with:.*' + - ^# Time:.* + multiline: + match: after + negate: true + pattern: '^(# User@Host: |# Time: )' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.mysql.slowlog.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + data_stream.namespace: default + - name: mysql/metrics-mysql + type: mysql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.mysql.galera_status.enabled} == true and ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.galera_status + type: metrics + hosts: + - ${kubernetes.hints.mysql.galera_status.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - galera_status + password: ${kubernetes.hints.mysql.galera_status.password|'test'} + period: ${kubernetes.hints.mysql.galera_status.period|'10s'} + username: ${kubernetes.hints.mysql.galera_status.username|'root'} + - condition: ${kubernetes.hints.mysql.performance.enabled} == true and ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.performance + type: metrics + metricsets: + - performance + - condition: ${kubernetes.hints.mysql.status.enabled} == true or ${kubernetes.hints.mysql.enabled} == true + data_stream: + dataset: mysql.status + type: metrics + hosts: + - ${kubernetes.hints.mysql.status.host|'tcp(127.0.0.1:3306)/'} + metricsets: + - status + password: ${kubernetes.hints.mysql.status.password|'test'} + period: ${kubernetes.hints.mysql.status.period|'10s'} + username: ${kubernetes.hints.mysql.status.username|'root'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml new file mode 100644 index 00000000000..d943bb661ff --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/mysql_enterprise.yml @@ -0,0 +1,18 @@ +inputs: + - name: filestream-mysql_enterprise + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.mysql_enterprise.audit.enabled} == true or ${kubernetes.hints.mysql_enterprise.enabled} == true + data_stream: + dataset: mysql_enterprise.audit + type: logs + exclude_files: + - .gz$ + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + tags: + - mysql_enterprise-audit + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml new file mode 100644 index 00000000000..91525210374 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nats.yml @@ -0,0 +1,82 @@ +inputs: + - name: filestream-nats + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nats.log.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.log + type: logs + 
exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nats.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - nats-log + data_stream.namespace: default + - name: nats/metrics-nats + type: nats/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.nats.connection.enabled} == true and ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.connection + type: metrics + hosts: + - ${kubernetes.hints.nats.connection.host|'localhost:8222'} + metricsets: + - connection + period: ${kubernetes.hints.nats.connection.period|'10s'} + - condition: ${kubernetes.hints.nats.connections.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.connections + type: metrics + hosts: + - ${kubernetes.hints.nats.connections.host|'localhost:8222'} + metricsets: + - connections + period: ${kubernetes.hints.nats.connections.period|'10s'} + - condition: ${kubernetes.hints.nats.route.enabled} == true and ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.route + type: metrics + hosts: + - ${kubernetes.hints.nats.route.host|'localhost:8222'} + metricsets: + - route + period: ${kubernetes.hints.nats.route.period|'10s'} + - condition: ${kubernetes.hints.nats.routes.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.routes + type: metrics + hosts: + - ${kubernetes.hints.nats.routes.host|'localhost:8222'} + metricsets: + - routes + period: ${kubernetes.hints.nats.routes.period|'10s'} + - condition: ${kubernetes.hints.nats.stats.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.stats + type: metrics + hosts: + - ${kubernetes.hints.nats.stats.host|'localhost:8222'} + metricsets: + - stats + period: ${kubernetes.hints.nats.stats.period|'10s'} + - condition: ${kubernetes.hints.nats.subscriptions.enabled} == true or ${kubernetes.hints.nats.enabled} == true + data_stream: + dataset: nats.subscriptions + type: metrics + hosts: + - ${kubernetes.hints.nats.subscriptions.host|'localhost:8222'} + metricsets: + - subscriptions + period: ${kubernetes.hints.nats.subscriptions.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml new file mode 100644 index 00000000000..d2bb80601df --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/netflow.yml @@ -0,0 +1,47 @@ +inputs: + - name: netflow-netflow + type: netflow + use_output: default + streams: + - condition: ${kubernetes.hints.netflow.log.enabled} == true or ${kubernetes.hints.netflow.enabled} == true + data_stream: + dataset: netflow.log + type: logs + detect_sequence_reset: true + expiration_timeout: 30m + host: localhost:2055 + max_message_size: 10KiB + protocols: + - v1 + - v5 + - v6 + - v7 + - v8 + - v9 + - ipfix + queue_size: 8192 + tags: + - netflow + - forwarded + data_stream.namespace: default + - name: filestream-netflow + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.netflow.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + 
tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml new file mode 100644 index 00000000000..a9b6693e372 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx.yml @@ -0,0 +1,142 @@ +inputs: + - name: nginx/metrics-nginx + type: nginx/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.stubstatus.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.stubstatus + type: metrics + hosts: + - ${kubernetes.hints.nginx.stubstatus.host|'http://127.0.0.1:80'} + metricsets: + - stubstatus + period: ${kubernetes.hints.nginx.stubstatus.period|'10s'} + server_status_path: /nginx_status + data_stream.namespace: default + - name: filestream-nginx + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.access.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.access + type: logs + exclude_files: + - .gz$ + ignore_older: 72h + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-access + - condition: ${kubernetes.hints.nginx.error.enabled} == true or ${kubernetes.hints.nginx.enabled} == true + data_stream: + dataset: nginx.error + type: logs + exclude_files: + - .gz$ + ignore_older: 72h + multiline: + match: after + negate: true + pattern: '^\d{4}\/\d{2}\/\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-error + data_stream.namespace: default + - name: httpjson-nginx + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.nginx.access.enabled} == true and ${kubernetes.hints.nginx.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: nginx.access + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype=nginx:plus:access | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - nginx-access + - condition: ${kubernetes.hints.nginx.error.enabled} == true and ${kubernetes.hints.nginx.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: nginx.error + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype=nginx:plus:error | streamstats max(_indextime) AS max_indextime + - set: + target: 
url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - nginx-error + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml new file mode 100644 index 00000000000..5f9ba9bc7e4 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/nginx_ingress_controller.yml @@ -0,0 +1,53 @@ +inputs: + - name: filestream-nginx_ingress_controller + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.nginx_ingress_controller.access.enabled} == true or ${kubernetes.hints.nginx_ingress_controller.enabled} == true + data_stream: + dataset: nginx_ingress_controller.access + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx_ingress_controller.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-ingress-controller-access + - condition: ${kubernetes.hints.nginx_ingress_controller.error.enabled} == true or ${kubernetes.hints.nginx_ingress_controller.enabled} == true + data_stream: + dataset: nginx_ingress_controller.error + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^[A-Z]{1}[0-9]{4} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.nginx_ingress_controller.error.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - drop_event: + when: + not: + regexp: + message: '^[A-Z]{1}[0-9]{4} ' + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - nginx-ingress-controller-error + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml new file mode 100644 index 00000000000..8e846586d4b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/oracle.yml @@ -0,0 +1,82 @@ +inputs: + - name: filestream-oracle + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.database_audit.enabled} == true or ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.database_audit + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^Audit file + parsers: + - multiline: + match: after + negate: true + pattern: ^[A-Za-z]{3}\s+[A-Za-z]{3}\s+[0-9]{1,2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s[0-9]{4}\s\S[0-9]{2}:[0-9]{2} + timeout: 10 + type: pattern + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + tags: + - oracle-database_audit + data_stream.namespace: default + - name: sql/metrics-oracle + type: sql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.oracle.performance.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + 
dataset: oracle.performance + type: metrics + driver: oracle + hosts: + - ${kubernetes.hints.oracle.performance.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.performance.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT name, physical_reads, db_block_gets, consistent_gets, 1 - (physical_reads / (db_block_gets + consistent_gets)) "Hit_Ratio" FROM V$BUFFER_POOL_STATISTICS + response_format: table + - query: SELECT sum(a.value) total_cur, avg(a.value) avg_cur, max(a.value) max_cur, S.username, s.machine FROM v$sesstat a, v$statname b, v$session s WHERE a.statistic# = b.statistic# AND s.sid = a.sid GROUP BY s.username, s.machine + response_format: table + - query: SELECT total_cursors, current_cursors, sess_cur_cache_hits, parse_count_total, sess_cur_cache_hits / total_cursors as cachehits_totalcursors_ratio , sess_cur_cache_hits - parse_count_total as real_parses FROM ( SELECT sum ( decode ( name, 'opened cursors cumulative', value, 0)) total_cursors, sum ( decode ( name, 'opened cursors current',value,0)) current_cursors, sum ( decode ( name, 'session cursor cache hits',value,0)) sess_cur_cache_hits, sum ( decode ( name, 'parse count (total)',value,0)) parse_count_total FROM v$sysstat WHERE name IN ( 'opened cursors cumulative','opened cursors current','session cursor cache hits', 'parse count (total)' )) + response_format: table + - query: SELECT 'lock_requests' "Ratio" , AVG(gethitratio) FROM V$LIBRARYCACHE UNION SELECT 'pin_requests' "Ratio", AVG(pinhitratio) FROM V$LIBRARYCACHE UNION SELECT 'io_reloads' "Ratio", (SUM(reloads) / SUM(pins)) FROM V$LIBRARYCACHE + response_format: variables + - condition: ${kubernetes.hints.oracle.sysmetric.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.sysmetric + type: metrics + driver: oracle + dynamic_metric_name_filter: '%' + hosts: + - ${kubernetes.hints.oracle.sysmetric.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.sysmetric.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: SELECT METRIC_NAME, VALUE FROM V$SYSMETRIC WHERE GROUP_ID = 2 and METRIC_NAME LIKE '%' + response_format: variables + - condition: ${kubernetes.hints.oracle.tablespace.enabled} == true and ${kubernetes.hints.oracle.enabled} == true + data_stream: + dataset: oracle.tablespace + type: metrics + driver: oracle + dynamic_metric_name_filter: "" + hosts: + - ${kubernetes.hints.oracle.tablespace.host|'oracle://sys:Oradoc_db1@0.0.0.0:1521/ORCLCDB.localdomain?sysdba=1'} + metricsets: + - query + period: ${kubernetes.hints.oracle.tablespace.period|'60s'} + raw_data.enabled: true + sql_queries: + - query: WITH data_files AS (SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status FROM sys.dba_data_files UNION SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, status AS ONLINE_STATUS FROM sys.dba_temp_files), spaces AS (SELECT b.tablespace_name TB_NAME, tbs_size TB_SIZE_USED, a.free_space TB_SIZE_FREE FROM (SELECT tablespace_name, SUM(bytes) AS free_space FROM dba_free_space GROUP BY tablespace_name) a, (SELECT tablespace_name, SUM(bytes) AS tbs_size FROM dba_data_files GROUP BY tablespace_name) b WHERE a.tablespace_name(+) = b.tablespace_name AND a.tablespace_name != 'TEMP'), temp_spaces AS (SELECT tablespace_name, tablespace_size, allocated_space, free_space FROM 
dba_temp_free_space WHERE tablespace_name = 'TEMP'), details AS (SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, sp.tb_size_used, sp.tb_size_free FROM data_files df, spaces sp WHERE df.tablespace_name = sp.tb_name UNION SELECT df.file_name, df.file_id, df.tablespace_name, df.bytes, df.status, df.maxbytes, df.user_bytes, df.online_status, tsp.tablespace_size - tsp.free_space AS TB_SIZE_USED, tsp.free_space AS TB_SIZE_FREE FROM data_files df, temp_spaces tsp WHERE df.tablespace_name = tsp.tablespace_name) SELECT file_name, file_id, tablespace_name, bytes, status, maxbytes, user_bytes, online_status, tb_size_used, tb_size_free, SUM(bytes) over() AS TOTAL_BYTES FROM details + response_format: table + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml new file mode 100644 index 00000000000..93c07883f03 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw.yml @@ -0,0 +1,94 @@ +inputs: + - name: tcp-panw + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + host: localhost:9001 + max_message_size: 50KiB + processors: + - add_locale: null + - syslog: + field: message + format: auto + timezone: Local + - add_fields: + fields: + internal_zones: + - trust + target: _conf + - add_fields: + fields: + external_zones: + - untrust + target: _conf + tags: + - panw-panos + - forwarded + data_stream.namespace: default + - name: udp-panw + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + host: localhost:9001 + max_message_size: 50KiB + processors: + - add_locale: null + - syslog: + field: message + format: auto + timezone: Local + - add_fields: + fields: + internal_zones: + - trust + target: _conf + - add_fields: + fields: + external_zones: + - untrust + target: _conf + tags: + - panw-panos + - forwarded + data_stream.namespace: default + - name: filestream-panw + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.panw.panos.enabled} == true or ${kubernetes.hints.panw.enabled} == true + data_stream: + dataset: panw.panos + type: logs + exclude_files: + - .gz$ + fields: + _conf: + external_zones: + - untrust + internal_zones: + - trust + tz_offset: local + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.panw.panos.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - panw-panos + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml new file mode 100644 index 00000000000..ec6a58fd9b2 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/panw_cortex_xdr.yml @@ -0,0 +1,90 @@ +inputs: + - name: httpjson-panw_cortex_xdr + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.panw_cortex_xdr.alerts.enabled} == true or ${kubernetes.hints.panw_cortex_xdr.enabled} == true + config_version: "2" + cursor: + 
next_ts: + value: '[[.last_event.detection_timestamp]]' + data_stream: + dataset: panw_cortex_xdr.alerts + type: logs + interval: 5m + request.method: POST + request.rate_limit: + limit: '[[.last_response.header.Get "X-Rate-Limit-Limit"]]' + remaining: '[[.last_response.header.Get "X-Rate-Limit-Remaining"]]' + reset: '[[(parseDate (.last_response.header.Get "X-Rate-Limit-Reset")).Unix]]' + request.timeout: 30s + request.transforms: + - set: + target: header.Authorization + value: null + - set: + target: header.x-xdr-auth-id + value: 1 + - set: + target: body.request_data.sort.field + value: creation_time + - set: + target: body.request_data.sort.keyword + value: asc + - append: + default: |- + { + "field": "creation_time", + "operator": "gte", + "value": [[ mul (add (now (parseDuration "-24h")).Unix) 1000 ]] + } + target: body.request_data.filters + value: |- + { + "field": "creation_time", + "operator": "gte", + "value": [[ .cursor.next_ts ]] + } + value_type: json + request.url: https://test.xdr.eu.paloaltonetworks.com/public_api/v1/alerts/get_alerts_multi_events + response.pagination: + - set: + fail_on_template_error: true + target: body.request_data.search_from + value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[mul .last_response.page 100]][[end]]' + value_type: int + - set: + fail_on_template_error: true + target: body.request_data.search_to + value: '[[if (ne (len .last_response.body.reply.alerts) 0)]][[add (mul .last_response.page 100) 100]][[end]]' + value_type: int + response.split: + split: + keep_parent: true + target: body.events + target: body.reply.alerts + tags: + - forwarded + - panw_cortex_xdr + data_stream.namespace: default + - name: filestream-panw_cortex_xdr + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.panw_cortex_xdr.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml new file mode 100644 index 00000000000..e4541f90639 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/pfsense.yml @@ -0,0 +1,62 @@ +inputs: + - name: udp-pfsense + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.log.enabled} == true or ${kubernetes.hints.pfsense.enabled} == true + data_stream: + dataset: pfsense.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + tags: + - pfsense + - forwarded + data_stream.namespace: default + - name: tcp-pfsense + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.log.enabled} == true and ${kubernetes.hints.pfsense.enabled} == true + data_stream: + dataset: pfsense.log + type: logs + host: localhost:9001 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - pfsense + - forwarded + data_stream.namespace: default + - name: filestream-pfsense + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.pfsense.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs 
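+        # Fallback stream shared by these hint templates: when only the
+        # container_logs hint is enabled, raw container output is shipped to
+        # the generic kubernetes.container_logs dataset.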
+ type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml new file mode 100644 index 00000000000..a9abf518a9a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/postgresql.yml @@ -0,0 +1,68 @@ +inputs: + - name: filestream-postgresql + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.postgresql.log.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '^\d{4}-\d{2}-\d{2} ' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.postgresql.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - postgresql-log + data_stream.namespace: default + - name: postgresql/metrics-postgresql + type: postgresql/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.postgresql.activity.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.activity + type: metrics + hosts: + - ${kubernetes.hints.postgresql.activity.host|'postgres://localhost:5432'} + metricsets: + - activity + period: ${kubernetes.hints.postgresql.activity.period|'10s'} + - condition: ${kubernetes.hints.postgresql.bgwriter.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.bgwriter + type: metrics + hosts: + - ${kubernetes.hints.postgresql.bgwriter.host|'postgres://localhost:5432'} + metricsets: + - bgwriter + period: ${kubernetes.hints.postgresql.bgwriter.period|'10s'} + - condition: ${kubernetes.hints.postgresql.database.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.database + type: metrics + hosts: + - ${kubernetes.hints.postgresql.database.host|'postgres://localhost:5432'} + metricsets: + - database + period: ${kubernetes.hints.postgresql.database.period|'10s'} + - condition: ${kubernetes.hints.postgresql.statement.enabled} == true or ${kubernetes.hints.postgresql.enabled} == true + data_stream: + dataset: postgresql.statement + type: metrics + hosts: + - ${kubernetes.hints.postgresql.statement.host|'postgres://localhost:5432'} + metricsets: + - statement + period: ${kubernetes.hints.postgresql.statement.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml new file mode 100644 index 00000000000..2a7e630c9cf --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/prometheus.yml @@ -0,0 +1,90 @@ +inputs: + - name: prometheus/metrics-prometheus + type: prometheus/metrics + use_output: default + streams: + - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + condition: ${kubernetes.hints.prometheus.collector.enabled} == true or ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.collector + type: metrics + hosts: + - ${kubernetes.hints.prometheus.collector.host|'localhost:9090'} + 
metrics_filters.exclude: null + metrics_filters.include: null + metrics_path: /metrics + metricsets: + - collector + password: ${kubernetes.hints.prometheus.collector.password|'secret'} + period: ${kubernetes.hints.prometheus.collector.period|'10s'} + rate_counters: true + ssl.certificate_authorities: + - /var/run/secrets/kubernetes.io/serviceaccount/service-ca.crt + use_types: true + username: ${kubernetes.hints.prometheus.collector.username|'user'} + - condition: ${kubernetes.hints.prometheus.query.enabled} == true and ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.query + type: metrics + hosts: + - ${kubernetes.hints.prometheus.query.host|'localhost:9090'} + metricsets: + - query + period: ${kubernetes.hints.prometheus.query.period|'10s'} + queries: + - name: instant_vector + params: + query: sum(rate(prometheus_http_requests_total[1m])) + path: /api/v1/query + - name: range_vector + params: + end: "2019-12-21T00:00:00.000Z" + query: up + start: "2019-12-20T00:00:00.000Z" + step: 1h + path: /api/v1/query_range + - name: scalar + params: + query: "100" + path: /api/v1/query + - name: string + params: + query: some_value + path: /api/v1/query + - condition: ${kubernetes.hints.prometheus.remote_write.enabled} == true and ${kubernetes.hints.prometheus.enabled} == true + data_stream: + dataset: prometheus.remote_write + type: metrics + host: localhost + metricsets: + - remote_write + port: 9201 + rate_counters: true + ssl.certificate: /etc/pki/server/cert.pem + ssl.enabled: null + ssl.key: null + types_patterns.exclude: null + types_patterns.include: null + use_types: true + data_stream.namespace: default + - name: filestream-prometheus + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.prometheus.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml new file mode 100644 index 00000000000..546faa79901 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/qnap_nas.yml @@ -0,0 +1,60 @@ +inputs: + - name: udp-qnap_nas + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true and ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default + - name: filestream-qnap_nas + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: tcp-qnap_nas + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.qnap_nas.log.enabled} == true or ${kubernetes.hints.qnap_nas.enabled} == true + data_stream: + 
dataset: qnap_nas.log + type: logs + host: localhost:9301 + processors: + - add_locale: null + - add_fields: + fields: + tz_offset: local + target: _tmp + tags: + - qnap-nas + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml new file mode 100644 index 00000000000..942c4fa6911 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/rabbitmq.yml @@ -0,0 +1,79 @@ +inputs: + - name: filestream-rabbitmq + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.rabbitmq.log.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.log + type: logs + exclude_files: + - .gz$ + multiline: + match: after + negate: true + pattern: '[0-9]{4}-[0-9]{2}-[0-9]{2}' + parsers: + - container: + format: auto + stream: ${kubernetes.hints.rabbitmq.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: rabbitmq/metrics-rabbitmq + type: rabbitmq/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.rabbitmq.connection.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.connection + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.connection.host|'localhost:15672'} + metricsets: + - connection + password: ${kubernetes.hints.rabbitmq.connection.password|''} + period: ${kubernetes.hints.rabbitmq.connection.period|'10s'} + username: ${kubernetes.hints.rabbitmq.connection.username|''} + - condition: ${kubernetes.hints.rabbitmq.exchange.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.exchange + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.exchange.host|'localhost:15672'} + metricsets: + - exchange + password: ${kubernetes.hints.rabbitmq.exchange.password|''} + period: ${kubernetes.hints.rabbitmq.exchange.period|'10s'} + username: ${kubernetes.hints.rabbitmq.exchange.username|''} + - condition: ${kubernetes.hints.rabbitmq.node.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.node + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.node.host|'localhost:15672'} + metricsets: + - node + node.collect: node + password: ${kubernetes.hints.rabbitmq.node.password|''} + period: ${kubernetes.hints.rabbitmq.node.period|'10s'} + username: ${kubernetes.hints.rabbitmq.node.username|''} + - condition: ${kubernetes.hints.rabbitmq.queue.enabled} == true or ${kubernetes.hints.rabbitmq.enabled} == true + data_stream: + dataset: rabbitmq.queue + type: metrics + hosts: + - ${kubernetes.hints.rabbitmq.queue.host|'localhost:15672'} + metricsets: + - queue + password: ${kubernetes.hints.rabbitmq.queue.password|''} + period: ${kubernetes.hints.rabbitmq.queue.period|'10s'} + username: ${kubernetes.hints.rabbitmq.queue.username|''} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml new file mode 100644 index 00000000000..31731f6c1a5 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/redis.yml @@ -0,0 +1,84 @@ +inputs: + - name: filestream-redis + type: filestream + use_output: default + streams: + - 
condition: ${kubernetes.hints.redis.log.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.log + type: logs + exclude_files: + - .gz$ + exclude_lines: + - ^\s+[\-`('.|_] + parsers: + - container: + format: auto + stream: ${kubernetes.hints.redis.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - redis-log + data_stream.namespace: default + - name: redis-redis + type: redis + use_output: default + streams: + - condition: ${kubernetes.hints.redis.slowlog.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.slowlog + type: logs + hosts: + - ${kubernetes.hints.redis.slowlog.host|'127.0.0.1:6379'} + password: ${kubernetes.hints.redis.slowlog.password|''} + data_stream.namespace: default + - name: redis/metrics-redis + type: redis/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.redis.info.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.info + type: metrics + hosts: + - ${kubernetes.hints.redis.info.host|'127.0.0.1:6379'} + idle_timeout: 20s + maxconn: 10 + metricsets: + - info + network: tcp + password: ${kubernetes.hints.redis.info.password|''} + period: ${kubernetes.hints.redis.info.period|'10s'} + - condition: ${kubernetes.hints.redis.key.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.key + type: metrics + hosts: + - ${kubernetes.hints.redis.key.host|'127.0.0.1:6379'} + idle_timeout: 20s + key.patterns: + - limit: 20 + pattern: '*' + maxconn: 10 + metricsets: + - key + network: tcp + password: ${kubernetes.hints.redis.key.password|''} + period: ${kubernetes.hints.redis.key.period|'10s'} + - condition: ${kubernetes.hints.redis.keyspace.enabled} == true or ${kubernetes.hints.redis.enabled} == true + data_stream: + dataset: redis.keyspace + type: metrics + hosts: + - ${kubernetes.hints.redis.keyspace.host|'127.0.0.1:6379'} + idle_timeout: 20s + maxconn: 10 + metricsets: + - keyspace + network: tcp + password: ${kubernetes.hints.redis.keyspace.password|''} + period: ${kubernetes.hints.redis.keyspace.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml new file mode 100644 index 00000000000..d60bfeb744a --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/santa.yml @@ -0,0 +1,23 @@ +inputs: + - name: filestream-santa + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.santa.log.enabled} == true or ${kubernetes.hints.santa.enabled} == true + data_stream: + dataset: santa.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.santa.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - santa-log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml new file mode 100644 index 00000000000..990a4372e8b --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/security_detection_engine.yml @@ -0,0 +1,22 @@ +inputs: + - name: filestream-security_detection_engine + type: filestream + use_output: default 
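+    # security_detection_engine defines no log or metric streams of its own
+    # in this template, so this input only forwards the pod's container logs
+    # when the container_logs hint is set.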
+ streams: + - condition: ${kubernetes.hints.security_detection_engine.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml new file mode 100644 index 00000000000..7c06b222d78 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/sentinel_one.yml @@ -0,0 +1,217 @@ +inputs: + - name: httpjson-sentinel_one + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.activity.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_create_at: + value: '[[.last_event.createdAt]]' + data_stream: + dataset: sentinel_one.activity + type: logs + interval: 1m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: createdAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.createdAt__gte + value: '[[formatDate (parseDate .cursor.last_create_at)]]' + request.url: /web/api/v2.1/activities + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-activity + - condition: ${kubernetes.hints.sentinel_one.agent.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.updatedAt]]' + data_stream: + dataset: sentinel_one.agent + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/agents + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-agent + - condition: ${kubernetes.hints.sentinel_one.alert.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_create_at: + value: '[[.last_event.alertInfo.createdAt]]' + data_stream: + dataset: sentinel_one.alert + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: alertInfoCreatedAt + - set: + target: 
url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.createdAt__gte + value: '[[formatDate (parseDate .cursor.last_create_at)]]' + request.url: /web/api/v2.1/cloud-detection/alerts + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-alert + - condition: ${kubernetes.hints.sentinel_one.group.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.updatedAt]]' + data_stream: + dataset: sentinel_one.group + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/groups + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-group + - condition: ${kubernetes.hints.sentinel_one.threat.enabled} == true or ${kubernetes.hints.sentinel_one.enabled} == true + config_version: 2 + cursor: + last_update_at: + value: '[[.last_event.threatInfo.updatedAt]]' + data_stream: + dataset: sentinel_one.threat + type: logs + interval: 5m + request.method: GET + request.transforms: + - set: + target: header.Authorization + value: 'ApiToken ' + - set: + target: url.params.limit + value: "100" + - set: + target: url.params.sortBy + value: updatedAt + - set: + target: url.params.sortOrder + value: asc + - set: + default: '[[formatDate (now (parseDuration "-24h"))]]' + target: url.params.updatedAt__gte + value: '[[formatDate (parseDate .cursor.last_update_at)]]' + request.url: /web/api/v2.1/threats + response.pagination: + - set: + fail_on_template_error: true + target: url.params.cursor + value: '[[if (ne .last_response.body.pagination.nextCursor nil)]][[.last_response.body.pagination.nextCursor]][[else]][[.last_response.terminate_pagination]][[end]]' + response.split: + target: body.data + tags: + - forwarded + - sentinel_one-threat + data_stream.namespace: default + - name: filestream-sentinel_one + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.sentinel_one.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml new file mode 100644 index 00000000000..80ed6df384a --- /dev/null +++ 
b/deploy/kubernetes/elastic-agent-standalone/templates.d/snort.yml @@ -0,0 +1,53 @@ +inputs: + - name: filestream-snort + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.snort.log.enabled} == true or ${kubernetes.hints.snort.enabled} == true + data_stream: + dataset: snort.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.snort.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + prospector: + scanner: + symlinks: true + tags: + - forwarded + - snort.log + data_stream.namespace: default + - name: udp-snort + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.snort.log.enabled} == true or ${kubernetes.hints.snort.enabled} == true + data_stream: + dataset: snort.log + type: logs + host: localhost:9514 + processors: + - add_locale: null + - add_fields: + fields: + internal_networks: + - private + tz_offset: local + target: _tmp + tags: + - forwarded + - snort.log + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml new file mode 100644 index 00000000000..aef353751ec --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/snyk.yml @@ -0,0 +1,139 @@ +inputs: + - name: filestream-snyk + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.snyk.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - name: httpjson-snyk + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.snyk.audit.enabled} == true or ${kubernetes.hints.snyk.enabled} == true + config_version: 2 + cursor: + interval: + value: -24h + data_stream: + dataset: snyk.audit + type: logs + interval: 10s + request.body: + filters: null + request.method: POST + request.transforms: + - set: + target: header.Authorization + value: token + - set: + target: url.params.to + value: '[[ formatDate (now) "2006-01-02" ]]' + - set: + default: '[[ formatDate (now (parseDuration "-720h")) "2006-01-02" ]]' + target: url.params.from + value: '[[ formatDate (now (parseDuration .cursor.interval)) "2006-01-02" ]]' + request.url: https://snyk.io/api/v1/org//audit?page=1&sortOrder=ASC + response.pagination: + - set: + fail_on_template_error: true + target: url.params.page + value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 1]][[end]]' + response.request_body_on_pagination: true + tags: + - forwarded + - snyk-audit + - condition: ${kubernetes.hints.snyk.vulnerabilities.enabled} == true or ${kubernetes.hints.snyk.enabled} == true + config_version: 2 + cursor: + interval: + value: -24h + data_stream: + dataset: snyk.vulnerabilities + type: logs + interval: 24h + request.body: + filters: + exploitMaturity: + - mature + - proof-of-concept + - no-known-exploit + - no-data + fixable: false + identifier: null + ignored: false + isFixed: false + isPatchable: false + isPinnable: false + isUpgradable: false + languages: + - javascript + - ruby + - java + - scala 
+ - python + - golang + - php + - dotnet + - swift-objective-c + - elixir + - docker + - terraform + - kubernetes + - helm + - cloudformation + orgs: null + patched: false + priorityScore: + max: 1000 + min: 0 + projects: null + severity: + - critical + - high + - medium + - low + types: + - vuln + - license + - configuration + request.method: POST + request.timeout: 120s + request.transforms: + - set: + target: header.Authorization + value: token + - set: + target: url.params.to + value: '[[ formatDate (now) "2006-01-02" ]]' + - set: + default: '[[ formatDate (now (parseDuration "-24h")) "2006-01-02" ]]' + target: url.params.from + value: '[[ formatDate (now (parseDuration .cursor.interval)) "2006-01-02" ]]' + request.url: https://snyk.io/api/v1/reporting/issues/?page=1&perPage=10&sortBy=issueTitle&order=asc&groupBy=issue + response.pagination: + - set: + fail_on_template_error: true + target: url.params.page + value: '[[if (ne (len .last_response.body.response) 0)]][[add .last_response.page 1]][[end]]' + response.request_body_on_pagination: true + response.split: + target: body.results + tags: + - forwarded + - snyk-vulnerabilities + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml new file mode 100644 index 00000000000..9fdee28a731 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/stan.yml @@ -0,0 +1,56 @@ +inputs: + - name: filestream-stan + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.stan.log.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.log + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.stan.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - stan-log + data_stream.namespace: default + - name: stan/metrics-stan + type: stan/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.stan.channels.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.channels + type: metrics + hosts: + - ${kubernetes.hints.stan.channels.host|'localhost:8222'} + metricsets: + - channels + period: ${kubernetes.hints.stan.channels.period|'60s'} + - condition: ${kubernetes.hints.stan.stats.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.stats + type: metrics + hosts: + - ${kubernetes.hints.stan.stats.host|'localhost:8222'} + metricsets: + - stats + period: ${kubernetes.hints.stan.stats.period|'60s'} + - condition: ${kubernetes.hints.stan.subscriptions.enabled} == true or ${kubernetes.hints.stan.enabled} == true + data_stream: + dataset: stan.subscriptions + type: metrics + hosts: + - ${kubernetes.hints.stan.subscriptions.host|'localhost:8222'} + metricsets: + - subscriptions + period: ${kubernetes.hints.stan.subscriptions.period|'60s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml new file mode 100644 index 00000000000..374d369783e --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/suricata.yml @@ -0,0 +1,24 @@ +inputs: + - name: filestream-suricata + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.suricata.eve.enabled} 
== true or ${kubernetes.hints.suricata.enabled} == true + data_stream: + dataset: suricata.eve + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.suricata.eve.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - suricata-eve + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml new file mode 100644 index 00000000000..8e3ca7ce297 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/symantec_endpoint.yml @@ -0,0 +1,67 @@ +inputs: + - name: udp-symantec_endpoint + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true or ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default + - name: filestream-symantec_endpoint + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true and ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + exclude_files: + - .gz$ + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.symantec_endpoint.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default + - name: tcp-symantec_endpoint + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.symantec_endpoint.log.enabled} == true and ${kubernetes.hints.symantec_endpoint.enabled} == true + data_stream: + dataset: symantec_endpoint.log + type: logs + fields: + _conf: + remove_mapped_fields: false + tz_offset: UTC + fields_under_root: true + host: localhost:9008 + max_message_size: 1 MiB + tags: + - symantec-endpoint-log + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml new file mode 100644 index 00000000000..2f375b1a3f0 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/synthetics.yml @@ -0,0 +1,148 @@ +inputs: + - name: synthetics/http-synthetics + type: synthetics/http + use_output: default + streams: + - __ui: null + check.request.method: null + condition: ${kubernetes.hints.synthetics.http.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: http + type: synthetics + enabled: true + max_redirects: null + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + response.include_body: null + response.include_headers: null + schedule: '@every 3m' + timeout: null + type: http + urls: null + data_stream.namespace: default + - name: synthetics/tcp-synthetics + type: synthetics/tcp + use_output: default + streams: + - __ui: null + condition: 
${kubernetes.hints.synthetics.tcp.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: tcp + type: synthetics + enabled: true + hosts: ${kubernetes.hints.synthetics.tcp.host|''} + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + proxy_use_local_resolver: false + schedule: '@every 3m' + timeout: null + type: tcp + data_stream.namespace: default + - name: synthetics/icmp-synthetics + type: synthetics/icmp + use_output: default + streams: + - __ui: null + condition: ${kubernetes.hints.synthetics.icmp.enabled} == true and ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: icmp + type: synthetics + enabled: true + hosts: ${kubernetes.hints.synthetics.icmp.host|''} + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + schedule: '@every 3m' + timeout: null + type: icmp + wait: 1s + data_stream.namespace: default + - name: synthetics/browser-synthetics + type: synthetics/browser + use_output: default + streams: + - __ui: null + condition: ${kubernetes.hints.synthetics.browser.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser + type: synthetics + enabled: true + name: null + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + schedule: '@every 3m' + throttling: null + timeout: null + type: browser + - condition: ${kubernetes.hints.synthetics.browser_network.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser.network + type: synthetics + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + - condition: ${kubernetes.hints.synthetics.browser_screenshot.enabled} == true or ${kubernetes.hints.synthetics.enabled} == true + data_stream: + dataset: browser.screenshot + type: synthetics + processors: + - add_observer_metadata: + geo: + name: Fleet managed + - add_fields: + fields: + monitor.fleet_managed: true + target: "" + data_stream.namespace: default + - name: filestream-synthetics + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.synthetics.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml new file mode 100644 index 00000000000..34c8d0d984e --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tcp.yml @@ -0,0 +1,32 @@ +inputs: + - name: filestream-tcp + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.tcp.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default + - 
name: tcp-tcp + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.tcp.generic.enabled} == true or ${kubernetes.hints.tcp.enabled} == true + data_stream: + dataset: tcp.generic + type: logs + host: localhost:8080 + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml new file mode 100644 index 00000000000..1355b57befa --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/tomcat.yml @@ -0,0 +1,8296 @@ +inputs: + - name: udp-tomcat + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + host: localhost:9523 + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? 
"-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + } + + function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded? + if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. 
+ cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
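                // Editor's note, not part of the original script: the only
                // success signal available at this point is the flag protocol
                // set up by match() above. Its Dissect processor runs with
                // ignore_failure: true, so a miss never aborts the chain; it
                // merely sets a marker under "log.flags" (FLAG_FIELD). Both
                // all_match here and linear_select earlier delete that flag
                // before each step and re-read it afterwards: flag present
                // means the tokenizer did not match, flag absent means it did.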
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (msg instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + appendErrorMsg("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Make two-digit dates 00-69 interpreted as 2000-2069 + // and dates 70-99 translated to 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000; + + // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this. 
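                    // Editor's note, illustrative only: when day and month are
                    // present but the year is not (the next branch below),
                    // toDate() assumes the current year and then backs off one
                    // year if that result lands more than maxFutureDelta
                    // (2 days) in the future. For example, "Dec 31 23:59"
                    // ingested on Jan 1, 2023 resolves to 2022-12-31 rather
                    // than a date nearly a year ahead.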
+ return undefined; + } + if (this.year === undefined) { + // A date without a year. Set current year, or previous year + // if date would be in the future. + var now = new Date(); + this.year = now.getFullYear(); + var date = this.toDate(); + if (date.getTime() - now.getTime() > maxFutureDelta) { + date.setFullYear(now.getFullYear() - 1); + } + return date; + } + var MM = to2Digit(this.month); + var DD = to2Digit(this.day); + var hh = to2Digit(this.hours); + var mm = to2Digit(this.minutes); + var ss = to2Digit(this.seconds); + return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset); + } + } + + function date_time_try_pattern(fmt, str, tzOffset) { + var date = new DateContainer(tzOffset); + var pos = date_time_try_pattern_at_pos(fmt, str, 0, date); + return pos !== undefined? date.toDate() : undefined; + } + + function date_time_try_pattern_at_pos(fmt, str, pos, date) { + var len = str.length; + for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) { + pos = fmt[proc](str, pos, date); + } + return pos; + } + + function date_time(opts) { + return function (evt) { + var tzOffset = opts.tz || tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var date = date_time_try_pattern(opts.fmts[i], str, tzOffset); + if (date !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, date); + return; + } + } + if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str); + }; + } + + var uA = 60 * 60 * 24; + var uD = 60 * 60 * 24; + var uF = 60 * 60; + var uG = 60 * 60 * 24 * 30; + var uH = 60 * 60; + var uI = 60 * 60; + var uJ = 60 * 60 * 24; + var uM = 60 * 60 * 24 * 30; + var uN = 60 * 60; + var uO = 1; + var uS = 1; + var uT = 60; + var uU = 60; + var uc = dc; + + function duration(opts) { + return function(evt) { + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var seconds = duration_try_pattern(opts.fmts[i], str); + if (seconds !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, seconds); + return; + } + } + if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str); + }; + } + + function duration_try_pattern(fmt, str) { + var secs = 0; + var pos = 0; + for (var i=0; i [ month_id , how many chars to skip if month in long form ] + "Jan": [0, 4], + "Feb": [1, 5], + "Mar": [2, 2], + "Apr": [3, 2], + "May": [4, 0], + "Jun": [5, 1], + "Jul": [6, 1], + "Aug": [7, 3], + "Sep": [8, 6], + "Oct": [9, 4], + "Nov": [10, 5], + "Dec": [11, 4], + "jan": [0, 4], + "feb": [1, 5], + "mar": [2, 2], + "apr": [3, 2], + "may": [4, 0], + "jun": [5, 1], + "jul": [6, 1], + "aug": [7, 3], + "sep": [8, 6], + "oct": [9, 4], + "nov": [10, 5], + "dec": [11, 4], + }; + + // var dC = undefined; + var dR = dateMonthName(true); + var dB = dateMonthName(false); + var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth); + var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth); + var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay); + var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay); + var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours); + var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12 + var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours); + var dT = 
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
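    // Editor's note, an illustrative composition rather than a pattern taken
    // from the original file: the d? combinators above are chained into
    // tokenizer arrays, exactly as parseHMS above chains dN/dU/dO with
    // dc(":") (the literal-character matcher it relies on). A hypothetical
    // ISO-like pattern would be:
    //
    //   var fmtISO = [dW, dc("-"), dG, dc("-"), dF, dc("T"), dN, dc(":"), dU, dc(":"), dO];
    //   date_time_try_pattern(fmtISO, "2022-06-03T09:24:26", "+00:00");
    //
    // i.e. 4-digit year, variable-width month and day, then H:M:S; each
    // combinator consumes its slice of the string, stores one component on
    // the DateContainer, and returns the next position or undefined on a miss.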
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
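        // Editor's note, illustrative only: domain extraction here rides on
        // split_url() above, whose "null://" fallback lets schemeless input
        // parse as well, so extract_domain("www.example.net/path") yields
        // "www.example.net". The sibling helpers below behave the same way,
        // e.g. extract_port("https://example.net/x") yields "443" by falling
        // back to the schemePort table when no explicit port is present.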
+ return domain(dst, src); + } + + var pageFromPathRegExp = /\/([^\/]+)$/; + var pageName = 1; + + function extract_page(value) { + value = extract_path(value); + if (!value) return undefined; + var m = value.match(pageFromPathRegExp); + if (m) return m[pageName]; + } + + function page(dst, src) { + return url_wrapper(dst, src, extract_page); + } + + function extract_path(value) { + var m = split_url(value); + return m? m[uriPath] || m[uriPathAlt] : undefined; + } + + function path(dst, src) { + return url_wrapper(dst, src, extract_path); + } + + // Map common schemes to their default port. + // port has to be a string (will be converted at a later stage). + var schemePort = { + "ftp": "21", + "ssh": "22", + "http": "80", + "https": "443", + }; + + function extract_port(value) { + var m = split_url(value); + if (!m) return undefined; + if (m[uriPort]) return m[uriPort]; + if (m[uriScheme]) { + return schemePort[m[uriScheme]]; + } + } + + function port(dst, src) { + return url_wrapper(dst, src, extract_port); + } + + function extract_query(value) { + var m = split_url(value); + if (m && m[uriQuery]) return m[uriQuery]; + } + + function query(dst, src) { + return url_wrapper(dst, src, extract_query); + } + + function extract_root(value) { + var m = split_url(value); + if (m && m[uriDomain] && m[uriDomain]) { + var scheme = m[uriScheme] && m[uriScheme] !== "null"? + m[uriScheme] + "://" : ""; + var port = m[uriPort]? ":" + m[uriPort] : ""; + return scheme + m[uriDomain] + port; + } + } + + function root(dst, src) { + return url_wrapper(dst, src, extract_root); + } + + function tagval(id, src, cfg, keys, on_success) { + var fail = function(evt) { + evt.Put(FLAG_FIELD, "tagval_parsing_error"); + } + if (cfg.kv_separator.length !== 1) { + throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)"); + } + var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0? 
+ cfg.open_quote.length + cfg.close_quote.length : 0; + var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$'); + return function(evt) { + var msg = evt.Get(src); + if (msg === undefined) { + console.warn("tagval: input field is missing"); + return fail(evt); + } + var pairs = msg.split(cfg.pair_separator); + var i; + var success = false; + var prev = ""; + for (i=0; i 0 && + value.length >= cfg.open_quote.length + cfg.close_quote.length && + value.substr(0, cfg.open_quote.length) === cfg.open_quote && + value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) { + value = value.substr(cfg.open_quote.length, value.length - quotes_len); + } + evt.Put(FIELDS_PREFIX + field, value); + success = true; + } + if (!success) { + return fail(evt); + } + if (on_success != null) { + on_success(evt); + } + } + } + + var ecs_mappings = { + "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]}, + "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]}, + "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]}, + "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]}, + "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]}, + "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]}, + "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]}, + "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]}, + "application": {to:[{field: "network.application", setter: fld_set}]}, + "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]}, + "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]}, + "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]}, + "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]}, + "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]}, + "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]}, + "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]}, + "child_pid_val": {to:[{field: "process.title", setter: fld_set}]}, + "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]}, + "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]}, + "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]}, + "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]}, + "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]}, + "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]}, + "direction": {to:[{field: "network.direction", setter: fld_set}]}, + "directory": {to:[{field: "file.directory", setter: 
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
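    // Editor's note, an illustrative reading of the entry shape rather than
    // new mappings: each key is a field captured under nwparser.*; "convert"
    // coerces the raw string first (to_long, to_ip, to_mac, ...), and every
    // {field, setter} pair fans the value out to one ECS field. Judging from
    // the entries above, fld_set overwrites, fld_append accumulates (hence
    // its use on the array-valued related.* fields), and fld_prio arbitrates
    // when several captures feed the same ECS field, the lowest prio number
    // winning: "hostname" (prio 0) beats "host" (prio 1) for host.name when
    // both are present.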
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
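+ // Every entry in this map shares one shape: an optional `convert` coerces
+ // the captured string (to_long, to_ip, to_date, ...) before indexing, and
+ // `to` lists the destination fields together with a setter. fld_set
+ // overwrites, fld_append accumulates unique values into an array, and
+ // fld_prio keeps the source with the lowest prio when several raw keys
+ // feed the same field (so "resultcode", prio 0, beats "result_code", prio 1).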
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
value.toLowerCase() : value; + } + + function fld_set(dst, value) { + dst[this.field] = { v: value }; + } + + function fld_append(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: [value] }; + } else { + var base = dst[this.field]; + if (base.v.indexOf(value)===-1) base.v.push(value); + } + } + + function fld_prio(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: value, prio: this.prio}; + } else if(this.prio < dst[this.field].prio) { + dst[this.field].v = value; + dst[this.field].prio = this.prio; + } + } + + var valid_ecs_outcome = { + 'failure': true, + 'success': true, + 'unknown': true + }; + + function fld_ecs_outcome(dst, value) { + value = value.toLowerCase(); + if (valid_ecs_outcome[value] === undefined) { + value = 'unknown'; + } + if (dst[this.field] === undefined) { + dst[this.field] = { v: value }; + } else if (dst[this.field].v === 'unknown') { + dst[this.field] = { v: value }; + } + } + + function map_all(evt, targets, value) { + for (var i = 0; i < targets.length; i++) { + evt.Put(targets[i], value); + } + } + + function populate_fields(evt) { + var base = evt.Get(FIELDS_OBJECT); + if (base === null) return; + alternate_datetime(evt); + if (map_ecs) { + do_populate(evt, base, ecs_mappings); + } + if (map_rsa) { + do_populate(evt, base, rsa_mappings); + } + if (keep_raw) { + evt.Put("rsa.raw", base); + } + evt.Delete(FIELDS_OBJECT); + } + + var datetime_alt_components = [ + {field: "day", fmts: [[dF]]}, + {field: "year", fmts: [[dW]]}, + {field: "month", fmts: [[dB],[dG]]}, + {field: "date", fmts: [[dW,dSkip,dG,dSkip,dF],[dW,dSkip,dB,dSkip,dF],[dW,dSkip,dR,dSkip,dF]]}, + {field: "hour", fmts: [[dN]]}, + {field: "min", fmts: [[dU]]}, + {field: "secs", fmts: [[dO]]}, + {field: "time", fmts: [[dN, dSkip, dU, dSkip, dO]]}, + ]; + + function alternate_datetime(evt) { + if (evt.Get(FIELDS_PREFIX + "event_time") != null) { + return; + } + var tzOffset = tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var container = new DateContainer(tzOffset); + for (var i=0; i} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + + var hdr1 = match("HEADER#0:0001", "message", "%APACHETOMCAT-%{level}-%{messageid}: %{payload}", processor_chain([ + setc("header_id","0001"), + ])); + + var hdr2 = match("HEADER#1:0002", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname->} %APACHETOMCAT- %{messageid}: %{payload}", processor_chain([ + setc("header_id","0002"), + ])); + + var select1 = linear_select([ + hdr1, + hdr2, + ]); + + var msg1 = msg("ABCD", dup7); + + var msg2 = msg("BADMETHOD", dup7); + + var msg3 = msg("BADMTHD", dup7); + + var msg4 = msg("BDMTHD", dup7); + + var msg5 = msg("INDEX", dup7); + + var msg6 = msg("CFYZ", dup7); + + var msg7 = msg("CONNECT", dup7); + + var msg8 = msg("DELETE", dup7); + + var msg9 = msg("DETECT_METHOD_TYPE", dup7); + + var msg10 = msg("FGET", dup7); + + var msg11 = msg("GET", dup7); + + var msg12 = msg("get", dup7); + + var msg13 = msg("HEAD", dup7); + + var msg14 = msg("id", dup7); + + var msg15 = msg("LOCK", dup7); + + var msg16 = msg("MKCOL", dup7); + + var msg17 = msg("NCIRCLE", dup7); + + var msg18 = msg("OPTIONS", dup7); + + var msg19 = msg("POST", dup7); + + var msg20 = msg("PRONECT", dup7); + + var msg21 = msg("PROPFIND", dup7); + + var msg22 = msg("PUT", dup7); + + var msg23 = 
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + tags: + - tomcat-log + - forwarded + udp: null + data_stream.namespace: default + - name: tcp-tomcat + type: tcp + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true or ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + host: localhost:9523 + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. 
Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? "-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + } + + function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded? 
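+ // (A dissect miss doesn't abort the chain; it is recorded in log.flags,
+ // so a null flag field means this pattern matched and the remaining
+ // alternatives can be skipped.)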
+ if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. + cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + } + + function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? 
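+ // (Same flag convention as linear_select, but inverted: all_match needs
+ // every sub-pattern to hit, so any surviving flag is an overall failure.)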
+ if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1? 
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (msg instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + appendErrorMsg("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Make two-digit dates 00-69 interpreted as 2000-2069 + // and dates 70-99 translated to 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000; + + // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this. 
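+ // (A UNIX timestamp was handled above; anything else needs at least a
+ // day and a month, while a missing year is defaulted just below.)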
+
+ // DateContainer stores date fields and then converts those fields into
+ // a Date. Necessary because building a Date using its set() methods gives
+ // different results depending on the order of components.
+ function DateContainer(tzOffset) {
+ this.offset = tzOffset === undefined? "Z" : tzOffset;
+ }
+
+ DateContainer.prototype = {
+ setYear: function(v) {this.year = v;},
+ setMonth: function(v) {this.month = v;},
+ setDay: function(v) {this.day = v;},
+ setHours: function(v) {this.hours = v;},
+ setMinutes: function(v) {this.minutes = v;},
+ setSeconds: function(v) {this.seconds = v;},
+
+ setUNIX: function(v) {this.unix = v;},
+
+ set2DigitYear: function(v) {
+ this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100;
+ },
+
+ toDate: function() {
+ if (this.unix !== undefined) {
+ return new Date(this.unix * 1000);
+ }
+ if (this.day === undefined || this.month === undefined) {
+ // Can't make a date from this.
+ return undefined;
+ }
+ if (this.year === undefined) {
+ // A date without a year. Set current year, or previous year
+ // if date would be in the future.
+ var now = new Date();
+ this.year = now.getFullYear();
+ var date = this.toDate();
+ if (date.getTime() - now.getTime() > maxFutureDelta) {
+ date.setFullYear(now.getFullYear() - 1);
+ }
+ return date;
+ }
+ var MM = to2Digit(this.month);
+ var DD = to2Digit(this.day);
+ var hh = to2Digit(this.hours);
+ var mm = to2Digit(this.minutes);
+ var ss = to2Digit(this.seconds);
+ return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset);
+ }
+ };
+
+ function date_time_try_pattern(fmt, str, tzOffset) {
+ var date = new DateContainer(tzOffset);
+ var pos = date_time_try_pattern_at_pos(fmt, str, 0, date);
+ return pos !== undefined? date.toDate() : undefined;
+ }
+
+ function date_time_try_pattern_at_pos(fmt, str, pos, date) {
+ var len = str.length;
+ for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) {
+ pos = fmt[proc](str, pos, date);
+ }
+ return pos;
+ }
+
+ function date_time(opts) {
+ return function (evt) {
+ var tzOffset = opts.tz || tz_offset;
+ if (tzOffset === "event") {
+ tzOffset = evt.Get("event.timezone");
+ }
+ var str = date_time_join_args(evt, opts.args);
+ for (var i = 0; i < opts.fmts.length; i++) {
+ var date = date_time_try_pattern(opts.fmts[i], str, tzOffset);
+ if (date !== undefined) {
+ evt.Put(FIELDS_PREFIX + opts.dest, date);
+ return;
+ }
+ }
+ if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str);
+ };
+ }
+
+ // Duration unit multipliers, in seconds, used by the duration parsers.
+ var uA = 60 * 60 * 24;
+ var uD = 60 * 60 * 24;
+ var uF = 60 * 60;
+ var uG = 60 * 60 * 24 * 30;
+ var uH = 60 * 60;
+ var uI = 60 * 60;
+ var uJ = 60 * 60 * 24;
+ var uM = 60 * 60 * 24 * 30;
+ var uN = 60 * 60;
+ var uO = 1;
+ var uS = 1;
+ var uT = 60;
+ var uU = 60;
+ // uc matches a literal separator character (alias of the dc matcher).
+ var uc = dc;
+
+ function duration(opts) {
+ return function(evt) {
+ var str = date_time_join_args(evt, opts.args);
+ for (var i = 0; i < opts.fmts.length; i++) {
+ var seconds = duration_try_pattern(opts.fmts[i], str);
+ if (seconds !== undefined) {
+ evt.Put(FIELDS_PREFIX + opts.dest, seconds);
+ return;
+ }
+ }
+ if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str);
+ };
+ }
+
+ function duration_try_pattern(fmt, str) {
+ var secs = 0;
+ var pos = 0;
+ for (var i = 0; i < fmt.length; i++) {
+ var result = fmt[i](str, pos);
+ if (result === undefined) return;
+ if (typeof result === "number") {
+ // Literal matchers such as uc(":") only advance the position.
+ pos = result;
+ } else {
+ // Unit parsers return the parsed value already scaled to seconds.
+ secs += result.seconds;
+ pos = result.pos;
+ }
+ }
+ return secs;
+ }
+
+ var shortMonths = {
+ // mon => [ month_id , how many chars to skip if month in long form ]
+ "Jan": [0, 4],
+ "Feb": [1, 5],
+ "Mar": [2, 2],
+ "Apr": [3, 2],
+ "May": [4, 0],
+ "Jun": [5, 1],
+ "Jul": [6, 1],
+ "Aug": [7, 3],
+ "Sep": [8, 6],
+ "Oct": [9, 4],
+ "Nov": [10, 5],
+ "Dec": [11, 4],
+ "jan": [0, 4],
+ "feb": [1, 5],
+ "mar": [2, 2],
+ "apr": [3, 2],
+ "may": [4, 0],
+ "jun": [5, 1],
+ "jul": [6, 1],
+ "aug": [7, 3],
+ "sep": [8, 6],
+ "oct": [9, 4],
+ "nov": [10, 5],
+ "dec": [11, 4],
+ };
+
+ // var dC = undefined;
+ var dR = dateMonthName(true);
+ var dB = dateMonthName(false);
+ var dM = dateFixedWidthNumber("M", 2, 1, 12, DateContainer.prototype.setMonth);
+ var dG = dateVariableWidthNumber("G", 1, 12, DateContainer.prototype.setMonth);
+ var dD = dateFixedWidthNumber("D", 2, 1, 31, DateContainer.prototype.setDay);
+ var dF = dateVariableWidthNumber("F", 1, 31, DateContainer.prototype.setDay);
+ var dH = dateFixedWidthNumber("H", 2, 0, 24, DateContainer.prototype.setHours);
+ var dI = dateVariableWidthNumber("I", 0, 24, DateContainer.prototype.setHours); // Accept hours >12
+ var dN = dateVariableWidthNumber("N", 0, 24, DateContainer.prototype.setHours);
+ var dT = 
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
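+ // When `long` is true it also consumes the long form: the second element
+ // of each shortMonths entry says how many extra characters to skip, e.g.
+ // "January" is parsed as "Jan" plus 4 more characters.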
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
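+ // Both return the full host part of the URL, e.g. "www.example.net"
+ // for "www.example.net/path", rather than the registered domain.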
+ return domain(dst, src);
+ }
+
+ var pageFromPathRegExp = /\/([^\/]+)$/;
+ var pageName = 1;
+
+ function extract_page(value) {
+ value = extract_path(value);
+ if (!value) return undefined;
+ var m = value.match(pageFromPathRegExp);
+ if (m) return m[pageName];
+ }
+
+ function page(dst, src) {
+ return url_wrapper(dst, src, extract_page);
+ }
+
+ function extract_path(value) {
+ var m = split_url(value);
+ return m? m[uriPath] || m[uriPathAlt] : undefined;
+ }
+
+ function path(dst, src) {
+ return url_wrapper(dst, src, extract_path);
+ }
+
+ // Map common schemes to their default port.
+ // port has to be a string (will be converted at a later stage).
+ var schemePort = {
+ "ftp": "21",
+ "ssh": "22",
+ "http": "80",
+ "https": "443",
+ };
+
+ function extract_port(value) {
+ var m = split_url(value);
+ if (!m) return undefined;
+ if (m[uriPort]) return m[uriPort];
+ if (m[uriScheme]) {
+ return schemePort[m[uriScheme]];
+ }
+ }
+
+ function port(dst, src) {
+ return url_wrapper(dst, src, extract_port);
+ }
+
+ function extract_query(value) {
+ var m = split_url(value);
+ if (m && m[uriQuery]) return m[uriQuery];
+ }
+
+ function query(dst, src) {
+ return url_wrapper(dst, src, extract_query);
+ }
+
+ function extract_root(value) {
+ var m = split_url(value);
+ if (m && m[uriDomain]) {
+ var scheme = m[uriScheme] && m[uriScheme] !== "null"?
+ m[uriScheme] + "://" : "";
+ var port = m[uriPort]? ":" + m[uriPort] : "";
+ return scheme + m[uriDomain] + port;
+ }
+ }
+
+ function root(dst, src) {
+ return url_wrapper(dst, src, extract_root);
+ }
+
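+ // tagval() implements TAGVALMAP-style parsing: the input is split into
+ // pairs on cfg.pair_separator and each pair into key and value on
+ // cfg.kv_separator, mapping keys to field names via `keys`. For example,
+ // with a hypothetical cfg of {pair_separator: " ", kv_separator: "=",
+ // open_quote: "\"", close_quote: "\""} and keys {user: "username"},
+ // the pair user="bob" stores "bob" under FIELDS_PREFIX + "username".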
+ function tagval(id, src, cfg, keys, on_success) {
+ var fail = function(evt) {
+ evt.Put(FLAG_FIELD, "tagval_parsing_error");
+ };
+ if (cfg.kv_separator.length !== 1) {
+ throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)");
+ }
+ var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0?
+ cfg.open_quote.length + cfg.close_quote.length : 0;
+ var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)' + cfg.kv_separator + ' *(.*)$');
+ return function(evt) {
+ var msg = evt.Get(src);
+ if (msg === undefined) {
+ console.warn("tagval: input field is missing");
+ return fail(evt);
+ }
+ var pairs = msg.split(cfg.pair_separator);
+ var i;
+ var success = false;
+ var prev = "";
+ for (i=0; i < pairs.length; i++) {
+ var m = pairs[i].match(kv_regex);
+ if (m === null) {
+ // Not a key/value pair: treat it as a continuation of the
+ // previous value that happened to contain pair_separator.
+ if (prev !== "") {
+ evt.Put(FIELDS_PREFIX + prev, evt.Get(FIELDS_PREFIX + prev) + cfg.pair_separator + pairs[i]);
+ }
+ continue;
+ }
+ var key = m[1];
+ var value = m[2];
+ var field = keys[key];
+ if (field === undefined) {
+ if (debug) console.warn("tagval: no mapping for key: " + key);
+ prev = "";
+ continue;
+ }
+ prev = field;
+ if (quotes_len > 0 &&
+ value.length >= cfg.open_quote.length + cfg.close_quote.length &&
+ value.substr(0, cfg.open_quote.length) === cfg.open_quote &&
+ value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) {
+ value = value.substr(cfg.open_quote.length, value.length - quotes_len);
+ }
+ evt.Put(FIELDS_PREFIX + field, value);
+ success = true;
+ }
+ if (!success) {
+ return fail(evt);
+ }
+ if (on_success != null) {
+ on_success(evt);
+ }
+ }
+ }
+
+ // Maps parser fields to ECS fields. `convert` coerces the value and each
+ // `to` entry names a destination field plus a setter (fld_set, fld_append
+ // or fld_prio with a priority).
+ var ecs_mappings = {
+ "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]},
+ "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]},
+ "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]},
+ "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]},
+ "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]},
+ "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]},
+ "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]},
+ "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]},
+ "application": {to:[{field: "network.application", setter: fld_set}]},
+ "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]},
+ "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]},
+ "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]},
+ "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]},
+ "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]},
+ "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]},
+ "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]},
+ "child_pid_val": {to:[{field: "process.title", setter: fld_set}]},
+ "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]},
+ "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]},
+ "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]},
+ "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+ "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]},
+ "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]},
+ "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]},
+ "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]},
+ "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]},
+ "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]},
+ "direction": {to:[{field: "network.direction", setter: fld_set}]},
+ "directory": {to:[{field: "file.directory", setter: 
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? value.toLowerCase() : value; + } + + function fld_set(dst, value) { + dst[this.field] = { v: value }; + } + + function fld_append(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: [value] }; + } else { + var base = dst[this.field]; + if (base.v.indexOf(value)===-1) base.v.push(value); + } + } + + function fld_prio(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: value, prio: this.prio}; + } else if(this.prio < dst[this.field].prio) { + dst[this.field].v = value; + dst[this.field].prio = this.prio; + } + } + + var valid_ecs_outcome = { + 'failure': true, + 'success': true, + 'unknown': true + }; + + function fld_ecs_outcome(dst, value) { + value = value.toLowerCase(); + if (valid_ecs_outcome[value] === undefined) { + value = 'unknown'; + } + if (dst[this.field] === undefined) { + dst[this.field] = { v: value }; + } else if (dst[this.field].v === 'unknown') { + dst[this.field] = { v: value }; + } + } + + function map_all(evt, targets, value) { + for (var i = 0; i < targets.length; i++) { + evt.Put(targets[i], value); + } + } + + function populate_fields(evt) { + var base = evt.Get(FIELDS_OBJECT); + if (base === null) return; + alternate_datetime(evt); + if (map_ecs) { + do_populate(evt, base, ecs_mappings); + } + if (map_rsa) { + do_populate(evt, base, rsa_mappings); + } + if (keep_raw) { + evt.Put("rsa.raw", base); + } + evt.Delete(FIELDS_OBJECT); + } + + var datetime_alt_components = [ + {field: "day", fmts: [[dF]]}, + {field: "year", fmts: [[dW]]}, + {field: "month", fmts: [[dB],[dG]]}, + {field: "date", fmts: [[dW,dSkip,dG,dSkip,dF],[dW,dSkip,dB,dSkip,dF],[dW,dSkip,dR,dSkip,dF]]}, + {field: "hour", fmts: [[dN]]}, + {field: "min", fmts: [[dU]]}, + {field: "secs", fmts: [[dO]]}, + {field: "time", fmts: [[dN, dSkip, dU, dSkip, dO]]}, + ]; + + function alternate_datetime(evt) { + if (evt.Get(FIELDS_PREFIX + "event_time") != null) { + return; + } + var tzOffset = tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var container = new DateContainer(tzOffset); + for (var i=0; i<datetime_alt_components.length; i++) { + var dtc = datetime_alt_components[i]; + var value = evt.Get(FIELDS_PREFIX + dtc.field); + if (value == null) continue; + for (var f=0; f<dtc.fmts.length; f++) { + if (date_time_try_pattern_at_pos(dtc.fmts[f], value, 0, container) !== undefined) break; + } + } + var date = container.toDate(); + if (date != null) { + evt.Put(FIELDS_PREFIX + "event_time", date); + } + } + + var dup7 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + + var hdr1 = match("HEADER#0:0001", "message", "%APACHETOMCAT-%{level}-%{messageid}: %{payload}", processor_chain([ + setc("header_id","0001"), + ])); + + var hdr2 = match("HEADER#1:0002", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname->} %APACHETOMCAT- %{messageid}: %{payload}", processor_chain([ + setc("header_id","0002"), + ])); + + var select1 = linear_select([ + hdr1, + hdr2, + ]); + + var msg1 = msg("ABCD", dup7); + + var msg2 = msg("BADMETHOD", dup7); + + var msg3 = msg("BADMTHD", dup7); + + var msg4 = msg("BDMTHD", dup7); + + var msg5 = msg("INDEX", dup7); + + var msg6 = msg("CFYZ", dup7); + + var msg7 = msg("CONNECT", dup7); + + var msg8 = msg("DELETE", dup7); + + var msg9 = msg("DETECT_METHOD_TYPE", dup7); + + var msg10 = msg("FGET", dup7); + + var msg11 = msg("GET", dup7); + + var msg12 = msg("get", dup7); + + var msg13 = msg("HEAD", dup7); + + var msg14 = msg("id", dup7); + + var msg15 = msg("LOCK", dup7); + + var msg16 = msg("MKCOL", dup7); + + var msg17 = msg("NCIRCLE", dup7); + + var msg18 = msg("OPTIONS", dup7); + + var msg19 = msg("POST", dup7); + + var msg20 = msg("PRONECT", dup7); + + var msg21 = msg("PROPFIND", dup7); + + var msg22 = msg("PUT", dup7); + + var msg23 =
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + tags: + - tomcat-log + - forwarded + tcp: null + data_stream.namespace: default + - name: filestream-tomcat + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.tomcat.log.enabled} == true and ${kubernetes.hints.tomcat.enabled} == true + data_stream: + dataset: tomcat.log + type: logs + exclude_files: + - .gz$ + fields: + observer: + product: TomCat + type: Web + vendor: Apache + fields_under_root: true + parsers: + - container: + format: auto + stream: ${kubernetes.hints.tomcat.log.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + processors: + - script: + lang: javascript + params: + debug: false + ecs: true + keep_raw: 
false + rsa: true + tz_offset: local + source: | + // Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + // or more contributor license agreements. Licensed under the Elastic License; + // you may not use this file except in compliance with the Elastic License. + + /* jshint -W014,-W016,-W097,-W116 */ + + var processor = require("processor"); + var console = require("console"); + + var FLAG_FIELD = "log.flags"; + var FIELDS_OBJECT = "nwparser"; + var FIELDS_PREFIX = FIELDS_OBJECT + "."; + + var defaults = { + debug: false, + ecs: true, + rsa: false, + keep_raw: false, + tz_offset: "local", + strip_priority: true + }; + + var saved_flags = null; + var debug; + var map_ecs; + var map_rsa; + var keep_raw; + var device; + var tz_offset; + var strip_priority; + + // Register params from configuration. + function register(params) { + debug = params.debug !== undefined ? params.debug : defaults.debug; + map_ecs = params.ecs !== undefined ? params.ecs : defaults.ecs; + map_rsa = params.rsa !== undefined ? params.rsa : defaults.rsa; + keep_raw = params.keep_raw !== undefined ? params.keep_raw : defaults.keep_raw; + tz_offset = parse_tz_offset(params.tz_offset !== undefined? params.tz_offset : defaults.tz_offset); + strip_priority = params.strip_priority !== undefined? params.strip_priority : defaults.strip_priority; + device = new DeviceProcessor(); + } + + function parse_tz_offset(offset) { + var date; + var m; + switch(offset) { + // local uses the tz offset from the JS VM. + case "local": + date = new Date(); + // Reversing the sign as we get the offset from UTC, not to UTC. + return parse_local_tz_offset(-date.getTimezoneOffset()); + // event uses the tz offset from event.timezone (add_locale processor). + case "event": + return offset; + // Otherwise a tz offset in the form "[+-][0-9]{4}" is required. + default: + m = offset.match(/^([+\-])([0-9]{2}):?([0-9]{2})?$/); + if (m === null || m.length !== 4) { + throw("bad timezone offset: '" + offset + "'. Must have the form +HH:MM"); + } + return m[1] + m[2] + ":" + (m[3]!==undefined? m[3] : "00"); + } + } + + function parse_local_tz_offset(minutes) { + var neg = minutes < 0; + minutes = Math.abs(minutes); + var min = minutes % 60; + var hours = Math.floor(minutes / 60); + var pad2digit = function(n) { + if (n < 10) { return "0" + n;} + return "" + n; + }; + return (neg? "-" : "+") + pad2digit(hours) + ":" + pad2digit(min); + }
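+ // Illustrative sketch, not part of the original pipeline: how the two helpers above normalize timezone offsets into the "+HH:MM" form that later gets appended to ISO date strings. + function demo_tz_offsets() { + console.debug(parse_tz_offset("-0700")); // "-07:00": compact form is expanded + console.debug(parse_tz_offset("+05:30")); // "+05:30": already normalized + console.debug(parse_local_tz_offset(90)); // "+01:30": minutes east of UTC + }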
+ function process(evt) { + // Function register is only called by the processor when `params` are set + // in the processor config. + if (device === undefined) { + register(defaults); + } + return device.process(evt); + } + + function processor_chain(subprocessors) { + var builder = new processor.Chain(); + subprocessors.forEach(builder.Add); + return builder.Build().Run; + } + + function linear_select(subprocessors) { + return function (evt) { + var flags = evt.Get(FLAG_FIELD); + var i; + for (i = 0; i < subprocessors.length; i++) { + evt.Delete(FLAG_FIELD); + if (debug) console.warn("linear_select trying entry " + i); + subprocessors[i](evt); + // Dissect processor succeeded? + if (evt.Get(FLAG_FIELD) == null) break; + if (debug) console.warn("linear_select failed entry " + i); + } + if (flags !== null) { + evt.Put(FLAG_FIELD, flags); + } + if (debug) { + if (i < subprocessors.length) { + console.warn("linear_select matched entry " + i); + } else { + console.warn("linear_select didn't match"); + } + } + }; + } + + function conditional(opt) { + return function(evt) { + if (opt.if(evt)) { + opt.then(evt); + } else if (opt.else) { + opt.else(evt); + } + }; + } + + var strip_syslog_priority = (function() { + var isEnabled = function() { return strip_priority === true; }; + var fetchPRI = field("_pri"); + var fetchPayload = field("payload"); + var removePayload = remove(["payload"]); + var cleanup = remove(["_pri", "payload"]); + var onMatch = function(evt) { + var pri, priStr = fetchPRI(evt); + if (priStr != null + && 0 < priStr.length && priStr.length < 4 + && !isNaN((pri = Number(priStr))) + && 0 <= pri && pri < 192) { + var severity = pri & 7, + facility = pri >> 3; + setc("_severity", "" + severity)(evt); + setc("_facility", "" + facility)(evt); + // Replace message with priority stripped. + evt.Put("message", fetchPayload(evt)); + removePayload(evt); + } else { + // not a valid syslog PRI, cleanup. + cleanup(evt); + } + }; + return conditional({ + if: isEnabled, + then: cleanup_flags(match( + "STRIP_PRI", + "message", + "<%{_pri}>%{payload}", + onMatch + )) + }); + })(); + + function match(id, src, pattern, on_success) { + var dissect = new processor.Dissect({ + field: src, + tokenizer: pattern, + target_prefix: FIELDS_OBJECT, + ignore_failure: true, + overwrite_keys: true, + trim_values: "right" + }); + return function (evt) { + var msg = evt.Get(src); + dissect.Run(evt); + var failed = evt.Get(FLAG_FIELD) != null; + if (debug) { + if (failed) { + console.debug("dissect fail: " + id + " field:" + src); + } else { + console.debug("dissect OK: " + id + " field:" + src); + } + console.debug(" expr: <<" + pattern + ">>"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null && !failed) { + on_success(evt); + } + }; + } + + function match_copy(id, src, dst, on_success) { + dst = FIELDS_PREFIX + dst; + if (dst === FIELDS_PREFIX || dst === src) { + return function (evt) { + if (debug) { + console.debug("noop OK: " + id + " field:" + src); + console.debug(" input: <<" + evt.Get(src) + ">>"); + } + if (on_success != null) on_success(evt); + } + } + return function (evt) { + var msg = evt.Get(src); + evt.Put(dst, msg); + if (debug) { + console.debug("copy OK: " + id + " field:" + src); + console.debug(" target: '" + dst + "'"); + console.debug(" input: <<" + msg + ">>"); + } + if (on_success != null) on_success(evt); + } + } + + function cleanup_flags(processor) { + return function(evt) { + processor(evt); + evt.Delete(FLAG_FIELD); + }; + }
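+ // Illustrative sketch, not part of the original pipeline: match() wraps a Dissect processor and signals failure by leaving log.flags set, which linear_select() above and all_match() below use to pick the first pattern that fits. A hypothetical matcher built with the helpers above: + var demo_hdr = match("HEADER#X:demo", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname} %{payload}", setc("header_id", "demo")); + // demo_hdr(evt) stores captures under nwparser.* and runs the on_success processor only when the tokenizer matched.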
+ function all_match(opts) { + return function (evt) { + var i; + for (i = 0; i < opts.processors.length; i++) { + evt.Delete(FLAG_FIELD); + opts.processors[i](evt); + // Dissect processor succeeded? + if (evt.Get(FLAG_FIELD) != null) { + if (debug) console.warn("all_match failure at " + i); + if (opts.on_failure != null) opts.on_failure(evt); + return; + } + if (debug) console.warn("all_match success at " + i); + } + if (opts.on_success != null) opts.on_success(evt); + }; + } + + function msgid_select(mapping) { + return function (evt) { + var msgid = evt.Get(FIELDS_PREFIX + "messageid"); + if (msgid == null) { + if (debug) console.warn("msgid_select: no messageid captured!"); + return; + } + var next = mapping[msgid]; + if (next === undefined) { + if (debug) console.warn("msgid_select: no mapping for messageid:" + msgid); + return; + } + if (debug) console.info("msgid_select: matched key=" + msgid); + return next(evt); + }; + } + + function msg(msg_id, match) { + return function (evt) { + match(evt); + if (evt.Get(FLAG_FIELD) == null) { + evt.Put(FIELDS_PREFIX + "msg_id1", msg_id); + } + }; + } + + var start; + + function save_flags(evt) { + saved_flags = evt.Get(FLAG_FIELD); + evt.Put("event.original", evt.Get("message")); + } + + function restore_flags(evt) { + if (saved_flags !== null) { + evt.Put(FLAG_FIELD, saved_flags); + } + evt.Delete("message"); + } + + function constant(value) { + return function (evt) { + return value; + }; + } + + function field(name) { + var fullname = FIELDS_PREFIX + name; + return function (evt) { + return evt.Get(fullname); + }; + } + + function STRCAT(args) { + var s = ""; + var i; + for (i = 0; i < args.length; i++) { + s += args[i]; + } + return s; + } + + // TODO: Implement + function DIRCHK(args) { + unimplemented("DIRCHK"); + } + + function strictToInt(str) { + return str * 1; + } + + function CALC(args) { + if (args.length !== 3) { + console.warn("skipped call to CALC with " + args.length + " arguments."); + return; + } + var a = strictToInt(args[0]); + var b = strictToInt(args[2]); + if (isNaN(a) || isNaN(b)) { + console.warn("failed evaluating CALC arguments a='" + args[0] + "' b='" + args[2] + "'."); + return; + } + var result; + switch (args[1]) { + case "+": + result = a + b; + break; + case "-": + result = a - b; + break; + case "*": + result = a * b; + break; + default: + // Only * and + seen in the parsers. + console.warn("unknown CALC operation '" + args[1] + "'."); + return; + } + // Always return a string + return result !== undefined ? "" + result : result; + } + + var quoteChars = "\"'`"; + function RMQ(args) { + if(args.length !== 1) { + console.warn("RMQ: only one argument expected"); + return; + } + var value = args[0].trim(); + var n = value.length; + var char; + return n > 1 + && (char=value.charAt(0)) === value.charAt(n-1) + && quoteChars.indexOf(char) !== -1?
+ value.substr(1, n-2) + : value; + } + + function call(opts) { + var args = new Array(opts.args.length); + return function (evt) { + for (var i = 0; i < opts.args.length; i++) + if ((args[i] = opts.args[i](evt)) == null) return; + var result = opts.fn(args); + if (result != null) { + evt.Put(opts.dest, result); + } + }; + } + + function nop(evt) { + } + + function appendErrorMsg(evt, msg) { + var value = evt.Get("error.message"); + if (value == null) { + value = [msg]; + } else if (value instanceof Array) { + value.push(msg); + } else { + value = [value, msg]; + } + evt.Put("error.message", value); + } + + function unimplemented(name) { + // appendErrorMsg() needs the event, which is not available here, so just log. + console.warn("unimplemented feature: " + name); + } + + function lookup(opts) { + return function (evt) { + var key = opts.key(evt); + if (key == null) return; + var value = opts.map.keyvaluepairs[key]; + if (value === undefined) { + value = opts.map.default; + } + if (value !== undefined) { + evt.Put(opts.dest, value(evt)); + } + }; + } + + function set(fields) { + return new processor.AddFields({ + target: FIELDS_OBJECT, + fields: fields, + }); + } + + function setf(dst, src) { + return function (evt) { + var val = evt.Get(FIELDS_PREFIX + src); + if (val != null) evt.Put(FIELDS_PREFIX + dst, val); + }; + } + + function setc(dst, value) { + return function (evt) { + evt.Put(FIELDS_PREFIX + dst, value); + }; + } + + function set_field(opts) { + return function (evt) { + var val = opts.value(evt); + if (val != null) evt.Put(opts.dest, val); + }; + } + + function dump(label) { + return function (evt) { + console.log("Dump of event at " + label + ": " + JSON.stringify(evt, null, "\t")); + }; + } + + function date_time_join_args(evt, arglist) { + var str = ""; + for (var i = 0; i < arglist.length; i++) { + var fname = FIELDS_PREFIX + arglist[i]; + var val = evt.Get(fname); + if (val != null) { + if (str !== "") str += " "; + str += val; + } else { + if (debug) console.warn("in date_time: input arg " + fname + " is not set"); + } + } + return str; + } + + function to2Digit(num) { + return num? (num < 10? "0" + num : num) : "00"; + } + + // Make two-digit dates 00-69 interpreted as 2000-2069 + // and dates 70-99 translated to 1970-1999. + var twoDigitYearEpoch = 70; + var twoDigitYearCentury = 2000; + + // This is to accept dates up to 2 days in the future, only used when + // no year is specified in a date. 2 days should be enough to account for + // time differences between systems and different tz offsets. + var maxFutureDelta = 2*24*60*60*1000;
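+ // Illustrative sketch, not part of the original pipeline: the two-digit-year window defined above maps 00-69 into 2000-2069 and 70-99 into 1970-1999, mirroring DateContainer.set2DigitYear below. + function demo_two_digit_year(v) { + return v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + } + // demo_two_digit_year(69) === 2069; demo_two_digit_year(70) === 1970.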
+ // DateContainer stores date fields and then converts those fields into + // a Date. Necessary because building a Date using its set() methods gives + // different results depending on the order of components. + function DateContainer(tzOffset) { + this.offset = tzOffset === undefined? "Z" : tzOffset; + } + + DateContainer.prototype = { + setYear: function(v) {this.year = v;}, + setMonth: function(v) {this.month = v;}, + setDay: function(v) {this.day = v;}, + setHours: function(v) {this.hours = v;}, + setMinutes: function(v) {this.minutes = v;}, + setSeconds: function(v) {this.seconds = v;}, + + setUNIX: function(v) {this.unix = v;}, + + set2DigitYear: function(v) { + this.year = v < twoDigitYearEpoch? twoDigitYearCentury + v : twoDigitYearCentury + v - 100; + }, + + toDate: function() { + if (this.unix !== undefined) { + return new Date(this.unix * 1000); + } + if (this.day === undefined || this.month === undefined) { + // Can't make a date from this. + return undefined; + } + if (this.year === undefined) { + // A date without a year. Set current year, or previous year + // if date would be in the future. + var now = new Date(); + this.year = now.getFullYear(); + var date = this.toDate(); + if (date.getTime() - now.getTime() > maxFutureDelta) { + date.setFullYear(now.getFullYear() - 1); + } + return date; + } + var MM = to2Digit(this.month); + var DD = to2Digit(this.day); + var hh = to2Digit(this.hours); + var mm = to2Digit(this.minutes); + var ss = to2Digit(this.seconds); + return new Date(this.year + "-" + MM + "-" + DD + "T" + hh + ":" + mm + ":" + ss + this.offset); + } + } + + function date_time_try_pattern(fmt, str, tzOffset) { + var date = new DateContainer(tzOffset); + var pos = date_time_try_pattern_at_pos(fmt, str, 0, date); + return pos !== undefined? date.toDate() : undefined; + } + + function date_time_try_pattern_at_pos(fmt, str, pos, date) { + var len = str.length; + for (var proc = 0; pos !== undefined && pos < len && proc < fmt.length; proc++) { + pos = fmt[proc](str, pos, date); + } + return pos; + } + + function date_time(opts) { + return function (evt) { + var tzOffset = opts.tz || tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var date = date_time_try_pattern(opts.fmts[i], str, tzOffset); + if (date !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, date); + return; + } + } + if (debug) console.warn("in date_time: id=" + opts.id + " FAILED: " + str); + }; + } + + var uA = 60 * 60 * 24; + var uD = 60 * 60 * 24; + var uF = 60 * 60; + var uG = 60 * 60 * 24 * 30; + var uH = 60 * 60; + var uI = 60 * 60; + var uJ = 60 * 60 * 24; + var uM = 60 * 60 * 24 * 30; + var uN = 60 * 60; + var uO = 1; + var uS = 1; + var uT = 60; + var uU = 60; + var uc = dc; + + function duration(opts) { + return function(evt) { + var str = date_time_join_args(evt, opts.args); + for (var i = 0; i < opts.fmts.length; i++) { + var seconds = duration_try_pattern(opts.fmts[i], str); + if (seconds !== undefined) { + evt.Put(FIELDS_PREFIX + opts.dest, seconds); + return; + } + } + if (debug) console.warn("in duration: id=" + opts.id + " (s) FAILED: " + str); + }; + } + + function duration_try_pattern(fmt, str) { + var secs = 0; + var pos = 0; + for (var i=0; i<fmt.length; i++) { + var result = fmt[i](str, pos); + if (result === undefined) return; + if (typeof result === "number") { + // A constant separator parser (see uc/dc): only advances the position. + pos = result; + } else { + secs += result.value; + pos = result.pos; + } + } + return secs; + } + + var shortMonths = { + // mon => [ month_id , how many chars to skip if month in long form ] + "Jan": [0, 4], + "Feb": [1, 5], + "Mar": [2, 2], + "Apr": [3, 2], + "May": [4, 0], + "Jun": [5, 1], + "Jul": [6, 1], + "Aug": [7, 3], + "Sep": [8, 6], + "Oct": [9, 4], + "Nov": [10, 5], + "Dec": [11, 4], + "jan": [0, 4], + "feb": [1, 5], + "mar": [2, 2], + "apr": [3, 2], + "may": [4, 0], + "jun": [5, 1], + "jul": [6, 1], + "aug": [7, 3], + "sep": [8, 6], + "oct": [9, 4], + "nov": [10, 5], + "dec": [11, 4], + };
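+ // Illustrative sketch, not part of the original pipeline: how the shortMonths table is consumed. The first element is the 0-based month id; the second is how many extra characters to skip when the log spells the month out in full ("January" = "Jan" + 4 chars). + function demo_month_lookup(name) { + var idx = shortMonths[name.substr(0, 3)]; + return idx === undefined? undefined : { month: idx[0] + 1, extraChars: idx[1] }; + } + // demo_month_lookup("January") returns { month: 1, extraChars: 4 }.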
dateFixedWidthNumber("T", 2, 0, 59, DateContainer.prototype.setMinutes); + var dU = dateVariableWidthNumber("U", 0, 59, DateContainer.prototype.setMinutes); + var dP = parseAMPM; // AM|PM + var dQ = parseAMPM; // A.M.|P.M + var dS = dateFixedWidthNumber("S", 2, 0, 60, DateContainer.prototype.setSeconds); + var dO = dateVariableWidthNumber("O", 0, 60, DateContainer.prototype.setSeconds); + var dY = dateFixedWidthNumber("Y", 2, 0, 99, DateContainer.prototype.set2DigitYear); + var dW = dateFixedWidthNumber("W", 4, 1000, 9999, DateContainer.prototype.setYear); + var dZ = parseHMS; + var dX = dateVariableWidthNumber("X", 0, 0x10000000000, DateContainer.prototype.setUNIX); + + // parseAMPM parses "A.M", "AM", "P.M", "PM" from logs. + // Only works if this modifier appears after the hour has been read from logs + // which is always the case in the 300 devices. + function parseAMPM(str, pos, date) { + var n = str.length; + var start = skipws(str, pos); + if (start + 2 > n) return; + var head = str.substr(start, 2).toUpperCase(); + var isPM = false; + var skip = false; + switch (head) { + case "A.": + skip = true; + /* falls through */ + case "AM": + break; + case "P.": + skip = true; + /* falls through */ + case "PM": + isPM = true; + break; + default: + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(head:" + head + ")"); + return; + } + pos = start + 2; + if (skip) { + if (pos+2 > n || str.substr(pos, 2).toUpperCase() !== "M.") { + if (debug) console.warn("can't parse pos " + start + " as AM/PM: " + str + "(tail)"); + return; + } + pos += 2; + } + var hh = date.hours; + if (isPM) { + // Accept existing hour in 24h format. + if (hh < 12) hh += 12; + } else { + if (hh === 12) hh = 0; + } + date.setHours(hh); + return pos; + } + + function parseHMS(str, pos, date) { + return date_time_try_pattern_at_pos([dN, dc(":"), dU, dc(":"), dO], str, pos, date); + } + + function skipws(str, pos) { + for ( var n = str.length; + pos < n && str.charAt(pos) === " "; + pos++) + ; + return pos; + } + + function skipdigits(str, pos) { + var c; + for (var n = str.length; + pos < n && (c = str.charAt(pos)) >= "0" && c <= "9"; + pos++) + ; + return pos; + } + + function dSkip(str, pos, date) { + var chr; + for (;pos < str.length && (chr=str[pos])<'0' || chr>'9'; pos++) {} + return pos < str.length? pos : undefined; + } + + function dateVariableWidthNumber(fmtChar, min, max, setter) { + return function (str, pos, date) { + var start = skipws(str, pos); + pos = skipdigits(str, start); + var s = str.substr(start, pos - start); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos; + } + return; + }; + } + + function dateFixedWidthNumber(fmtChar, width, min, max, setter) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + width > n) return; + var s = str.substr(pos, width); + var value = parseInt(s, 10); + if (value >= min && value <= max) { + setter.call(date, value); + return pos + width; + } + return; + }; + } + + // Short month name (Jan..Dec). 
+ function dateMonthName(long) { + return function (str, pos, date) { + pos = skipws(str, pos); + var n = str.length; + if (pos + 3 > n) return; + var mon = str.substr(pos, 3); + var idx = shortMonths[mon]; + if (idx === undefined) { + idx = shortMonths[mon.toLowerCase()]; + } + if (idx === undefined) { + //console.warn("parsing date_time: '" + mon + "' is not a valid short month (%B)"); + return; + } + date.setMonth(idx[0]+1); + return pos + 3 + (long ? idx[1] : 0); + }; + } + + function url_wrapper(dst, src, fn) { + return function(evt) { + var value = evt.Get(FIELDS_PREFIX + src), result; + if (value != null && (result = fn(value))!== undefined) { + evt.Put(FIELDS_PREFIX + dst, result); + } else { + console.debug(fn.name + " failed for '" + value + "'"); + } + }; + } + + // The following regular expression for parsing URLs from: + // https://github.com/wizard04wsu/URI_Parsing + // + // The MIT License (MIT) + // + // Copyright (c) 2014 Andrew Harrison + // + // Permission is hereby granted, free of charge, to any person obtaining a copy of + // this software and associated documentation files (the "Software"), to deal in + // the Software without restriction, including without limitation the rights to + // use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of + // the Software, and to permit persons to whom the Software is furnished to do so, + // subject to the following conditions: + // + // The above copyright notice and this permission notice shall be included in all + // copies or substantial portions of the Software. + // + // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS + // FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR + // COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER + // IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + // CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + var uriRegExp = /^([a-z][a-z0-9+.\-]*):(?:\/\/((?:(?=((?:[a-z0-9\-._~!$&'()*+,;=:]|%[0-9A-F]{2})*))(\3)@)?(?=(\[[0-9A-F:.]{2,}\]|(?:[a-z0-9\-._~!$&'()*+,;=]|%[0-9A-F]{2})*))\5(?::(?=(\d*))\6)?)(\/(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\8)?|(\/?(?!\/)(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/]|%[0-9A-F]{2})*))\10)?)(?:\?(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\11)?(?:#(?=((?:[a-z0-9-._~!$&'()*+,;=:@\/?]|%[0-9A-F]{2})*))\12)?$/i; + + var uriScheme = 1; + var uriDomain = 5; + var uriPort = 6; + var uriPath = 7; + var uriPathAlt = 9; + var uriQuery = 11; + + function domain(dst, src) { + return url_wrapper(dst, src, extract_domain); + } + + function split_url(value) { + var m = value.match(uriRegExp); + if (m && m[uriDomain]) return m; + // Support input in the form "www.example.net/path", but not "/path". + m = ("null://" + value).match(uriRegExp); + if (m) return m; + } + + function extract_domain(value) { + var m = split_url(value); + if (m && m[uriDomain]) return m[uriDomain]; + } + + var extFromPage = /\.[^.]+$/; + function extract_ext(value) { + var page = extract_page(value); + if (page) { + var m = page.match(extFromPage); + if (m) return m[0]; + } + } + + function ext(dst, src) { + return url_wrapper(dst, src, extract_ext); + } + + function fqdn(dst, src) { + // TODO: fqdn and domain(eTLD+1) are currently the same. 
+ return domain(dst, src); + } + + var pageFromPathRegExp = /\/([^\/]+)$/; + var pageName = 1; + + function extract_page(value) { + value = extract_path(value); + if (!value) return undefined; + var m = value.match(pageFromPathRegExp); + if (m) return m[pageName]; + } + + function page(dst, src) { + return url_wrapper(dst, src, extract_page); + } + + function extract_path(value) { + var m = split_url(value); + return m? m[uriPath] || m[uriPathAlt] : undefined; + } + + function path(dst, src) { + return url_wrapper(dst, src, extract_path); + } + + // Map common schemes to their default port. + // port has to be a string (will be converted at a later stage). + var schemePort = { + "ftp": "21", + "ssh": "22", + "http": "80", + "https": "443", + }; + + function extract_port(value) { + var m = split_url(value); + if (!m) return undefined; + if (m[uriPort]) return m[uriPort]; + if (m[uriScheme]) { + return schemePort[m[uriScheme]]; + } + } + + function port(dst, src) { + return url_wrapper(dst, src, extract_port); + } + + function extract_query(value) { + var m = split_url(value); + if (m && m[uriQuery]) return m[uriQuery]; + } + + function query(dst, src) { + return url_wrapper(dst, src, extract_query); + } + + function extract_root(value) { + var m = split_url(value); + if (m && m[uriDomain] && m[uriDomain]) { + var scheme = m[uriScheme] && m[uriScheme] !== "null"? + m[uriScheme] + "://" : ""; + var port = m[uriPort]? ":" + m[uriPort] : ""; + return scheme + m[uriDomain] + port; + } + } + + function root(dst, src) { + return url_wrapper(dst, src, extract_root); + } + + function tagval(id, src, cfg, keys, on_success) { + var fail = function(evt) { + evt.Put(FLAG_FIELD, "tagval_parsing_error"); + } + if (cfg.kv_separator.length !== 1) { + throw("Invalid TAGVALMAP ValueDelimiter (must have 1 character)"); + } + var quotes_len = cfg.open_quote.length > 0 && cfg.close_quote.length > 0? 
cfg.open_quote.length + cfg.close_quote.length : 0; + var kv_regex = new RegExp('^([^' + cfg.kv_separator + ']*)*' + cfg.kv_separator + ' *(.*)*$'); + return function(evt) { + var msg = evt.Get(src); + if (msg === undefined) { + console.warn("tagval: input field is missing"); + return fail(evt); + } + var pairs = msg.split(cfg.pair_separator); + var i; + var success = false; + var prev = ""; + for (i=0; i<pairs.length; i++) { + var m = pairs[i].match(kv_regex); + if (m === null || m.length !== 3) { + // No key/value separator: assume a continuation of the previous value. + if (prev !== "") { + evt.Put(FIELDS_PREFIX + prev, evt.Get(FIELDS_PREFIX + prev) + cfg.pair_separator + pairs[i]); + } + continue; + } + var key = m[1].trim(); + var value = m[2] || ""; + var field = keys[key]; + if (field === undefined) continue; + prev = field; + if (quotes_len > 0 && + value.length >= cfg.open_quote.length + cfg.close_quote.length && + value.substr(0, cfg.open_quote.length) === cfg.open_quote && + value.substr(value.length - cfg.close_quote.length) === cfg.close_quote) { + value = value.substr(cfg.open_quote.length, value.length - quotes_len); + } + evt.Put(FIELDS_PREFIX + field, value); + success = true; + } + if (!success) { + return fail(evt); + } + if (on_success != null) { + on_success(evt); + } + } + } + + var ecs_mappings = { + "_facility": {convert: to_long, to:[{field: "log.syslog.facility.code", setter: fld_set}]}, + "_pri": {convert: to_long, to:[{field: "log.syslog.priority", setter: fld_set}]}, + "_severity": {convert: to_long, to:[{field: "log.syslog.severity.code", setter: fld_set}]}, + "action": {to:[{field: "event.action", setter: fld_prio, prio: 0}]}, + "administrator": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 4}]}, + "alias.ip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 3},{field: "related.ip", setter: fld_append}]}, + "alias.ipv6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 4},{field: "related.ip", setter: fld_append}]}, + "alias.mac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 1}]}, + "application": {to:[{field: "network.application", setter: fld_set}]}, + "bytes": {convert: to_long, to:[{field: "network.bytes", setter: fld_set}]}, + "c_domain": {to:[{field: "source.domain", setter: fld_prio, prio: 1}]}, + "c_logon_id": {to:[{field: "user.id", setter: fld_prio, prio: 2}]}, + "c_user_name": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 8}]}, + "c_username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 2}]}, + "cctld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 1}]}, + "child_pid": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 1}]}, + "child_pid_val": {to:[{field: "process.title", setter: fld_set}]}, + "child_process": {to:[{field: "process.name", setter: fld_prio, prio: 1}]}, + "city.dst": {to:[{field: "destination.geo.city_name", setter: fld_set}]}, + "city.src": {to:[{field: "source.geo.city_name", setter: fld_set}]}, + "daddr": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "daddr_v6": {convert: to_ip, to:[{field: "destination.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "ddomain": {to:[{field: "destination.domain", setter: fld_prio, prio: 0}]}, + "devicehostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "devicehostmac": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 0}]}, + "dhost": {to:[{field: "destination.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + "dinterface": {to:[{field: "observer.egress.interface.name", setter: fld_set}]}, + "direction": {to:[{field: "network.direction", setter: fld_set}]}, + "directory": {to:[{field: "file.directory", setter:
fld_set}]}, + "dmacaddr": {convert: to_mac, to:[{field: "destination.mac", setter: fld_set}]}, + "dns.responsetype": {to:[{field: "dns.answers.type", setter: fld_set}]}, + "dns.resptext": {to:[{field: "dns.answers.name", setter: fld_set}]}, + "dns_querytype": {to:[{field: "dns.question.type", setter: fld_set}]}, + "domain": {to:[{field: "server.domain", setter: fld_prio, prio: 0},{field: "related.hosts", setter: fld_append}]}, + "domain.dst": {to:[{field: "destination.domain", setter: fld_prio, prio: 1}]}, + "domain.src": {to:[{field: "source.domain", setter: fld_prio, prio: 2}]}, + "domain_id": {to:[{field: "user.domain", setter: fld_set}]}, + "domainname": {to:[{field: "server.domain", setter: fld_prio, prio: 1}]}, + "dport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 0}]}, + "dtransaddr": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "dtransport": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 0}]}, + "ec_outcome": {to:[{field: "event.outcome", setter: fld_ecs_outcome}]}, + "event_description": {to:[{field: "message", setter: fld_prio, prio: 0}]}, + "event_source": {to:[{field: "related.hosts", setter: fld_append}]}, + "event_time": {convert: to_date, to:[{field: "@timestamp", setter: fld_set}]}, + "event_type": {to:[{field: "event.action", setter: fld_prio, prio: 1}]}, + "extension": {to:[{field: "file.extension", setter: fld_prio, prio: 1}]}, + "file.attributes": {to:[{field: "file.attributes", setter: fld_set}]}, + "filename": {to:[{field: "file.name", setter: fld_prio, prio: 0}]}, + "filename_size": {convert: to_long, to:[{field: "file.size", setter: fld_set}]}, + "filepath": {to:[{field: "file.path", setter: fld_set}]}, + "filetype": {to:[{field: "file.type", setter: fld_set}]}, + "fqdn": {to:[{field: "related.hosts", setter: fld_append}]}, + "group": {to:[{field: "group.name", setter: fld_set}]}, + "groupid": {to:[{field: "group.id", setter: fld_set}]}, + "host": {to:[{field: "host.name", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "hostip": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "hostip_v6": {convert: to_ip, to:[{field: "host.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "hostname": {to:[{field: "host.name", setter: fld_prio, prio: 0}]}, + "id": {to:[{field: "event.code", setter: fld_prio, prio: 0}]}, + "interface": {to:[{field: "network.interface.name", setter: fld_set}]}, + "ip.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "ip.trans.dst": {convert: to_ip, to:[{field: "destination.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ip.trans.src": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "ipv6.orig": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 2},{field: "related.ip", setter: fld_append}]}, + "latdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lat", setter: fld_set}]}, + "latdec_src": {convert: to_double, to:[{field: "source.geo.location.lat", setter: fld_set}]}, + "location_city": {to:[{field: "geo.city_name", setter: fld_set}]}, + "location_country": {to:[{field: "geo.country_name", setter: fld_set}]}, + "location_desc": {to:[{field: "geo.name", 
setter: fld_set}]}, + "location_dst": {to:[{field: "destination.geo.country_name", setter: fld_set}]}, + "location_src": {to:[{field: "source.geo.country_name", setter: fld_set}]}, + "location_state": {to:[{field: "geo.region_name", setter: fld_set}]}, + "logon_id": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 5}]}, + "longdec_dst": {convert: to_double, to:[{field: "destination.geo.location.lon", setter: fld_set}]}, + "longdec_src": {convert: to_double, to:[{field: "source.geo.location.lon", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "host.mac", setter: fld_prio, prio: 2}]}, + "messageid": {to:[{field: "event.code", setter: fld_prio, prio: 1}]}, + "method": {to:[{field: "http.request.method", setter: fld_set}]}, + "msg": {to:[{field: "message", setter: fld_set}]}, + "orig_ip": {convert: to_ip, to:[{field: "network.forwarded_ip", setter: fld_prio, prio: 1},{field: "related.ip", setter: fld_append}]}, + "owner": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 6}]}, + "packets": {convert: to_long, to:[{field: "network.packets", setter: fld_set}]}, + "parent_pid": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 0}]}, + "parent_pid_val": {to:[{field: "process.parent.title", setter: fld_set}]}, + "parent_process": {to:[{field: "process.parent.name", setter: fld_prio, prio: 0}]}, + "patient_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 1}]}, + "port.dst": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 1}]}, + "port.src": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 1}]}, + "port.trans.dst": {convert: to_long, to:[{field: "destination.nat.port", setter: fld_prio, prio: 1}]}, + "port.trans.src": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 1}]}, + "process": {to:[{field: "process.name", setter: fld_prio, prio: 0}]}, + "process_id": {convert: to_long, to:[{field: "process.pid", setter: fld_prio, prio: 0}]}, + "process_id_src": {convert: to_long, to:[{field: "process.parent.pid", setter: fld_prio, prio: 1}]}, + "process_src": {to:[{field: "process.parent.name", setter: fld_prio, prio: 1}]}, + "product": {to:[{field: "observer.product", setter: fld_set}]}, + "protocol": {to:[{field: "network.protocol", setter: fld_set}]}, + "query": {to:[{field: "url.query", setter: fld_prio, prio: 2}]}, + "rbytes": {convert: to_long, to:[{field: "destination.bytes", setter: fld_set}]}, + "referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 1}]}, + "rulename": {to:[{field: "rule.name", setter: fld_set}]}, + "saddr": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "saddr_v6": {convert: to_ip, to:[{field: "source.ip", setter: fld_set},{field: "related.ip", setter: fld_append}]}, + "sbytes": {convert: to_long, to:[{field: "source.bytes", setter: fld_set}]}, + "sdomain": {to:[{field: "source.domain", setter: fld_prio, prio: 0}]}, + "service": {to:[{field: "service.name", setter: fld_prio, prio: 1}]}, + "service.name": {to:[{field: "service.name", setter: fld_prio, prio: 0}]}, + "service_account": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 7}]}, + "severity": {to:[{field: "log.level", setter: fld_set}]}, + "shost": {to:[{field: "host.hostname", setter: fld_set},{field: "source.address", setter: fld_set},{field: "related.hosts", setter: fld_append}]}, + 
"sinterface": {to:[{field: "observer.ingress.interface.name", setter: fld_set}]}, + "sld": {to:[{field: "url.registered_domain", setter: fld_set}]}, + "smacaddr": {convert: to_mac, to:[{field: "source.mac", setter: fld_set}]}, + "sport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 0}]}, + "stransaddr": {convert: to_ip, to:[{field: "source.nat.ip", setter: fld_prio, prio: 0},{field: "related.ip", setter: fld_append}]}, + "stransport": {convert: to_long, to:[{field: "source.nat.port", setter: fld_prio, prio: 0}]}, + "tcp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 2}]}, + "tcp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 2}]}, + "timezone": {to:[{field: "event.timezone", setter: fld_set}]}, + "tld": {to:[{field: "url.top_level_domain", setter: fld_prio, prio: 0}]}, + "udp.dstport": {convert: to_long, to:[{field: "destination.port", setter: fld_prio, prio: 3}]}, + "udp.srcport": {convert: to_long, to:[{field: "source.port", setter: fld_prio, prio: 3}]}, + "uid": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 3}]}, + "url": {to:[{field: "url.original", setter: fld_prio, prio: 1}]}, + "url_raw": {to:[{field: "url.original", setter: fld_prio, prio: 0}]}, + "urldomain": {to:[{field: "url.domain", setter: fld_prio, prio: 0}]}, + "urlquery": {to:[{field: "url.query", setter: fld_prio, prio: 0}]}, + "user": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 0}]}, + "user.id": {to:[{field: "user.id", setter: fld_prio, prio: 1}]}, + "user_agent": {to:[{field: "user_agent.original", setter: fld_set}]}, + "user_fullname": {to:[{field: "user.full_name", setter: fld_prio, prio: 0}]}, + "user_id": {to:[{field: "user.id", setter: fld_prio, prio: 0}]}, + "username": {to:[{field: "related.user", setter: fld_append},{field: "user.name", setter: fld_prio, prio: 1}]}, + "version": {to:[{field: "observer.version", setter: fld_set}]}, + "web_domain": {to:[{field: "url.domain", setter: fld_prio, prio: 1},{field: "related.hosts", setter: fld_append}]}, + "web_extension": {to:[{field: "file.extension", setter: fld_prio, prio: 0}]}, + "web_query": {to:[{field: "url.query", setter: fld_prio, prio: 1}]}, + "web_ref_domain": {to:[{field: "related.hosts", setter: fld_append}]}, + "web_referer": {to:[{field: "http.request.referrer", setter: fld_prio, prio: 0}]}, + "web_root": {to:[{field: "url.path", setter: fld_set}]}, + "webpage": {to:[{field: "file.name", setter: fld_prio, prio: 1}]}, + }; + + var rsa_mappings = { + "access_point": {to:[{field: "rsa.wireless.access_point", setter: fld_set}]}, + "accesses": {to:[{field: "rsa.identity.accesses", setter: fld_set}]}, + "acl_id": {to:[{field: "rsa.misc.acl_id", setter: fld_set}]}, + "acl_op": {to:[{field: "rsa.misc.acl_op", setter: fld_set}]}, + "acl_pos": {to:[{field: "rsa.misc.acl_pos", setter: fld_set}]}, + "acl_table": {to:[{field: "rsa.misc.acl_table", setter: fld_set}]}, + "action": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "ad_computer_dst": {to:[{field: "rsa.network.ad_computer_dst", setter: fld_set}]}, + "addr": {to:[{field: "rsa.network.addr", setter: fld_set}]}, + "admin": {to:[{field: "rsa.misc.admin", setter: fld_set}]}, + "agent": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 0}]}, + "agent.id": {to:[{field: "rsa.misc.agent_id", setter: fld_set}]}, + "alarm_id": {to:[{field: "rsa.misc.alarm_id", setter: fld_set}]}, + "alarmname": {to:[{field: 
"rsa.misc.alarmname", setter: fld_set}]}, + "alert": {to:[{field: "rsa.threat.alert", setter: fld_set}]}, + "alert_id": {to:[{field: "rsa.misc.alert_id", setter: fld_set}]}, + "alias.host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "analysis.file": {to:[{field: "rsa.investigations.analysis_file", setter: fld_set}]}, + "analysis.service": {to:[{field: "rsa.investigations.analysis_service", setter: fld_set}]}, + "analysis.session": {to:[{field: "rsa.investigations.analysis_session", setter: fld_set}]}, + "app_id": {to:[{field: "rsa.misc.app_id", setter: fld_set}]}, + "attachment": {to:[{field: "rsa.file.attachment", setter: fld_set}]}, + "audit": {to:[{field: "rsa.misc.audit", setter: fld_set}]}, + "audit_class": {to:[{field: "rsa.internal.audit_class", setter: fld_set}]}, + "audit_object": {to:[{field: "rsa.misc.audit_object", setter: fld_set}]}, + "auditdata": {to:[{field: "rsa.misc.auditdata", setter: fld_set}]}, + "authmethod": {to:[{field: "rsa.identity.auth_method", setter: fld_set}]}, + "autorun_type": {to:[{field: "rsa.misc.autorun_type", setter: fld_set}]}, + "bcc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "benchmark": {to:[{field: "rsa.misc.benchmark", setter: fld_set}]}, + "binary": {to:[{field: "rsa.file.binary", setter: fld_set}]}, + "boc": {to:[{field: "rsa.investigations.boc", setter: fld_set}]}, + "bssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 1}]}, + "bypass": {to:[{field: "rsa.misc.bypass", setter: fld_set}]}, + "c_sid": {to:[{field: "rsa.identity.user_sid_src", setter: fld_set}]}, + "cache": {to:[{field: "rsa.misc.cache", setter: fld_set}]}, + "cache_hit": {to:[{field: "rsa.misc.cache_hit", setter: fld_set}]}, + "calling_from": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 1}]}, + "calling_to": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 0}]}, + "category": {to:[{field: "rsa.misc.category", setter: fld_set}]}, + "cc": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "cc.number": {convert: to_long, to:[{field: "rsa.misc.cc_number", setter: fld_set}]}, + "cefversion": {to:[{field: "rsa.misc.cefversion", setter: fld_set}]}, + "cert.serial": {to:[{field: "rsa.crypto.cert_serial", setter: fld_set}]}, + "cert_ca": {to:[{field: "rsa.crypto.cert_ca", setter: fld_set}]}, + "cert_checksum": {to:[{field: "rsa.crypto.cert_checksum", setter: fld_set}]}, + "cert_common": {to:[{field: "rsa.crypto.cert_common", setter: fld_set}]}, + "cert_error": {to:[{field: "rsa.crypto.cert_error", setter: fld_set}]}, + "cert_hostname": {to:[{field: "rsa.crypto.cert_host_name", setter: fld_set}]}, + "cert_hostname_cat": {to:[{field: "rsa.crypto.cert_host_cat", setter: fld_set}]}, + "cert_issuer": {to:[{field: "rsa.crypto.cert_issuer", setter: fld_set}]}, + "cert_keysize": {to:[{field: "rsa.crypto.cert_keysize", setter: fld_set}]}, + "cert_status": {to:[{field: "rsa.crypto.cert_status", setter: fld_set}]}, + "cert_subject": {to:[{field: "rsa.crypto.cert_subject", setter: fld_set}]}, + "cert_username": {to:[{field: "rsa.crypto.cert_username", setter: fld_set}]}, + "cfg.attr": {to:[{field: "rsa.misc.cfg_attr", setter: fld_set}]}, + "cfg.obj": {to:[{field: "rsa.misc.cfg_obj", setter: fld_set}]}, + "cfg.path": {to:[{field: "rsa.misc.cfg_path", setter: fld_set}]}, + "change_attribute": {to:[{field: "rsa.misc.change_attrib", setter: fld_set}]}, + "change_new": {to:[{field: "rsa.misc.change_new", setter: fld_set}]}, + "change_old": {to:[{field: "rsa.misc.change_old", setter: fld_set}]}, + "changes": {to:[{field: 
"rsa.misc.changes", setter: fld_set}]}, + "checksum": {to:[{field: "rsa.misc.checksum", setter: fld_set}]}, + "checksum.dst": {to:[{field: "rsa.misc.checksum_dst", setter: fld_set}]}, + "checksum.src": {to:[{field: "rsa.misc.checksum_src", setter: fld_set}]}, + "cid": {to:[{field: "rsa.internal.cid", setter: fld_set}]}, + "client": {to:[{field: "rsa.misc.client", setter: fld_prio, prio: 1}]}, + "client_ip": {to:[{field: "rsa.misc.client_ip", setter: fld_set}]}, + "clustermembers": {to:[{field: "rsa.misc.clustermembers", setter: fld_set}]}, + "cmd": {to:[{field: "rsa.misc.cmd", setter: fld_set}]}, + "cn_acttimeout": {to:[{field: "rsa.misc.cn_acttimeout", setter: fld_set}]}, + "cn_asn_dst": {to:[{field: "rsa.web.cn_asn_dst", setter: fld_set}]}, + "cn_asn_src": {to:[{field: "rsa.misc.cn_asn_src", setter: fld_set}]}, + "cn_bgpv4nxthop": {to:[{field: "rsa.misc.cn_bgpv4nxthop", setter: fld_set}]}, + "cn_ctr_dst_code": {to:[{field: "rsa.misc.cn_ctr_dst_code", setter: fld_set}]}, + "cn_dst_tos": {to:[{field: "rsa.misc.cn_dst_tos", setter: fld_set}]}, + "cn_dst_vlan": {to:[{field: "rsa.misc.cn_dst_vlan", setter: fld_set}]}, + "cn_engine_id": {to:[{field: "rsa.misc.cn_engine_id", setter: fld_set}]}, + "cn_engine_type": {to:[{field: "rsa.misc.cn_engine_type", setter: fld_set}]}, + "cn_f_switch": {to:[{field: "rsa.misc.cn_f_switch", setter: fld_set}]}, + "cn_flowsampid": {to:[{field: "rsa.misc.cn_flowsampid", setter: fld_set}]}, + "cn_flowsampintv": {to:[{field: "rsa.misc.cn_flowsampintv", setter: fld_set}]}, + "cn_flowsampmode": {to:[{field: "rsa.misc.cn_flowsampmode", setter: fld_set}]}, + "cn_inacttimeout": {to:[{field: "rsa.misc.cn_inacttimeout", setter: fld_set}]}, + "cn_inpermbyts": {to:[{field: "rsa.misc.cn_inpermbyts", setter: fld_set}]}, + "cn_inpermpckts": {to:[{field: "rsa.misc.cn_inpermpckts", setter: fld_set}]}, + "cn_invalid": {to:[{field: "rsa.misc.cn_invalid", setter: fld_set}]}, + "cn_ip_proto_ver": {to:[{field: "rsa.misc.cn_ip_proto_ver", setter: fld_set}]}, + "cn_ipv4_ident": {to:[{field: "rsa.misc.cn_ipv4_ident", setter: fld_set}]}, + "cn_l_switch": {to:[{field: "rsa.misc.cn_l_switch", setter: fld_set}]}, + "cn_log_did": {to:[{field: "rsa.misc.cn_log_did", setter: fld_set}]}, + "cn_log_rid": {to:[{field: "rsa.misc.cn_log_rid", setter: fld_set}]}, + "cn_max_ttl": {to:[{field: "rsa.misc.cn_max_ttl", setter: fld_set}]}, + "cn_maxpcktlen": {to:[{field: "rsa.misc.cn_maxpcktlen", setter: fld_set}]}, + "cn_min_ttl": {to:[{field: "rsa.misc.cn_min_ttl", setter: fld_set}]}, + "cn_minpcktlen": {to:[{field: "rsa.misc.cn_minpcktlen", setter: fld_set}]}, + "cn_mpls_lbl_1": {to:[{field: "rsa.misc.cn_mpls_lbl_1", setter: fld_set}]}, + "cn_mpls_lbl_10": {to:[{field: "rsa.misc.cn_mpls_lbl_10", setter: fld_set}]}, + "cn_mpls_lbl_2": {to:[{field: "rsa.misc.cn_mpls_lbl_2", setter: fld_set}]}, + "cn_mpls_lbl_3": {to:[{field: "rsa.misc.cn_mpls_lbl_3", setter: fld_set}]}, + "cn_mpls_lbl_4": {to:[{field: "rsa.misc.cn_mpls_lbl_4", setter: fld_set}]}, + "cn_mpls_lbl_5": {to:[{field: "rsa.misc.cn_mpls_lbl_5", setter: fld_set}]}, + "cn_mpls_lbl_6": {to:[{field: "rsa.misc.cn_mpls_lbl_6", setter: fld_set}]}, + "cn_mpls_lbl_7": {to:[{field: "rsa.misc.cn_mpls_lbl_7", setter: fld_set}]}, + "cn_mpls_lbl_8": {to:[{field: "rsa.misc.cn_mpls_lbl_8", setter: fld_set}]}, + "cn_mpls_lbl_9": {to:[{field: "rsa.misc.cn_mpls_lbl_9", setter: fld_set}]}, + "cn_mplstoplabel": {to:[{field: "rsa.misc.cn_mplstoplabel", setter: fld_set}]}, + "cn_mplstoplabip": {to:[{field: "rsa.misc.cn_mplstoplabip", setter: fld_set}]}, + 
"cn_mul_dst_byt": {to:[{field: "rsa.misc.cn_mul_dst_byt", setter: fld_set}]}, + "cn_mul_dst_pks": {to:[{field: "rsa.misc.cn_mul_dst_pks", setter: fld_set}]}, + "cn_muligmptype": {to:[{field: "rsa.misc.cn_muligmptype", setter: fld_set}]}, + "cn_rpackets": {to:[{field: "rsa.web.cn_rpackets", setter: fld_set}]}, + "cn_sampalgo": {to:[{field: "rsa.misc.cn_sampalgo", setter: fld_set}]}, + "cn_sampint": {to:[{field: "rsa.misc.cn_sampint", setter: fld_set}]}, + "cn_seqctr": {to:[{field: "rsa.misc.cn_seqctr", setter: fld_set}]}, + "cn_spackets": {to:[{field: "rsa.misc.cn_spackets", setter: fld_set}]}, + "cn_src_tos": {to:[{field: "rsa.misc.cn_src_tos", setter: fld_set}]}, + "cn_src_vlan": {to:[{field: "rsa.misc.cn_src_vlan", setter: fld_set}]}, + "cn_sysuptime": {to:[{field: "rsa.misc.cn_sysuptime", setter: fld_set}]}, + "cn_template_id": {to:[{field: "rsa.misc.cn_template_id", setter: fld_set}]}, + "cn_totbytsexp": {to:[{field: "rsa.misc.cn_totbytsexp", setter: fld_set}]}, + "cn_totflowexp": {to:[{field: "rsa.misc.cn_totflowexp", setter: fld_set}]}, + "cn_totpcktsexp": {to:[{field: "rsa.misc.cn_totpcktsexp", setter: fld_set}]}, + "cn_unixnanosecs": {to:[{field: "rsa.misc.cn_unixnanosecs", setter: fld_set}]}, + "cn_v6flowlabel": {to:[{field: "rsa.misc.cn_v6flowlabel", setter: fld_set}]}, + "cn_v6optheaders": {to:[{field: "rsa.misc.cn_v6optheaders", setter: fld_set}]}, + "code": {to:[{field: "rsa.misc.code", setter: fld_set}]}, + "command": {to:[{field: "rsa.misc.command", setter: fld_set}]}, + "comments": {to:[{field: "rsa.misc.comments", setter: fld_set}]}, + "comp_class": {to:[{field: "rsa.misc.comp_class", setter: fld_set}]}, + "comp_name": {to:[{field: "rsa.misc.comp_name", setter: fld_set}]}, + "comp_rbytes": {to:[{field: "rsa.misc.comp_rbytes", setter: fld_set}]}, + "comp_sbytes": {to:[{field: "rsa.misc.comp_sbytes", setter: fld_set}]}, + "component_version": {to:[{field: "rsa.misc.comp_version", setter: fld_set}]}, + "connection_id": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 1}]}, + "connectionid": {to:[{field: "rsa.misc.connection_id", setter: fld_prio, prio: 0}]}, + "content": {to:[{field: "rsa.misc.content", setter: fld_set}]}, + "content_type": {to:[{field: "rsa.misc.content_type", setter: fld_set}]}, + "content_version": {to:[{field: "rsa.misc.content_version", setter: fld_set}]}, + "context": {to:[{field: "rsa.misc.context", setter: fld_set}]}, + "count": {to:[{field: "rsa.misc.count", setter: fld_set}]}, + "cpu": {convert: to_long, to:[{field: "rsa.misc.cpu", setter: fld_set}]}, + "cpu_data": {to:[{field: "rsa.misc.cpu_data", setter: fld_set}]}, + "criticality": {to:[{field: "rsa.misc.criticality", setter: fld_set}]}, + "cs_agency_dst": {to:[{field: "rsa.misc.cs_agency_dst", setter: fld_set}]}, + "cs_analyzedby": {to:[{field: "rsa.misc.cs_analyzedby", setter: fld_set}]}, + "cs_av_other": {to:[{field: "rsa.misc.cs_av_other", setter: fld_set}]}, + "cs_av_primary": {to:[{field: "rsa.misc.cs_av_primary", setter: fld_set}]}, + "cs_av_secondary": {to:[{field: "rsa.misc.cs_av_secondary", setter: fld_set}]}, + "cs_bgpv6nxthop": {to:[{field: "rsa.misc.cs_bgpv6nxthop", setter: fld_set}]}, + "cs_bit9status": {to:[{field: "rsa.misc.cs_bit9status", setter: fld_set}]}, + "cs_context": {to:[{field: "rsa.misc.cs_context", setter: fld_set}]}, + "cs_control": {to:[{field: "rsa.misc.cs_control", setter: fld_set}]}, + "cs_data": {to:[{field: "rsa.misc.cs_data", setter: fld_set}]}, + "cs_datecret": {to:[{field: "rsa.misc.cs_datecret", setter: fld_set}]}, + "cs_dst_tld": {to:[{field: 
"rsa.misc.cs_dst_tld", setter: fld_set}]}, + "cs_eth_dst_ven": {to:[{field: "rsa.misc.cs_eth_dst_ven", setter: fld_set}]}, + "cs_eth_src_ven": {to:[{field: "rsa.misc.cs_eth_src_ven", setter: fld_set}]}, + "cs_event_uuid": {to:[{field: "rsa.misc.cs_event_uuid", setter: fld_set}]}, + "cs_filetype": {to:[{field: "rsa.misc.cs_filetype", setter: fld_set}]}, + "cs_fld": {to:[{field: "rsa.misc.cs_fld", setter: fld_set}]}, + "cs_if_desc": {to:[{field: "rsa.misc.cs_if_desc", setter: fld_set}]}, + "cs_if_name": {to:[{field: "rsa.misc.cs_if_name", setter: fld_set}]}, + "cs_ip_next_hop": {to:[{field: "rsa.misc.cs_ip_next_hop", setter: fld_set}]}, + "cs_ipv4dstpre": {to:[{field: "rsa.misc.cs_ipv4dstpre", setter: fld_set}]}, + "cs_ipv4srcpre": {to:[{field: "rsa.misc.cs_ipv4srcpre", setter: fld_set}]}, + "cs_lifetime": {to:[{field: "rsa.misc.cs_lifetime", setter: fld_set}]}, + "cs_log_medium": {to:[{field: "rsa.misc.cs_log_medium", setter: fld_set}]}, + "cs_loginname": {to:[{field: "rsa.misc.cs_loginname", setter: fld_set}]}, + "cs_modulescore": {to:[{field: "rsa.misc.cs_modulescore", setter: fld_set}]}, + "cs_modulesign": {to:[{field: "rsa.misc.cs_modulesign", setter: fld_set}]}, + "cs_opswatresult": {to:[{field: "rsa.misc.cs_opswatresult", setter: fld_set}]}, + "cs_payload": {to:[{field: "rsa.misc.cs_payload", setter: fld_set}]}, + "cs_registrant": {to:[{field: "rsa.misc.cs_registrant", setter: fld_set}]}, + "cs_registrar": {to:[{field: "rsa.misc.cs_registrar", setter: fld_set}]}, + "cs_represult": {to:[{field: "rsa.misc.cs_represult", setter: fld_set}]}, + "cs_rpayload": {to:[{field: "rsa.misc.cs_rpayload", setter: fld_set}]}, + "cs_sampler_name": {to:[{field: "rsa.misc.cs_sampler_name", setter: fld_set}]}, + "cs_sourcemodule": {to:[{field: "rsa.misc.cs_sourcemodule", setter: fld_set}]}, + "cs_streams": {to:[{field: "rsa.misc.cs_streams", setter: fld_set}]}, + "cs_targetmodule": {to:[{field: "rsa.misc.cs_targetmodule", setter: fld_set}]}, + "cs_v6nxthop": {to:[{field: "rsa.misc.cs_v6nxthop", setter: fld_set}]}, + "cs_whois_server": {to:[{field: "rsa.misc.cs_whois_server", setter: fld_set}]}, + "cs_yararesult": {to:[{field: "rsa.misc.cs_yararesult", setter: fld_set}]}, + "cve": {to:[{field: "rsa.misc.cve", setter: fld_set}]}, + "d_certauth": {to:[{field: "rsa.crypto.d_certauth", setter: fld_set}]}, + "d_cipher": {to:[{field: "rsa.crypto.cipher_dst", setter: fld_set}]}, + "d_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_dst", setter: fld_set}]}, + "d_sslver": {to:[{field: "rsa.crypto.ssl_ver_dst", setter: fld_set}]}, + "data": {to:[{field: "rsa.internal.data", setter: fld_set}]}, + "data_type": {to:[{field: "rsa.misc.data_type", setter: fld_set}]}, + "date": {to:[{field: "rsa.time.date", setter: fld_set}]}, + "datetime": {to:[{field: "rsa.time.datetime", setter: fld_set}]}, + "day": {to:[{field: "rsa.time.day", setter: fld_set}]}, + "db_id": {to:[{field: "rsa.db.db_id", setter: fld_set}]}, + "db_name": {to:[{field: "rsa.db.database", setter: fld_set}]}, + "db_pid": {convert: to_long, to:[{field: "rsa.db.db_pid", setter: fld_set}]}, + "dclass_counter1": {convert: to_long, to:[{field: "rsa.counters.dclass_c1", setter: fld_set}]}, + "dclass_counter1_string": {to:[{field: "rsa.counters.dclass_c1_str", setter: fld_set}]}, + "dclass_counter2": {convert: to_long, to:[{field: "rsa.counters.dclass_c2", setter: fld_set}]}, + "dclass_counter2_string": {to:[{field: "rsa.counters.dclass_c2_str", setter: fld_set}]}, + "dclass_counter3": {convert: to_long, to:[{field: "rsa.counters.dclass_c3", 
setter: fld_set}]}, + "dclass_counter3_string": {to:[{field: "rsa.counters.dclass_c3_str", setter: fld_set}]}, + "dclass_ratio1": {to:[{field: "rsa.counters.dclass_r1", setter: fld_set}]}, + "dclass_ratio1_string": {to:[{field: "rsa.counters.dclass_r1_str", setter: fld_set}]}, + "dclass_ratio2": {to:[{field: "rsa.counters.dclass_r2", setter: fld_set}]}, + "dclass_ratio2_string": {to:[{field: "rsa.counters.dclass_r2_str", setter: fld_set}]}, + "dclass_ratio3": {to:[{field: "rsa.counters.dclass_r3", setter: fld_set}]}, + "dclass_ratio3_string": {to:[{field: "rsa.counters.dclass_r3_str", setter: fld_set}]}, + "dead": {convert: to_long, to:[{field: "rsa.internal.dead", setter: fld_set}]}, + "description": {to:[{field: "rsa.misc.description", setter: fld_set}]}, + "detail": {to:[{field: "rsa.misc.event_desc", setter: fld_set}]}, + "device": {to:[{field: "rsa.misc.device_name", setter: fld_set}]}, + "device.class": {to:[{field: "rsa.internal.device_class", setter: fld_set}]}, + "device.group": {to:[{field: "rsa.internal.device_group", setter: fld_set}]}, + "device.host": {to:[{field: "rsa.internal.device_host", setter: fld_set}]}, + "device.ip": {convert: to_ip, to:[{field: "rsa.internal.device_ip", setter: fld_set}]}, + "device.ipv6": {convert: to_ip, to:[{field: "rsa.internal.device_ipv6", setter: fld_set}]}, + "device.type": {to:[{field: "rsa.internal.device_type", setter: fld_set}]}, + "device.type.id": {convert: to_long, to:[{field: "rsa.internal.device_type_id", setter: fld_set}]}, + "devicehostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "devvendor": {to:[{field: "rsa.misc.devvendor", setter: fld_set}]}, + "dhost": {to:[{field: "rsa.network.host_dst", setter: fld_set}]}, + "did": {to:[{field: "rsa.internal.did", setter: fld_set}]}, + "dinterface": {to:[{field: "rsa.network.dinterface", setter: fld_set}]}, + "directory.dst": {to:[{field: "rsa.file.directory_dst", setter: fld_set}]}, + "directory.src": {to:[{field: "rsa.file.directory_src", setter: fld_set}]}, + "disk_volume": {to:[{field: "rsa.storage.disk_volume", setter: fld_set}]}, + "disposition": {to:[{field: "rsa.misc.disposition", setter: fld_set}]}, + "distance": {to:[{field: "rsa.misc.distance", setter: fld_set}]}, + "dmask": {to:[{field: "rsa.network.dmask", setter: fld_set}]}, + "dn": {to:[{field: "rsa.identity.dn", setter: fld_set}]}, + "dns_a_record": {to:[{field: "rsa.network.dns_a_record", setter: fld_set}]}, + "dns_cname_record": {to:[{field: "rsa.network.dns_cname_record", setter: fld_set}]}, + "dns_id": {to:[{field: "rsa.network.dns_id", setter: fld_set}]}, + "dns_opcode": {to:[{field: "rsa.network.dns_opcode", setter: fld_set}]}, + "dns_ptr_record": {to:[{field: "rsa.network.dns_ptr_record", setter: fld_set}]}, + "dns_resp": {to:[{field: "rsa.network.dns_resp", setter: fld_set}]}, + "dns_type": {to:[{field: "rsa.network.dns_type", setter: fld_set}]}, + "doc_number": {convert: to_long, to:[{field: "rsa.misc.doc_number", setter: fld_set}]}, + "domain": {to:[{field: "rsa.network.domain", setter: fld_set}]}, + "domain1": {to:[{field: "rsa.network.domain1", setter: fld_set}]}, + "dst_dn": {to:[{field: "rsa.identity.dn_dst", setter: fld_set}]}, + "dst_payload": {to:[{field: "rsa.misc.payload_dst", setter: fld_set}]}, + "dst_spi": {to:[{field: "rsa.misc.spi_dst", setter: fld_set}]}, + "dst_zone": {to:[{field: "rsa.network.zone_dst", setter: fld_set}]}, + "dstburb": {to:[{field: "rsa.misc.dstburb", setter: fld_set}]}, + "duration": {convert: to_double, to:[{field: "rsa.time.duration_time", setter: 
fld_set}]}, + "duration_string": {to:[{field: "rsa.time.duration_str", setter: fld_set}]}, + "ec_activity": {to:[{field: "rsa.investigations.ec_activity", setter: fld_set}]}, + "ec_outcome": {to:[{field: "rsa.investigations.ec_outcome", setter: fld_set}]}, + "ec_subject": {to:[{field: "rsa.investigations.ec_subject", setter: fld_set}]}, + "ec_theme": {to:[{field: "rsa.investigations.ec_theme", setter: fld_set}]}, + "edomain": {to:[{field: "rsa.misc.edomain", setter: fld_set}]}, + "edomaub": {to:[{field: "rsa.misc.edomaub", setter: fld_set}]}, + "effective_time": {convert: to_date, to:[{field: "rsa.time.effective_time", setter: fld_set}]}, + "ein.number": {convert: to_long, to:[{field: "rsa.misc.ein_number", setter: fld_set}]}, + "email": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "encryption_type": {to:[{field: "rsa.crypto.crypto", setter: fld_set}]}, + "endtime": {convert: to_date, to:[{field: "rsa.time.endtime", setter: fld_set}]}, + "entropy.req": {convert: to_long, to:[{field: "rsa.internal.entropy_req", setter: fld_set}]}, + "entropy.res": {convert: to_long, to:[{field: "rsa.internal.entropy_res", setter: fld_set}]}, + "entry": {to:[{field: "rsa.internal.entry", setter: fld_set}]}, + "eoc": {to:[{field: "rsa.investigations.eoc", setter: fld_set}]}, + "error": {to:[{field: "rsa.misc.error", setter: fld_set}]}, + "eth_type": {convert: to_long, to:[{field: "rsa.network.eth_type", setter: fld_set}]}, + "euid": {to:[{field: "rsa.misc.euid", setter: fld_set}]}, + "event.cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 1}]}, + "event.cat.name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 1}]}, + "event_cat": {convert: to_long, to:[{field: "rsa.investigations.event_cat", setter: fld_prio, prio: 0}]}, + "event_cat_name": {to:[{field: "rsa.investigations.event_cat_name", setter: fld_prio, prio: 0}]}, + "event_category": {to:[{field: "rsa.misc.event_category", setter: fld_set}]}, + "event_computer": {to:[{field: "rsa.misc.event_computer", setter: fld_set}]}, + "event_counter": {convert: to_long, to:[{field: "rsa.counters.event_counter", setter: fld_set}]}, + "event_description": {to:[{field: "rsa.internal.event_desc", setter: fld_set}]}, + "event_id": {to:[{field: "rsa.misc.event_id", setter: fld_set}]}, + "event_log": {to:[{field: "rsa.misc.event_log", setter: fld_set}]}, + "event_name": {to:[{field: "rsa.internal.event_name", setter: fld_set}]}, + "event_queue_time": {convert: to_date, to:[{field: "rsa.time.event_queue_time", setter: fld_set}]}, + "event_source": {to:[{field: "rsa.misc.event_source", setter: fld_set}]}, + "event_state": {to:[{field: "rsa.misc.event_state", setter: fld_set}]}, + "event_time": {convert: to_date, to:[{field: "rsa.time.event_time", setter: fld_set}]}, + "event_time_str": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 1}]}, + "event_time_string": {to:[{field: "rsa.time.event_time_str", setter: fld_prio, prio: 0}]}, + "event_type": {to:[{field: "rsa.misc.event_type", setter: fld_set}]}, + "event_user": {to:[{field: "rsa.misc.event_user", setter: fld_set}]}, + "eventtime": {to:[{field: "rsa.time.eventtime", setter: fld_set}]}, + "expected_val": {to:[{field: "rsa.misc.expected_val", setter: fld_set}]}, + "expiration_time": {convert: to_date, to:[{field: "rsa.time.expire_time", setter: fld_set}]}, + "expiration_time_string": {to:[{field: "rsa.time.expire_time_str", setter: fld_set}]}, + "facility": {to:[{field: "rsa.misc.facility", setter: fld_set}]}, + 
"facilityname": {to:[{field: "rsa.misc.facilityname", setter: fld_set}]}, + "faddr": {to:[{field: "rsa.network.faddr", setter: fld_set}]}, + "fcatnum": {to:[{field: "rsa.misc.fcatnum", setter: fld_set}]}, + "federated_idp": {to:[{field: "rsa.identity.federated_idp", setter: fld_set}]}, + "federated_sp": {to:[{field: "rsa.identity.federated_sp", setter: fld_set}]}, + "feed.category": {to:[{field: "rsa.internal.feed_category", setter: fld_set}]}, + "feed_desc": {to:[{field: "rsa.internal.feed_desc", setter: fld_set}]}, + "feed_name": {to:[{field: "rsa.internal.feed_name", setter: fld_set}]}, + "fhost": {to:[{field: "rsa.network.fhost", setter: fld_set}]}, + "file_entropy": {convert: to_double, to:[{field: "rsa.file.file_entropy", setter: fld_set}]}, + "file_vendor": {to:[{field: "rsa.file.file_vendor", setter: fld_set}]}, + "filename_dst": {to:[{field: "rsa.file.filename_dst", setter: fld_set}]}, + "filename_src": {to:[{field: "rsa.file.filename_src", setter: fld_set}]}, + "filename_tmp": {to:[{field: "rsa.file.filename_tmp", setter: fld_set}]}, + "filesystem": {to:[{field: "rsa.file.filesystem", setter: fld_set}]}, + "filter": {to:[{field: "rsa.misc.filter", setter: fld_set}]}, + "finterface": {to:[{field: "rsa.misc.finterface", setter: fld_set}]}, + "flags": {to:[{field: "rsa.misc.flags", setter: fld_set}]}, + "forensic_info": {to:[{field: "rsa.misc.forensic_info", setter: fld_set}]}, + "forward.ip": {convert: to_ip, to:[{field: "rsa.internal.forward_ip", setter: fld_set}]}, + "forward.ipv6": {convert: to_ip, to:[{field: "rsa.internal.forward_ipv6", setter: fld_set}]}, + "found": {to:[{field: "rsa.misc.found", setter: fld_set}]}, + "fport": {to:[{field: "rsa.network.fport", setter: fld_set}]}, + "fqdn": {to:[{field: "rsa.web.fqdn", setter: fld_set}]}, + "fresult": {convert: to_long, to:[{field: "rsa.misc.fresult", setter: fld_set}]}, + "from": {to:[{field: "rsa.email.email_src", setter: fld_set}]}, + "gaddr": {to:[{field: "rsa.misc.gaddr", setter: fld_set}]}, + "gateway": {to:[{field: "rsa.network.gateway", setter: fld_set}]}, + "gmtdate": {to:[{field: "rsa.time.gmtdate", setter: fld_set}]}, + "gmttime": {to:[{field: "rsa.time.gmttime", setter: fld_set}]}, + "group": {to:[{field: "rsa.misc.group", setter: fld_set}]}, + "group_object": {to:[{field: "rsa.misc.group_object", setter: fld_set}]}, + "groupid": {to:[{field: "rsa.misc.group_id", setter: fld_set}]}, + "h_code": {to:[{field: "rsa.internal.hcode", setter: fld_set}]}, + "hardware_id": {to:[{field: "rsa.misc.hardware_id", setter: fld_set}]}, + "header.id": {to:[{field: "rsa.internal.header_id", setter: fld_set}]}, + "host.orig": {to:[{field: "rsa.network.host_orig", setter: fld_set}]}, + "host.state": {to:[{field: "rsa.endpoint.host_state", setter: fld_set}]}, + "host.type": {to:[{field: "rsa.network.host_type", setter: fld_set}]}, + "host_role": {to:[{field: "rsa.identity.host_role", setter: fld_set}]}, + "hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hostname": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "hour": {to:[{field: "rsa.time.hour", setter: fld_set}]}, + "https.insact": {to:[{field: "rsa.crypto.https_insact", setter: fld_set}]}, + "https.valid": {to:[{field: "rsa.crypto.https_valid", setter: fld_set}]}, + "icmpcode": {convert: to_long, to:[{field: "rsa.network.icmp_code", setter: fld_set}]}, + "icmptype": {convert: to_long, to:[{field: "rsa.network.icmp_type", setter: fld_set}]}, + "id": {to:[{field: "rsa.misc.reference_id", setter: fld_set}]}, + "id1": {to:[{field: 
"rsa.misc.reference_id1", setter: fld_set}]}, + "id2": {to:[{field: "rsa.misc.reference_id2", setter: fld_set}]}, + "id3": {to:[{field: "rsa.misc.id3", setter: fld_set}]}, + "ike": {to:[{field: "rsa.crypto.ike", setter: fld_set}]}, + "ike_cookie1": {to:[{field: "rsa.crypto.ike_cookie1", setter: fld_set}]}, + "ike_cookie2": {to:[{field: "rsa.crypto.ike_cookie2", setter: fld_set}]}, + "im_buddyid": {to:[{field: "rsa.misc.im_buddyid", setter: fld_set}]}, + "im_buddyname": {to:[{field: "rsa.misc.im_buddyname", setter: fld_set}]}, + "im_client": {to:[{field: "rsa.misc.im_client", setter: fld_set}]}, + "im_croomid": {to:[{field: "rsa.misc.im_croomid", setter: fld_set}]}, + "im_croomtype": {to:[{field: "rsa.misc.im_croomtype", setter: fld_set}]}, + "im_members": {to:[{field: "rsa.misc.im_members", setter: fld_set}]}, + "im_userid": {to:[{field: "rsa.misc.im_userid", setter: fld_set}]}, + "im_username": {to:[{field: "rsa.misc.im_username", setter: fld_set}]}, + "index": {to:[{field: "rsa.misc.index", setter: fld_set}]}, + "info": {to:[{field: "rsa.db.index", setter: fld_set}]}, + "inode": {convert: to_long, to:[{field: "rsa.internal.inode", setter: fld_set}]}, + "inout": {to:[{field: "rsa.misc.inout", setter: fld_set}]}, + "instance": {to:[{field: "rsa.db.instance", setter: fld_set}]}, + "interface": {to:[{field: "rsa.network.interface", setter: fld_set}]}, + "inv.category": {to:[{field: "rsa.investigations.inv_category", setter: fld_set}]}, + "inv.context": {to:[{field: "rsa.investigations.inv_context", setter: fld_set}]}, + "ioc": {to:[{field: "rsa.investigations.ioc", setter: fld_set}]}, + "ip_proto": {convert: to_long, to:[{field: "rsa.network.ip_proto", setter: fld_set}]}, + "ipkt": {to:[{field: "rsa.misc.ipkt", setter: fld_set}]}, + "ipscat": {to:[{field: "rsa.misc.ipscat", setter: fld_set}]}, + "ipspri": {to:[{field: "rsa.misc.ipspri", setter: fld_set}]}, + "jobname": {to:[{field: "rsa.misc.jobname", setter: fld_set}]}, + "jobnum": {to:[{field: "rsa.misc.job_num", setter: fld_set}]}, + "laddr": {to:[{field: "rsa.network.laddr", setter: fld_set}]}, + "language": {to:[{field: "rsa.misc.language", setter: fld_set}]}, + "latitude": {to:[{field: "rsa.misc.latitude", setter: fld_set}]}, + "lc.cid": {to:[{field: "rsa.internal.lc_cid", setter: fld_set}]}, + "lc.ctime": {convert: to_date, to:[{field: "rsa.internal.lc_ctime", setter: fld_set}]}, + "ldap": {to:[{field: "rsa.identity.ldap", setter: fld_set}]}, + "ldap.query": {to:[{field: "rsa.identity.ldap_query", setter: fld_set}]}, + "ldap.response": {to:[{field: "rsa.identity.ldap_response", setter: fld_set}]}, + "level": {convert: to_long, to:[{field: "rsa.internal.level", setter: fld_set}]}, + "lhost": {to:[{field: "rsa.network.lhost", setter: fld_set}]}, + "library": {to:[{field: "rsa.misc.library", setter: fld_set}]}, + "lifetime": {convert: to_long, to:[{field: "rsa.misc.lifetime", setter: fld_set}]}, + "linenum": {to:[{field: "rsa.misc.linenum", setter: fld_set}]}, + "link": {to:[{field: "rsa.misc.link", setter: fld_set}]}, + "linterface": {to:[{field: "rsa.network.linterface", setter: fld_set}]}, + "list_name": {to:[{field: "rsa.misc.list_name", setter: fld_set}]}, + "listnum": {to:[{field: "rsa.misc.listnum", setter: fld_set}]}, + "load_data": {to:[{field: "rsa.misc.load_data", setter: fld_set}]}, + "location_floor": {to:[{field: "rsa.misc.location_floor", setter: fld_set}]}, + "location_mark": {to:[{field: "rsa.misc.location_mark", setter: fld_set}]}, + "log_id": {to:[{field: "rsa.misc.log_id", setter: fld_set}]}, + "log_type": 
{to:[{field: "rsa.misc.log_type", setter: fld_set}]}, + "logid": {to:[{field: "rsa.misc.logid", setter: fld_set}]}, + "logip": {to:[{field: "rsa.misc.logip", setter: fld_set}]}, + "logname": {to:[{field: "rsa.misc.logname", setter: fld_set}]}, + "logon_type": {to:[{field: "rsa.identity.logon_type", setter: fld_set}]}, + "logon_type_desc": {to:[{field: "rsa.identity.logon_type_desc", setter: fld_set}]}, + "longitude": {to:[{field: "rsa.misc.longitude", setter: fld_set}]}, + "lport": {to:[{field: "rsa.misc.lport", setter: fld_set}]}, + "lread": {convert: to_long, to:[{field: "rsa.db.lread", setter: fld_set}]}, + "lun": {to:[{field: "rsa.storage.lun", setter: fld_set}]}, + "lwrite": {convert: to_long, to:[{field: "rsa.db.lwrite", setter: fld_set}]}, + "macaddr": {convert: to_mac, to:[{field: "rsa.network.eth_host", setter: fld_set}]}, + "mail_id": {to:[{field: "rsa.misc.mail_id", setter: fld_set}]}, + "mask": {to:[{field: "rsa.network.mask", setter: fld_set}]}, + "match": {to:[{field: "rsa.misc.match", setter: fld_set}]}, + "mbug_data": {to:[{field: "rsa.misc.mbug_data", setter: fld_set}]}, + "mcb.req": {convert: to_long, to:[{field: "rsa.internal.mcb_req", setter: fld_set}]}, + "mcb.res": {convert: to_long, to:[{field: "rsa.internal.mcb_res", setter: fld_set}]}, + "mcbc.req": {convert: to_long, to:[{field: "rsa.internal.mcbc_req", setter: fld_set}]}, + "mcbc.res": {convert: to_long, to:[{field: "rsa.internal.mcbc_res", setter: fld_set}]}, + "medium": {convert: to_long, to:[{field: "rsa.internal.medium", setter: fld_set}]}, + "message": {to:[{field: "rsa.internal.message", setter: fld_set}]}, + "message_body": {to:[{field: "rsa.misc.message_body", setter: fld_set}]}, + "messageid": {to:[{field: "rsa.internal.messageid", setter: fld_set}]}, + "min": {to:[{field: "rsa.time.min", setter: fld_set}]}, + "misc": {to:[{field: "rsa.misc.misc", setter: fld_set}]}, + "misc_name": {to:[{field: "rsa.misc.misc_name", setter: fld_set}]}, + "mode": {to:[{field: "rsa.misc.mode", setter: fld_set}]}, + "month": {to:[{field: "rsa.time.month", setter: fld_set}]}, + "msg": {to:[{field: "rsa.internal.msg", setter: fld_set}]}, + "msgIdPart1": {to:[{field: "rsa.misc.msgIdPart1", setter: fld_set}]}, + "msgIdPart2": {to:[{field: "rsa.misc.msgIdPart2", setter: fld_set}]}, + "msgIdPart3": {to:[{field: "rsa.misc.msgIdPart3", setter: fld_set}]}, + "msgIdPart4": {to:[{field: "rsa.misc.msgIdPart4", setter: fld_set}]}, + "msg_id": {to:[{field: "rsa.internal.msg_id", setter: fld_set}]}, + "msg_type": {to:[{field: "rsa.misc.msg_type", setter: fld_set}]}, + "msgid": {to:[{field: "rsa.misc.msgid", setter: fld_set}]}, + "name": {to:[{field: "rsa.misc.name", setter: fld_set}]}, + "netname": {to:[{field: "rsa.network.netname", setter: fld_set}]}, + "netsessid": {to:[{field: "rsa.misc.netsessid", setter: fld_set}]}, + "network_port": {convert: to_long, to:[{field: "rsa.network.network_port", setter: fld_set}]}, + "network_service": {to:[{field: "rsa.network.network_service", setter: fld_set}]}, + "node": {to:[{field: "rsa.misc.node", setter: fld_set}]}, + "nodename": {to:[{field: "rsa.internal.node_name", setter: fld_set}]}, + "ntype": {to:[{field: "rsa.misc.ntype", setter: fld_set}]}, + "num": {to:[{field: "rsa.misc.num", setter: fld_set}]}, + "number": {to:[{field: "rsa.misc.number", setter: fld_set}]}, + "number1": {to:[{field: "rsa.misc.number1", setter: fld_set}]}, + "number2": {to:[{field: "rsa.misc.number2", setter: fld_set}]}, + "nwe.callback_id": {to:[{field: "rsa.internal.nwe_callback_id", setter: fld_set}]}, + "nwwn": 
{to:[{field: "rsa.misc.nwwn", setter: fld_set}]}, + "obj_id": {to:[{field: "rsa.internal.obj_id", setter: fld_set}]}, + "obj_name": {to:[{field: "rsa.misc.obj_name", setter: fld_set}]}, + "obj_server": {to:[{field: "rsa.internal.obj_server", setter: fld_set}]}, + "obj_type": {to:[{field: "rsa.misc.obj_type", setter: fld_set}]}, + "obj_value": {to:[{field: "rsa.internal.obj_val", setter: fld_set}]}, + "object": {to:[{field: "rsa.misc.object", setter: fld_set}]}, + "observed_val": {to:[{field: "rsa.misc.observed_val", setter: fld_set}]}, + "operation": {to:[{field: "rsa.misc.operation", setter: fld_set}]}, + "operation_id": {to:[{field: "rsa.misc.operation_id", setter: fld_set}]}, + "opkt": {to:[{field: "rsa.misc.opkt", setter: fld_set}]}, + "org.dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 1}]}, + "org.src": {to:[{field: "rsa.physical.org_src", setter: fld_set}]}, + "org_dst": {to:[{field: "rsa.physical.org_dst", setter: fld_prio, prio: 0}]}, + "orig_from": {to:[{field: "rsa.misc.orig_from", setter: fld_set}]}, + "origin": {to:[{field: "rsa.network.origin", setter: fld_set}]}, + "original_owner": {to:[{field: "rsa.identity.owner", setter: fld_set}]}, + "os": {to:[{field: "rsa.misc.OS", setter: fld_set}]}, + "owner_id": {to:[{field: "rsa.misc.owner_id", setter: fld_set}]}, + "p_action": {to:[{field: "rsa.misc.p_action", setter: fld_set}]}, + "p_date": {to:[{field: "rsa.time.p_date", setter: fld_set}]}, + "p_filter": {to:[{field: "rsa.misc.p_filter", setter: fld_set}]}, + "p_group_object": {to:[{field: "rsa.misc.p_group_object", setter: fld_set}]}, + "p_id": {to:[{field: "rsa.misc.p_id", setter: fld_set}]}, + "p_month": {to:[{field: "rsa.time.p_month", setter: fld_set}]}, + "p_msgid": {to:[{field: "rsa.misc.p_msgid", setter: fld_set}]}, + "p_msgid1": {to:[{field: "rsa.misc.p_msgid1", setter: fld_set}]}, + "p_msgid2": {to:[{field: "rsa.misc.p_msgid2", setter: fld_set}]}, + "p_result1": {to:[{field: "rsa.misc.p_result1", setter: fld_set}]}, + "p_time": {to:[{field: "rsa.time.p_time", setter: fld_set}]}, + "p_time1": {to:[{field: "rsa.time.p_time1", setter: fld_set}]}, + "p_time2": {to:[{field: "rsa.time.p_time2", setter: fld_set}]}, + "p_url": {to:[{field: "rsa.web.p_url", setter: fld_set}]}, + "p_user_agent": {to:[{field: "rsa.web.p_user_agent", setter: fld_set}]}, + "p_web_cookie": {to:[{field: "rsa.web.p_web_cookie", setter: fld_set}]}, + "p_web_method": {to:[{field: "rsa.web.p_web_method", setter: fld_set}]}, + "p_web_referer": {to:[{field: "rsa.web.p_web_referer", setter: fld_set}]}, + "p_year": {to:[{field: "rsa.time.p_year", setter: fld_set}]}, + "packet_length": {to:[{field: "rsa.network.packet_length", setter: fld_set}]}, + "paddr": {convert: to_ip, to:[{field: "rsa.network.paddr", setter: fld_set}]}, + "param": {to:[{field: "rsa.misc.param", setter: fld_set}]}, + "param.dst": {to:[{field: "rsa.misc.param_dst", setter: fld_set}]}, + "param.src": {to:[{field: "rsa.misc.param_src", setter: fld_set}]}, + "parent_node": {to:[{field: "rsa.misc.parent_node", setter: fld_set}]}, + "parse.error": {to:[{field: "rsa.internal.parse_error", setter: fld_set}]}, + "password": {to:[{field: "rsa.identity.password", setter: fld_set}]}, + "password_chg": {to:[{field: "rsa.misc.password_chg", setter: fld_set}]}, + "password_expire": {to:[{field: "rsa.misc.password_expire", setter: fld_set}]}, + "patient_fname": {to:[{field: "rsa.healthcare.patient_fname", setter: fld_set}]}, + "patient_id": {to:[{field: "rsa.healthcare.patient_id", setter: fld_set}]}, + "patient_lname": {to:[{field: 
"rsa.healthcare.patient_lname", setter: fld_set}]}, + "patient_mname": {to:[{field: "rsa.healthcare.patient_mname", setter: fld_set}]}, + "payload.req": {convert: to_long, to:[{field: "rsa.internal.payload_req", setter: fld_set}]}, + "payload.res": {convert: to_long, to:[{field: "rsa.internal.payload_res", setter: fld_set}]}, + "peer": {to:[{field: "rsa.crypto.peer", setter: fld_set}]}, + "peer_id": {to:[{field: "rsa.crypto.peer_id", setter: fld_set}]}, + "permgranted": {to:[{field: "rsa.misc.permgranted", setter: fld_set}]}, + "permissions": {to:[{field: "rsa.db.permissions", setter: fld_set}]}, + "permwanted": {to:[{field: "rsa.misc.permwanted", setter: fld_set}]}, + "pgid": {to:[{field: "rsa.misc.pgid", setter: fld_set}]}, + "phone_number": {to:[{field: "rsa.misc.phone", setter: fld_prio, prio: 2}]}, + "phost": {to:[{field: "rsa.network.phost", setter: fld_set}]}, + "pid": {to:[{field: "rsa.misc.pid", setter: fld_set}]}, + "policy": {to:[{field: "rsa.misc.policy", setter: fld_set}]}, + "policyUUID": {to:[{field: "rsa.misc.policyUUID", setter: fld_set}]}, + "policy_id": {to:[{field: "rsa.misc.policy_id", setter: fld_set}]}, + "policy_value": {to:[{field: "rsa.misc.policy_value", setter: fld_set}]}, + "policy_waiver": {to:[{field: "rsa.misc.policy_waiver", setter: fld_set}]}, + "policyname": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 0}]}, + "pool_id": {to:[{field: "rsa.misc.pool_id", setter: fld_set}]}, + "pool_name": {to:[{field: "rsa.misc.pool_name", setter: fld_set}]}, + "port": {convert: to_long, to:[{field: "rsa.network.port", setter: fld_set}]}, + "portname": {to:[{field: "rsa.misc.port_name", setter: fld_set}]}, + "pread": {convert: to_long, to:[{field: "rsa.db.pread", setter: fld_set}]}, + "priority": {to:[{field: "rsa.misc.priority", setter: fld_set}]}, + "privilege": {to:[{field: "rsa.file.privilege", setter: fld_set}]}, + "process.vid.dst": {to:[{field: "rsa.internal.process_vid_dst", setter: fld_set}]}, + "process.vid.src": {to:[{field: "rsa.internal.process_vid_src", setter: fld_set}]}, + "process_id_val": {to:[{field: "rsa.misc.process_id_val", setter: fld_set}]}, + "processing_time": {to:[{field: "rsa.time.process_time", setter: fld_set}]}, + "profile": {to:[{field: "rsa.identity.profile", setter: fld_set}]}, + "prog_asp_num": {to:[{field: "rsa.misc.prog_asp_num", setter: fld_set}]}, + "program": {to:[{field: "rsa.misc.program", setter: fld_set}]}, + "protocol_detail": {to:[{field: "rsa.network.protocol_detail", setter: fld_set}]}, + "pwwn": {to:[{field: "rsa.storage.pwwn", setter: fld_set}]}, + "r_hostid": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "real_data": {to:[{field: "rsa.misc.real_data", setter: fld_set}]}, + "realm": {to:[{field: "rsa.identity.realm", setter: fld_set}]}, + "reason": {to:[{field: "rsa.misc.reason", setter: fld_set}]}, + "rec_asp_device": {to:[{field: "rsa.misc.rec_asp_device", setter: fld_set}]}, + "rec_asp_num": {to:[{field: "rsa.misc.rec_asp_num", setter: fld_set}]}, + "rec_library": {to:[{field: "rsa.misc.rec_library", setter: fld_set}]}, + "recorded_time": {convert: to_date, to:[{field: "rsa.time.recorded_time", setter: fld_set}]}, + "recordnum": {to:[{field: "rsa.misc.recordnum", setter: fld_set}]}, + "registry.key": {to:[{field: "rsa.endpoint.registry_key", setter: fld_set}]}, + "registry.value": {to:[{field: "rsa.endpoint.registry_value", setter: fld_set}]}, + "remote_domain": {to:[{field: "rsa.web.remote_domain", setter: fld_set}]}, + "remote_domain_id": {to:[{field: "rsa.network.remote_domain_id", 
setter: fld_set}]}, + "reputation_num": {convert: to_double, to:[{field: "rsa.web.reputation_num", setter: fld_set}]}, + "resource": {to:[{field: "rsa.internal.resource", setter: fld_set}]}, + "resource_class": {to:[{field: "rsa.internal.resource_class", setter: fld_set}]}, + "result": {to:[{field: "rsa.misc.result", setter: fld_set}]}, + "result_code": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 1}]}, + "resultcode": {to:[{field: "rsa.misc.result_code", setter: fld_prio, prio: 0}]}, + "rid": {convert: to_long, to:[{field: "rsa.internal.rid", setter: fld_set}]}, + "risk": {to:[{field: "rsa.misc.risk", setter: fld_set}]}, + "risk_info": {to:[{field: "rsa.misc.risk_info", setter: fld_set}]}, + "risk_num": {convert: to_double, to:[{field: "rsa.misc.risk_num", setter: fld_set}]}, + "risk_num_comm": {convert: to_double, to:[{field: "rsa.misc.risk_num_comm", setter: fld_set}]}, + "risk_num_next": {convert: to_double, to:[{field: "rsa.misc.risk_num_next", setter: fld_set}]}, + "risk_num_sand": {convert: to_double, to:[{field: "rsa.misc.risk_num_sand", setter: fld_set}]}, + "risk_num_static": {convert: to_double, to:[{field: "rsa.misc.risk_num_static", setter: fld_set}]}, + "risk_suspicious": {to:[{field: "rsa.misc.risk_suspicious", setter: fld_set}]}, + "risk_warning": {to:[{field: "rsa.misc.risk_warning", setter: fld_set}]}, + "rpayload": {to:[{field: "rsa.network.rpayload", setter: fld_set}]}, + "ruid": {to:[{field: "rsa.misc.ruid", setter: fld_set}]}, + "rule": {to:[{field: "rsa.misc.rule", setter: fld_set}]}, + "rule_group": {to:[{field: "rsa.misc.rule_group", setter: fld_set}]}, + "rule_template": {to:[{field: "rsa.misc.rule_template", setter: fld_set}]}, + "rule_uid": {to:[{field: "rsa.misc.rule_uid", setter: fld_set}]}, + "rulename": {to:[{field: "rsa.misc.rule_name", setter: fld_set}]}, + "s_certauth": {to:[{field: "rsa.crypto.s_certauth", setter: fld_set}]}, + "s_cipher": {to:[{field: "rsa.crypto.cipher_src", setter: fld_set}]}, + "s_ciphersize": {convert: to_long, to:[{field: "rsa.crypto.cipher_size_src", setter: fld_set}]}, + "s_context": {to:[{field: "rsa.misc.context_subject", setter: fld_set}]}, + "s_sslver": {to:[{field: "rsa.crypto.ssl_ver_src", setter: fld_set}]}, + "sburb": {to:[{field: "rsa.misc.sburb", setter: fld_set}]}, + "scheme": {to:[{field: "rsa.crypto.scheme", setter: fld_set}]}, + "sdomain_fld": {to:[{field: "rsa.misc.sdomain_fld", setter: fld_set}]}, + "search.text": {to:[{field: "rsa.misc.search_text", setter: fld_set}]}, + "sec": {to:[{field: "rsa.misc.sec", setter: fld_set}]}, + "second": {to:[{field: "rsa.misc.second", setter: fld_set}]}, + "sensor": {to:[{field: "rsa.misc.sensor", setter: fld_set}]}, + "sensorname": {to:[{field: "rsa.misc.sensorname", setter: fld_set}]}, + "seqnum": {to:[{field: "rsa.misc.seqnum", setter: fld_set}]}, + "serial_number": {to:[{field: "rsa.misc.serial_number", setter: fld_set}]}, + "service.account": {to:[{field: "rsa.identity.service_account", setter: fld_set}]}, + "session": {to:[{field: "rsa.misc.session", setter: fld_set}]}, + "session.split": {to:[{field: "rsa.internal.session_split", setter: fld_set}]}, + "sessionid": {to:[{field: "rsa.misc.log_session_id", setter: fld_set}]}, + "sessionid1": {to:[{field: "rsa.misc.log_session_id1", setter: fld_set}]}, + "sessiontype": {to:[{field: "rsa.misc.sessiontype", setter: fld_set}]}, + "severity": {to:[{field: "rsa.misc.severity", setter: fld_set}]}, + "sid": {to:[{field: "rsa.identity.user_sid_dst", setter: fld_set}]}, + "sig.name": {to:[{field: "rsa.misc.sig_name", 
setter: fld_set}]}, + "sigUUID": {to:[{field: "rsa.misc.sigUUID", setter: fld_set}]}, + "sigcat": {to:[{field: "rsa.misc.sigcat", setter: fld_set}]}, + "sigid": {convert: to_long, to:[{field: "rsa.misc.sig_id", setter: fld_set}]}, + "sigid1": {convert: to_long, to:[{field: "rsa.misc.sig_id1", setter: fld_set}]}, + "sigid_string": {to:[{field: "rsa.misc.sig_id_str", setter: fld_set}]}, + "signame": {to:[{field: "rsa.misc.policy_name", setter: fld_prio, prio: 1}]}, + "sigtype": {to:[{field: "rsa.crypto.sig_type", setter: fld_set}]}, + "sinterface": {to:[{field: "rsa.network.sinterface", setter: fld_set}]}, + "site": {to:[{field: "rsa.internal.site", setter: fld_set}]}, + "size": {convert: to_long, to:[{field: "rsa.internal.size", setter: fld_set}]}, + "smask": {to:[{field: "rsa.network.smask", setter: fld_set}]}, + "snmp.oid": {to:[{field: "rsa.misc.snmp_oid", setter: fld_set}]}, + "snmp.value": {to:[{field: "rsa.misc.snmp_value", setter: fld_set}]}, + "sourcefile": {to:[{field: "rsa.internal.sourcefile", setter: fld_set}]}, + "space": {to:[{field: "rsa.misc.space", setter: fld_set}]}, + "space1": {to:[{field: "rsa.misc.space1", setter: fld_set}]}, + "spi": {to:[{field: "rsa.misc.spi", setter: fld_set}]}, + "sql": {to:[{field: "rsa.misc.sql", setter: fld_set}]}, + "src_dn": {to:[{field: "rsa.identity.dn_src", setter: fld_set}]}, + "src_payload": {to:[{field: "rsa.misc.payload_src", setter: fld_set}]}, + "src_spi": {to:[{field: "rsa.misc.spi_src", setter: fld_set}]}, + "src_zone": {to:[{field: "rsa.network.zone_src", setter: fld_set}]}, + "srcburb": {to:[{field: "rsa.misc.srcburb", setter: fld_set}]}, + "srcdom": {to:[{field: "rsa.misc.srcdom", setter: fld_set}]}, + "srcservice": {to:[{field: "rsa.misc.srcservice", setter: fld_set}]}, + "ssid": {to:[{field: "rsa.wireless.wlan_ssid", setter: fld_prio, prio: 0}]}, + "stamp": {convert: to_date, to:[{field: "rsa.time.stamp", setter: fld_set}]}, + "starttime": {convert: to_date, to:[{field: "rsa.time.starttime", setter: fld_set}]}, + "state": {to:[{field: "rsa.misc.state", setter: fld_set}]}, + "statement": {to:[{field: "rsa.internal.statement", setter: fld_set}]}, + "status": {to:[{field: "rsa.misc.status", setter: fld_set}]}, + "status1": {to:[{field: "rsa.misc.status1", setter: fld_set}]}, + "streams": {convert: to_long, to:[{field: "rsa.misc.streams", setter: fld_set}]}, + "subcategory": {to:[{field: "rsa.misc.subcategory", setter: fld_set}]}, + "subject": {to:[{field: "rsa.email.subject", setter: fld_set}]}, + "svcno": {to:[{field: "rsa.misc.svcno", setter: fld_set}]}, + "system": {to:[{field: "rsa.misc.system", setter: fld_set}]}, + "t_context": {to:[{field: "rsa.misc.context_target", setter: fld_set}]}, + "task_name": {to:[{field: "rsa.file.task_name", setter: fld_set}]}, + "tbdstr1": {to:[{field: "rsa.misc.tbdstr1", setter: fld_set}]}, + "tbdstr2": {to:[{field: "rsa.misc.tbdstr2", setter: fld_set}]}, + "tbl_name": {to:[{field: "rsa.db.table_name", setter: fld_set}]}, + "tcp_flags": {convert: to_long, to:[{field: "rsa.misc.tcp_flags", setter: fld_set}]}, + "terminal": {to:[{field: "rsa.misc.terminal", setter: fld_set}]}, + "tgtdom": {to:[{field: "rsa.misc.tgtdom", setter: fld_set}]}, + "tgtdomain": {to:[{field: "rsa.misc.tgtdomain", setter: fld_set}]}, + "threat_name": {to:[{field: "rsa.threat.threat_category", setter: fld_set}]}, + "threat_source": {to:[{field: "rsa.threat.threat_source", setter: fld_set}]}, + "threat_val": {to:[{field: "rsa.threat.threat_desc", setter: fld_set}]}, + "threshold": {to:[{field: "rsa.misc.threshold", setter: 
fld_set}]}, + "time": {convert: to_date, to:[{field: "rsa.internal.time", setter: fld_set}]}, + "timestamp": {to:[{field: "rsa.time.timestamp", setter: fld_set}]}, + "timezone": {to:[{field: "rsa.time.timezone", setter: fld_set}]}, + "to": {to:[{field: "rsa.email.email_dst", setter: fld_set}]}, + "tos": {convert: to_long, to:[{field: "rsa.misc.tos", setter: fld_set}]}, + "trans_from": {to:[{field: "rsa.email.trans_from", setter: fld_set}]}, + "trans_id": {to:[{field: "rsa.db.transact_id", setter: fld_set}]}, + "trans_to": {to:[{field: "rsa.email.trans_to", setter: fld_set}]}, + "trigger_desc": {to:[{field: "rsa.misc.trigger_desc", setter: fld_set}]}, + "trigger_val": {to:[{field: "rsa.misc.trigger_val", setter: fld_set}]}, + "type": {to:[{field: "rsa.misc.type", setter: fld_set}]}, + "type1": {to:[{field: "rsa.misc.type1", setter: fld_set}]}, + "tzone": {to:[{field: "rsa.time.tzone", setter: fld_set}]}, + "ubc.req": {convert: to_long, to:[{field: "rsa.internal.ubc_req", setter: fld_set}]}, + "ubc.res": {convert: to_long, to:[{field: "rsa.internal.ubc_res", setter: fld_set}]}, + "udb_class": {to:[{field: "rsa.misc.udb_class", setter: fld_set}]}, + "url_fld": {to:[{field: "rsa.misc.url_fld", setter: fld_set}]}, + "urlpage": {to:[{field: "rsa.web.urlpage", setter: fld_set}]}, + "urlroot": {to:[{field: "rsa.web.urlroot", setter: fld_set}]}, + "user_address": {to:[{field: "rsa.email.email", setter: fld_append}]}, + "user_dept": {to:[{field: "rsa.identity.user_dept", setter: fld_set}]}, + "user_div": {to:[{field: "rsa.misc.user_div", setter: fld_set}]}, + "user_fname": {to:[{field: "rsa.identity.firstname", setter: fld_set}]}, + "user_lname": {to:[{field: "rsa.identity.lastname", setter: fld_set}]}, + "user_mname": {to:[{field: "rsa.identity.middlename", setter: fld_set}]}, + "user_org": {to:[{field: "rsa.identity.org", setter: fld_set}]}, + "user_role": {to:[{field: "rsa.identity.user_role", setter: fld_set}]}, + "userid": {to:[{field: "rsa.misc.userid", setter: fld_set}]}, + "username_fld": {to:[{field: "rsa.misc.username_fld", setter: fld_set}]}, + "utcstamp": {to:[{field: "rsa.misc.utcstamp", setter: fld_set}]}, + "v_instafname": {to:[{field: "rsa.misc.v_instafname", setter: fld_set}]}, + "vendor_event_cat": {to:[{field: "rsa.investigations.event_vcat", setter: fld_set}]}, + "version": {to:[{field: "rsa.misc.version", setter: fld_set}]}, + "vid": {to:[{field: "rsa.internal.msg_vid", setter: fld_set}]}, + "virt_data": {to:[{field: "rsa.misc.virt_data", setter: fld_set}]}, + "virusname": {to:[{field: "rsa.misc.virusname", setter: fld_set}]}, + "vlan": {convert: to_long, to:[{field: "rsa.network.vlan", setter: fld_set}]}, + "vlan.name": {to:[{field: "rsa.network.vlan_name", setter: fld_set}]}, + "vm_target": {to:[{field: "rsa.misc.vm_target", setter: fld_set}]}, + "vpnid": {to:[{field: "rsa.misc.vpnid", setter: fld_set}]}, + "vsys": {to:[{field: "rsa.misc.vsys", setter: fld_set}]}, + "vuln_ref": {to:[{field: "rsa.misc.vuln_ref", setter: fld_set}]}, + "web_cookie": {to:[{field: "rsa.web.web_cookie", setter: fld_set}]}, + "web_extension_tmp": {to:[{field: "rsa.web.web_extension_tmp", setter: fld_set}]}, + "web_host": {to:[{field: "rsa.web.alias_host", setter: fld_set}]}, + "web_method": {to:[{field: "rsa.misc.action", setter: fld_append}]}, + "web_page": {to:[{field: "rsa.web.web_page", setter: fld_set}]}, + "web_ref_domain": {to:[{field: "rsa.web.web_ref_domain", setter: fld_set}]}, + "web_ref_host": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "web_ref_page": {to:[{field: 
"rsa.web.web_ref_page", setter: fld_set}]}, + "web_ref_query": {to:[{field: "rsa.web.web_ref_query", setter: fld_set}]}, + "web_ref_root": {to:[{field: "rsa.web.web_ref_root", setter: fld_set}]}, + "wifi_channel": {convert: to_long, to:[{field: "rsa.wireless.wlan_channel", setter: fld_set}]}, + "wlan": {to:[{field: "rsa.wireless.wlan_name", setter: fld_set}]}, + "word": {to:[{field: "rsa.internal.word", setter: fld_set}]}, + "workspace_desc": {to:[{field: "rsa.misc.workspace", setter: fld_set}]}, + "workstation": {to:[{field: "rsa.network.alias_host", setter: fld_append}]}, + "year": {to:[{field: "rsa.time.year", setter: fld_set}]}, + "zone": {to:[{field: "rsa.network.zone", setter: fld_set}]}, + }; + + function to_date(value) { + switch (typeof (value)) { + case "object": + // This is a Date. But as it was obtained from evt.Get(), the VM + // doesn't see it as a JS Date anymore, thus value instanceof Date === false. + // Have to trust that any object here is a valid Date for Go. + return value; + case "string": + var asDate = new Date(value); + if (!isNaN(asDate)) return asDate; + } + } + + // ECMAScript 5.1 doesn't have Object.MAX_SAFE_INTEGER / Object.MIN_SAFE_INTEGER. + var maxSafeInt = Math.pow(2, 53) - 1; + var minSafeInt = -maxSafeInt; + + function to_long(value) { + var num = parseInt(value); + // Better not to index a number if it's not safe (above 53 bits). + return !isNaN(num) && minSafeInt <= num && num <= maxSafeInt ? num : undefined; + } + + function to_ip(value) { + if (value.indexOf(":") === -1) + return to_ipv4(value); + return to_ipv6(value); + } + + var ipv4_regex = /^(\d+)\.(\d+)\.(\d+)\.(\d+)$/; + var ipv6_hex_regex = /^[0-9A-Fa-f]{1,4}$/; + + function to_ipv4(value) { + var result = ipv4_regex.exec(value); + if (result == null || result.length !== 5) return; + for (var i = 1; i < 5; i++) { + var num = strictToInt(result[i]); + if (isNaN(num) || num < 0 || num > 255) return; + } + return value; + } + + function to_ipv6(value) { + var sqEnd = value.indexOf("]"); + if (sqEnd > -1) { + if (value.charAt(0) !== "[") return; + value = value.substr(1, sqEnd - 1); + } + var zoneOffset = value.indexOf("%"); + if (zoneOffset > -1) { + value = value.substr(0, zoneOffset); + } + var parts = value.split(":"); + if (parts == null || parts.length < 3 || parts.length > 8) return; + var numEmpty = 0; + var innerEmpty = 0; + for (var i = 0; i < parts.length; i++) { + if (parts[i].length === 0) { + numEmpty++; + if (i > 0 && i + 1 < parts.length) innerEmpty++; + } else if (!parts[i].match(ipv6_hex_regex) && + // Accept an IPv6 with a valid IPv4 at the end. + ((i + 1 < parts.length) || !to_ipv4(parts[i]))) { + return; + } + } + return innerEmpty === 0 && parts.length === 8 || innerEmpty === 1 ? value : undefined; + } + + function to_double(value) { + return parseFloat(value); + } + + function to_mac(value) { + // ES doesn't have a mac datatype so it's safe to ingest whatever was captured. + return value; + } + + function to_lowercase(value) { + // to_lowercase is used against keyword fields, which can accept + // any other type (numbers, dates). + return typeof(value) === "string"? 
value.toLowerCase() : value; + } + + function fld_set(dst, value) { + dst[this.field] = { v: value }; + } + + function fld_append(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: [value] }; + } else { + var base = dst[this.field]; + if (base.v.indexOf(value)===-1) base.v.push(value); + } + } + + function fld_prio(dst, value) { + if (dst[this.field] === undefined) { + dst[this.field] = { v: value, prio: this.prio}; + } else if(this.prio < dst[this.field].prio) { + dst[this.field].v = value; + dst[this.field].prio = this.prio; + } + } + + var valid_ecs_outcome = { + 'failure': true, + 'success': true, + 'unknown': true + }; + + function fld_ecs_outcome(dst, value) { + value = value.toLowerCase(); + if (valid_ecs_outcome[value] === undefined) { + value = 'unknown'; + } + if (dst[this.field] === undefined) { + dst[this.field] = { v: value }; + } else if (dst[this.field].v === 'unknown') { + dst[this.field] = { v: value }; + } + } + + function map_all(evt, targets, value) { + for (var i = 0; i < targets.length; i++) { + evt.Put(targets[i], value); + } + } + + function populate_fields(evt) { + var base = evt.Get(FIELDS_OBJECT); + if (base === null) return; + alternate_datetime(evt); + if (map_ecs) { + do_populate(evt, base, ecs_mappings); + } + if (map_rsa) { + do_populate(evt, base, rsa_mappings); + } + if (keep_raw) { + evt.Put("rsa.raw", base); + } + evt.Delete(FIELDS_OBJECT); + } + + var datetime_alt_components = [ + {field: "day", fmts: [[dF]]}, + {field: "year", fmts: [[dW]]}, + {field: "month", fmts: [[dB],[dG]]}, + {field: "date", fmts: [[dW,dSkip,dG,dSkip,dF],[dW,dSkip,dB,dSkip,dF],[dW,dSkip,dR,dSkip,dF]]}, + {field: "hour", fmts: [[dN]]}, + {field: "min", fmts: [[dU]]}, + {field: "secs", fmts: [[dO]]}, + {field: "time", fmts: [[dN, dSkip, dU, dSkip, dO]]}, + ]; + + function alternate_datetime(evt) { + if (evt.Get(FIELDS_PREFIX + "event_time") != null) { + return; + } + var tzOffset = tz_offset; + if (tzOffset === "event") { + tzOffset = evt.Get("event.timezone"); + } + var container = new DateContainer(tzOffset); + for (var i = 0; i < datetime_alt_components.length; i++) { + // Try each captured date/time component against its candidate formats. + var dtc = datetime_alt_components[i]; + var value = evt.Get(FIELDS_PREFIX + dtc.field); + if (value == null) continue; + container.setComponent(dtc.field, dtc.fmts, value); + } + var date = container.toDate(); + if (date != null) { + evt.Put(FIELDS_PREFIX + "event_time", date); + } + } + + var dup7 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + + var hdr1 = match("HEADER#0:0001", "message", "%APACHETOMCAT-%{level}-%{messageid}: %{payload}", processor_chain([ + setc("header_id","0001"), + ])); + + var hdr2 = match("HEADER#1:0002", "message", "%{hmonth->} %{hday->} %{htime->} %{hostname->} %APACHETOMCAT- %{messageid}: %{payload}", processor_chain([ + setc("header_id","0002"), + ])); + + var select1 = linear_select([ + hdr1, + hdr2, + ]); + + var msg1 = msg("ABCD", dup7); + + var msg2 = msg("BADMETHOD", dup7); + + var msg3 = msg("BADMTHD", dup7); + + var msg4 = msg("BDMTHD", dup7); + + var msg5 = msg("INDEX", dup7); + + var msg6 = msg("CFYZ", dup7); + + var msg7 = msg("CONNECT", dup7); + + var msg8 = msg("DELETE", dup7); + + var msg9 = msg("DETECT_METHOD_TYPE", dup7); + + var msg10 = msg("FGET", dup7); + + var msg11 = msg("GET", dup7); + + var msg12 = msg("get", dup7); + + var msg13 = msg("HEAD", dup7); + + var msg14 = msg("id", dup7); + + var msg15 = msg("LOCK", dup7); + + var msg16 = msg("MKCOL", dup7); + + var msg17 = msg("NCIRCLE", dup7); + + var msg18 = msg("OPTIONS", dup7); + + var msg19 = msg("POST", dup7); + + var msg20 = msg("PRONECT", dup7); + + var msg21 = msg("PROPFIND", dup7); + + var msg22 = msg("PUT", dup7); + + var msg23 = 
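// Dispatch model for the parser assembled here: linear_select tries each
// HEADER pattern in order until one matches, the header captures the HTTP
// method into %{messageid}, and msgid_select (below) routes the remaining
// %{payload} to the parser registered under that method name. All of the
// msgN values share dup7 because a Tomcat access-log line carries the same
// ||-separated field layout regardless of verb; the table exists so that
// unrecognized methods fail to parse rather than being guessed at. Roughly:
//
//   "GET ..."  -> select1 matches hdr1/hdr2 -> messageid = "GET"  -> msg11 -> dup7
//   "POST ..." -> select1 matches hdr1/hdr2 -> messageid = "POST" -> msg19 -> dup7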
msg("QUALYS", dup7); + + var msg24 = msg("SEARCH", dup7); + + var msg25 = msg("TRACK", dup7); + + var msg26 = msg("TRACE", dup7); + + var msg27 = msg("uGET", dup7); + + var msg28 = msg("null", dup7); + + var msg29 = msg("rndmmtd", dup7); + + var msg30 = msg("RNDMMTD", dup7); + + var msg31 = msg("asdf", dup7); + + var msg32 = msg("DEBUG", dup7); + + var msg33 = msg("COOK", dup7); + + var msg34 = msg("nGET", dup7); + + var chain1 = processor_chain([ + select1, + msgid_select({ + "ABCD": msg1, + "BADMETHOD": msg2, + "BADMTHD": msg3, + "BDMTHD": msg4, + "CFYZ": msg6, + "CONNECT": msg7, + "COOK": msg33, + "DEBUG": msg32, + "DELETE": msg8, + "DETECT_METHOD_TYPE": msg9, + "FGET": msg10, + "GET": msg11, + "HEAD": msg13, + "INDEX": msg5, + "LOCK": msg15, + "MKCOL": msg16, + "NCIRCLE": msg17, + "OPTIONS": msg18, + "POST": msg19, + "PRONECT": msg20, + "PROPFIND": msg21, + "PUT": msg22, + "QUALYS": msg23, + "RNDMMTD": msg30, + "SEARCH": msg24, + "TRACE": msg26, + "TRACK": msg25, + "asdf": msg31, + "get": msg12, + "id": msg14, + "nGET": msg34, + "null": msg28, + "rndmmtd": msg29, + "uGET": msg27, + }), + ]); + + var part1 = match("MESSAGE#0:ABCD", "nwparser.payload", "%{saddr}||%{fld5}||%{username}||[%{fld7->} %{timezone}]||%{web_method}||%{web_host}||%{webpage}||%{web_query}||%{network_service}||%{resultcode}||%{sbytes}||%{web_referer}||%{user_agent}||%{web_cookie}", processor_chain([ + dup1, + dup2, + dup3, + dup4, + dup5, + dup6, + ])); + - community_id: null + - registered_domain: + field: dns.question.name + ignore_failure: true + ignore_missing: true + target_etld_field: dns.question.top_level_domain + target_field: dns.question.registered_domain + target_subdomain_field: dns.question.subdomain + - registered_domain: + field: client.domain + ignore_failure: true + ignore_missing: true + target_etld_field: client.top_level_domain + target_field: client.registered_domain + target_subdomain_field: client.subdomain + - registered_domain: + field: server.domain + ignore_failure: true + ignore_missing: true + target_etld_field: server.top_level_domain + target_field: server.registered_domain + target_subdomain_field: server.subdomain + - registered_domain: + field: destination.domain + ignore_failure: true + ignore_missing: true + target_etld_field: destination.top_level_domain + target_field: destination.registered_domain + target_subdomain_field: destination.subdomain + - registered_domain: + field: source.domain + ignore_failure: true + ignore_missing: true + target_etld_field: source.top_level_domain + target_field: source.registered_domain + target_subdomain_field: source.subdomain + - registered_domain: + field: url.domain + ignore_failure: true + ignore_missing: true + target_etld_field: url.top_level_domain + target_field: url.registered_domain + target_subdomain_field: url.subdomain + - add_locale: null + prospector: + scanner: + symlinks: true + tags: + - tomcat-log + - forwarded + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml new file mode 100644 index 00000000000..4ab26982389 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/traefik.yml @@ -0,0 +1,37 @@ +inputs: + - name: filestream-traefik + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.traefik.access.enabled} == true or ${kubernetes.hints.traefik.enabled} == true + data_stream: + dataset: traefik.access + type: logs + exclude_files: + - .gz$ + 
parsers: + - container: + format: auto + stream: ${kubernetes.hints.traefik.access.stream|'all'} + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + data_stream.namespace: default + - name: traefik/metrics-traefik + type: traefik/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.traefik.health.enabled} == true or ${kubernetes.hints.traefik.enabled} == true + data_stream: + dataset: traefik.health + type: metrics + hosts: + - ${kubernetes.hints.traefik.health.host|'localhost:8080'} + metricsets: + - health + period: ${kubernetes.hints.traefik.health.period|'10s'} + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml new file mode 100644 index 00000000000..60fa5ebf598 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/udp.yml @@ -0,0 +1,33 @@ +inputs: + - name: udp-udp + type: udp + use_output: default + streams: + - condition: ${kubernetes.hints.udp.generic.enabled} == true or ${kubernetes.hints.udp.enabled} == true + data_stream: + dataset: udp.generic + type: logs + host: localhost:8080 + max_message_size: 10KiB + data_stream.namespace: default + - name: filestream-udp + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.udp.container_logs.enabled} == true + data_stream: + dataset: kubernetes.container_logs + type: logs + exclude_files: [] + exclude_lines: [] + parsers: + - container: + format: auto + stream: all + paths: + - /var/log/containers/*${kubernetes.hints.container_id}.log + prospector: + scanner: + symlinks: true + tags: [] + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml new file mode 100644 index 00000000000..22bcc875894 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zeek.yml @@ -0,0 +1,2271 @@ +inputs: + - name: filestream-zeek + type: filestream + use_output: default + streams: + - condition: ${kubernetes.hints.zeek.capture_loss.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.capture_loss + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.capture_loss.stream|'all'} + paths: + - /var/log/bro/current/capture_loss.log + - /opt/zeek/logs/current/capture_loss.log + - /usr/local/var/spool/zeek/capture_loss.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-capture-loss + - condition: ${kubernetes.hints.zeek.connection.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.connection + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.connection.stream|'all'} + paths: + - /var/log/bro/current/conn.log + - /opt/zeek/logs/current/conn.log + - /usr/local/var/spool/zeek/conn.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-connection + - condition: ${kubernetes.hints.zeek.dce_rpc.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dce_rpc + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dce_rpc.stream|'all'} + paths: + - /var/log/bro/current/dce_rpc.log + - /opt/zeek/logs/current/dce_rpc.log + 
- /usr/local/var/spool/zeek/dce_rpc.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dce-rpc + - condition: ${kubernetes.hints.zeek.dhcp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dhcp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dhcp.stream|'all'} + paths: + - /var/log/bro/current/dhcp.log + - /opt/zeek/logs/current/dhcp.log + - /usr/local/var/spool/zeek/dhcp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dhcp + - condition: ${kubernetes.hints.zeek.dnp3.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dnp3 + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dnp3.stream|'all'} + paths: + - /var/log/bro/current/dnp3.log + - /opt/zeek/logs/current/dnp3.log + - /usr/local/var/spool/zeek/dnp3.log + prospector: + scanner: + symlinks: true + tags: + - zeek-dnp3 + - condition: ${kubernetes.hints.zeek.dns.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dns + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dns.stream|'all'} + paths: + - /var/log/bro/current/dns.log + - /opt/zeek/logs/current/dns.log + - /usr/local/var/spool/zeek/dns.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-dns + - condition: ${kubernetes.hints.zeek.dpd.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.dpd + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.dpd.stream|'all'} + paths: + - /var/log/bro/current/dpd.log + - /opt/zeek/logs/current/dpd.log + - /usr/local/var/spool/zeek/dpd.log + prospector: + scanner: + symlinks: true + tags: + - zeek-dpd + - condition: ${kubernetes.hints.zeek.files.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.files + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.files.stream|'all'} + paths: + - /var/log/bro/current/files.log + - /opt/zeek/logs/current/files.log + - /usr/local/var/spool/zeek/files.log + prospector: + scanner: + symlinks: true + tags: + - zeek-files + - condition: ${kubernetes.hints.zeek.ftp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ftp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ftp.stream|'all'} + paths: + - /var/log/bro/current/ftp.log + - /opt/zeek/logs/current/ftp.log + - /usr/local/var/spool/zeek/ftp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ftp + - condition: ${kubernetes.hints.zeek.http.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.http + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.http.stream|'all'} + paths: + - /var/log/bro/current/http.log + - /opt/zeek/logs/current/http.log + - /usr/local/var/spool/zeek/http.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-http + - condition: ${kubernetes.hints.zeek.intel.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.intel + type: logs + exclude_files: + - .gz$ + parsers: + - 
container: + format: auto + stream: ${kubernetes.hints.zeek.intel.stream|'all'} + paths: + - /var/log/bro/current/intel.log + - /opt/zeek/logs/current/intel.log + - /usr/local/var/spool/zeek/intel.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-intel + - condition: ${kubernetes.hints.zeek.irc.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.irc + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.irc.stream|'all'} + paths: + - /var/log/bro/current/irc.log + - /opt/zeek/logs/current/irc.log + - /usr/local/var/spool/zeek/irc.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-irc + - condition: ${kubernetes.hints.zeek.kerberos.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.kerberos + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.kerberos.stream|'all'} + paths: + - /var/log/bro/current/kerberos.log + - /opt/zeek/logs/current/kerberos.log + - /usr/local/var/spool/zeek/kerberos.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-kerberos + - condition: ${kubernetes.hints.zeek.known_certs.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_certs + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_certs.stream|'all'} + paths: + - /var/log/bro/current/known_certs.log + - /opt/zeek/logs/current/known_certs.log + - /usr/local/var/spool/zeek/known_certs.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_certs + - condition: ${kubernetes.hints.zeek.known_hosts.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_hosts + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_hosts.stream|'all'} + paths: + - /var/log/bro/current/known_hosts.log + - /opt/zeek/logs/current/known_hosts.log + - /usr/local/var/spool/zeek/known_hosts.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_hosts + - condition: ${kubernetes.hints.zeek.known_services.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.known_services + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.known_services.stream|'all'} + paths: + - /var/log/bro/current/known_services.log + - /opt/zeek/logs/current/known_services.log + - /usr/local/var/spool/zeek/known_services.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-known_services + - condition: ${kubernetes.hints.zeek.modbus.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.modbus + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.modbus.stream|'all'} + paths: + - /var/log/bro/current/modbus.log + - /opt/zeek/logs/current/modbus.log + - /usr/local/var/spool/zeek/modbus.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-modbus + - condition: ${kubernetes.hints.zeek.mysql.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.mysql + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: 
${kubernetes.hints.zeek.mysql.stream|'all'} + paths: + - /var/log/bro/current/mysql.log + - /opt/zeek/logs/current/mysql.log + - /usr/local/var/spool/zeek/mysql.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-mysql + - condition: ${kubernetes.hints.zeek.notice.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.notice + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.notice.stream|'all'} + paths: + - /var/log/bro/current/notice.log + - /opt/zeek/logs/current/notice.log + - /usr/local/var/spool/zeek/notice.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-notice + - condition: ${kubernetes.hints.zeek.ntlm.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ntlm + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ntlm.stream|'all'} + paths: + - /var/log/bro/current/ntlm.log + - /opt/zeek/logs/current/ntlm.log + - /usr/local/var/spool/zeek/ntlm.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ntlm + - condition: ${kubernetes.hints.zeek.ntp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ntp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ntp.stream|'all'} + paths: + - /var/log/bro/current/ntp.log + - /opt/zeek/logs/current/ntp.log + - /usr/local/var/spool/zeek/ntp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ntp + - condition: ${kubernetes.hints.zeek.ocsp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ocsp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ocsp.stream|'all'} + paths: + - /var/log/bro/current/ocsp.log + - /opt/zeek/logs/current/ocsp.log + - /usr/local/var/spool/zeek/ocsp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ocsp + - condition: ${kubernetes.hints.zeek.pe.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.pe + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.pe.stream|'all'} + paths: + - /var/log/bro/current/pe.log + - /opt/zeek/logs/current/pe.log + - /usr/local/var/spool/zeek/pe.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-pe + - condition: ${kubernetes.hints.zeek.radius.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.radius + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.radius.stream|'all'} + paths: + - /var/log/bro/current/radius.log + - /opt/zeek/logs/current/radius.log + - /usr/local/var/spool/zeek/radius.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-radius + - condition: ${kubernetes.hints.zeek.rdp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.rdp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.rdp.stream|'all'} + paths: + - /var/log/bro/current/rdp.log + - /opt/zeek/logs/current/rdp.log + - /usr/local/var/spool/zeek/rdp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-rdp + - condition: 
${kubernetes.hints.zeek.rfb.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.rfb + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.rfb.stream|'all'} + paths: + - /var/log/bro/current/rfb.log + - /opt/zeek/logs/current/rfb.log + - /usr/local/var/spool/zeek/rfb.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-rfb + - condition: ${kubernetes.hints.zeek.signature.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.signature + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.signature.stream|'all'} + paths: + - /var/log/bro/current/signature.log + - /opt/zeek/logs/current/signature.log + - /usr/local/var/spool/zeek/signature.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-signature + - condition: ${kubernetes.hints.zeek.sip.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.sip + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.sip.stream|'all'} + paths: + - /var/log/bro/current/sip.log + - /opt/zeek/logs/current/sip.log + - /usr/local/var/spool/zeek/sip.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-sip + - condition: ${kubernetes.hints.zeek.smb_cmd.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_cmd + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_cmd.stream|'all'} + paths: + - /var/log/bro/current/smb_cmd.log + - /opt/zeek/logs/current/smb_cmd.log + - /usr/local/var/spool/zeek/smb_cmd.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smb-cmd + - condition: ${kubernetes.hints.zeek.smb_files.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_files + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_files.stream|'all'} + paths: + - /var/log/bro/current/smb_files.log + - /opt/zeek/logs/current/smb_files.log + - /usr/local/var/spool/zeek/smb_files.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smb-files + - condition: ${kubernetes.hints.zeek.smb_mapping.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smb_mapping + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smb_mapping.stream|'all'} + paths: + - /var/log/bro/current/smb_mapping.log + - /opt/zeek/logs/current/smb_mapping.log + - /usr/local/var/spool/zeek/smb_mapping.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek.smb_mapping + - condition: ${kubernetes.hints.zeek.smtp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.smtp + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.smtp.stream|'all'} + paths: + - /var/log/bro/current/smtp.log + - /opt/zeek/logs/current/smtp.log + - /usr/local/var/spool/zeek/smtp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-smtp + - condition: ${kubernetes.hints.zeek.snmp.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.snmp + type: logs 
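# Every filestream stream in this template follows the same hints shape: it
# turns on when either its dataset-level hint or the package-level zeek hint
# is set, ${...|'all'} supplies a default when the hint carries no value, and
# the three path candidates per stream cover the common install layouts
# (legacy Bro under /var/log/bro, packaged Zeek under /opt/zeek, and a
# /usr/local/var/spool/zeek deployment). A pod would opt in with hints
# annotations along these lines -- the exact annotation keys are an
# assumption here, not defined in this file:
#
#   metadata:
#     annotations:
#       co.elastic.hints/package: zeek
#       co.elastic.hints/snmp.enabled: "true"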
+ exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.snmp.stream|'all'} + paths: + - /var/log/bro/current/snmp.log + - /opt/zeek/logs/current/snmp.log + - /usr/local/var/spool/zeek/snmp.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-snmp + - condition: ${kubernetes.hints.zeek.socks.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.socks + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.socks.stream|'all'} + paths: + - /var/log/bro/current/socks.log + - /opt/zeek/logs/current/socks.log + - /usr/local/var/spool/zeek/socks.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-socks + - condition: ${kubernetes.hints.zeek.software.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.software + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.software.stream|'all'} + paths: + - /var/log/bro/current/software.log + - /opt/zeek/logs/current/software.log + - /usr/local/var/spool/zeek/software.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-software + - condition: ${kubernetes.hints.zeek.ssh.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ssh + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ssh.stream|'all'} + paths: + - /var/log/bro/current/ssh.log + - /opt/zeek/logs/current/ssh.log + - /usr/local/var/spool/zeek/ssh.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ssh + - condition: ${kubernetes.hints.zeek.ssl.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.ssl + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.ssl.stream|'all'} + paths: + - /var/log/bro/current/ssl.log + - /opt/zeek/logs/current/ssl.log + - /usr/local/var/spool/zeek/ssl.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-ssl + - condition: ${kubernetes.hints.zeek.stats.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.stats + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.stats.stream|'all'} + paths: + - /var/log/bro/current/stats.log + - /opt/zeek/logs/current/stats.log + - /usr/local/var/spool/zeek/stats.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-stats + - condition: ${kubernetes.hints.zeek.syslog.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.syslog + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.syslog.stream|'all'} + paths: + - /var/log/bro/current/syslog.log + - /opt/zeek/logs/current/syslog.log + - /usr/local/var/spool/zeek/syslog.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-syslog + - condition: ${kubernetes.hints.zeek.traceroute.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.traceroute + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.traceroute.stream|'all'} + paths: + - /var/log/bro/current/traceroute.log + - 
/opt/zeek/logs/current/traceroute.log + - /usr/local/var/spool/zeek/traceroute.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-traceroute + - condition: ${kubernetes.hints.zeek.tunnel.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.tunnel + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.tunnel.stream|'all'} + paths: + - /var/log/bro/current/tunnel.log + - /opt/zeek/logs/current/tunnel.log + - /usr/local/var/spool/zeek/tunnel.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-tunnel + - condition: ${kubernetes.hints.zeek.weird.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.weird + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.weird.stream|'all'} + paths: + - /var/log/bro/current/weird.log + - /opt/zeek/logs/current/weird.log + - /usr/local/var/spool/zeek/weird.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-weird + - condition: ${kubernetes.hints.zeek.x509.enabled} == true or ${kubernetes.hints.zeek.enabled} == true + data_stream: + dataset: zeek.x509 + type: logs + exclude_files: + - .gz$ + parsers: + - container: + format: auto + stream: ${kubernetes.hints.zeek.x509.stream|'all'} + paths: + - /var/log/bro/current/x509.log + - /opt/zeek/logs/current/x509.log + - /usr/local/var/spool/zeek/x509.log + prospector: + scanner: + symlinks: true + tags: + - forwarded + - zeek-x509 + data_stream.namespace: default + - name: httpjson-zeek + type: httpjson + use_output: default + streams: + - condition: ${kubernetes.hints.zeek.capture_loss.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.capture_loss + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="capture_loss-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-capture-loss + - condition: ${kubernetes.hints.zeek.connection.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.connection + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="conn-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-connection + - condition: ${kubernetes.hints.zeek.dce_rpc.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dce_rpc + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dce_rpc-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dce-rpc + - condition: ${kubernetes.hints.zeek.dhcp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dhcp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dhcp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dhcp + - condition: ${kubernetes.hints.zeek.dnp3.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dnp3 + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dnp3-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dnp3 + - condition: ${kubernetes.hints.zeek.dns.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dns + type: logs + interval: 10s + request.method: POST + 
request.transforms: + - set: + target: url.params.search + value: search sourcetype="dns-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dns + - condition: ${kubernetes.hints.zeek.dpd.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.dpd + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="dpd-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-dpd + - condition: ${kubernetes.hints.zeek.files.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.files + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="files-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-files + - condition: ${kubernetes.hints.zeek.ftp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ftp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ftp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ftp + - condition: ${kubernetes.hints.zeek.http.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.http + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="http-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-http + - condition: ${kubernetes.hints.zeek.intel.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.intel + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="intel-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-intel + - condition: ${kubernetes.hints.zeek.irc.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.irc + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="irc-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-irc + - condition: ${kubernetes.hints.zeek.kerberos.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.kerberos + type: logs + interval: 10s + request.method: POST + request.transforms: + 
- set: + target: url.params.search + value: search sourcetype="kerberos-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-kerberos + - condition: ${kubernetes.hints.zeek.modbus.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.modbus + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="modbus-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-modbus + - condition: ${kubernetes.hints.zeek.mysql.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.mysql + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="mysql-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-mysql + - condition: ${kubernetes.hints.zeek.notice.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.notice + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="notice-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: 
https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-notice + - condition: ${kubernetes.hints.zeek.ntlm.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ntlm + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ntlm-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ntlm + - condition: ${kubernetes.hints.zeek.ntp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ntp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ntp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ntp + - condition: ${kubernetes.hints.zeek.ocsp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ocsp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ocsp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ocsp + - condition: ${kubernetes.hints.zeek.pe.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.pe + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + 
target: url.params.search + value: search sourcetype="pe-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-pe + - condition: ${kubernetes.hints.zeek.radius.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.radius + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="radius-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-radius + - condition: ${kubernetes.hints.zeek.rdp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.rdp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="rdp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-rdp + - condition: ${kubernetes.hints.zeek.rfb.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.rfb + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="rfb-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + 
response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-rfb + - condition: ${kubernetes.hints.zeek.signature.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.signature + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="signature-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-signature + - condition: ${kubernetes.hints.zeek.sip.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.sip + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="sip-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-sip + - condition: ${kubernetes.hints.zeek.smb_cmd.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_cmd + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smb_cmd-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smb-cmd + - condition: ${kubernetes.hints.zeek.smb_files.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_files + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: 
url.params.search + value: search sourcetype="smb_files-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smb-files + - condition: ${kubernetes.hints.zeek.smb_mapping.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smb_mapping + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smb_mapping-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - condition: ${kubernetes.hints.zeek.smtp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.smtp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="smtp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-smtp + - condition: ${kubernetes.hints.zeek.snmp.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.snmp + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="snmp-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export 
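# The httpjson variant of each stream pulls the same zeek datasets out of a
# Splunk instance instead of tailing files: on every interval it POSTs a
# sourcetype-scoped search to the export endpoint, and `streamstats
# max(_indextime) AS max_indextime` tags each result with the newest index
# time seen so far. The cursor makes the polling incremental:
#
#   first request:  index_earliest = now - 10s   (the `default` transform)
#   later requests: index_earliest = [[.cursor.index_earliest]]
#                   (the max_indextime recorded from the previous batch)
#   every request:  index_latest   = now
#
# so a restarted agent resumes from the last indexed event instead of
# re-ingesting the whole window. server.example.com:8089 is a placeholder
# for the real Splunk management endpoint.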
+ response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-snmp + - condition: ${kubernetes.hints.zeek.socks.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.socks + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="socks-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-socks + - condition: ${kubernetes.hints.zeek.ssh.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ssh + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ssh-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ssh + - condition: ${kubernetes.hints.zeek.ssl.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.ssl + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="ssl-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-ssl + - condition: ${kubernetes.hints.zeek.stats.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.stats + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search 
sourcetype="stats-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-stats + - condition: ${kubernetes.hints.zeek.syslog.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.syslog + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="syslog-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-syslog + - condition: ${kubernetes.hints.zeek.traceroute.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.traceroute + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="traceroute-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-traceroute + - condition: ${kubernetes.hints.zeek.tunnel.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.tunnel + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="tunnel-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + 
response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-tunnel + - condition: ${kubernetes.hints.zeek.weird.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.weird + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="weird-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-weird + - condition: ${kubernetes.hints.zeek.x509.enabled} == true and ${kubernetes.hints.zeek.enabled} == true + config_version: 2 + cursor: + index_earliest: + value: '[[.last_event.result.max_indextime]]' + data_stream: + dataset: zeek.x509 + type: logs + interval: 10s + request.method: POST + request.transforms: + - set: + target: url.params.search + value: search sourcetype="x509-*" | streamstats max(_indextime) AS max_indextime + - set: + target: url.params.output_mode + value: json + - set: + default: '[[(now (parseDuration "-10s")).Unix]]' + target: url.params.index_earliest + value: '[[ .cursor.index_earliest ]]' + - set: + target: url.params.index_latest + value: '[[(now).Unix]]' + - set: + target: header.Content-Type + value: application/x-www-form-urlencoded + request.url: https://server.example.com:8089/services/search/jobs/export + response.decode_as: application/x-ndjson + response.split: + delimiter: |4+ + target: body.result._raw + type: string + tags: + - forwarded + - zeek-x509 + data_stream.namespace: default diff --git a/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml new file mode 100644 index 00000000000..5199734c315 --- /dev/null +++ b/deploy/kubernetes/elastic-agent-standalone/templates.d/zookeeper.yml @@ -0,0 +1,54 @@ +inputs: + - name: zookeeper/metrics-zookeeper + type: zookeeper/metrics + use_output: default + streams: + - condition: ${kubernetes.hints.zookeeper.connection.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true + data_stream: + dataset: zookeeper.connection + type: metrics + hosts: + - ${kubernetes.hints.zookeeper.connection.host|'localhost:2181'} + metricsets: + - connection + period: ${kubernetes.hints.zookeeper.connection.period|'10s'} + - condition: ${kubernetes.hints.zookeeper.mntr.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true + data_stream: + dataset: zookeeper.mntr + type: metrics + hosts: + - ${kubernetes.hints.zookeeper.mntr.host|'localhost:2181'} + metricsets: + - mntr + period: ${kubernetes.hints.zookeeper.mntr.period|'10s'} + - condition: ${kubernetes.hints.zookeeper.server.enabled} == true or ${kubernetes.hints.zookeeper.enabled} == true + data_stream: + dataset: zookeeper.server + type: metrics + hosts: + - ${kubernetes.hints.zookeeper.server.host|'localhost:2181'} + 
+          metricsets:
+            - server
+          period: ${kubernetes.hints.zookeeper.server.period|'10s'}
+      data_stream.namespace: default
+    - name: filestream-zookeeper
+      type: filestream
+      use_output: default
+      streams:
+        - condition: ${kubernetes.hints.zookeeper.container_logs.enabled} == true
+          data_stream:
+            dataset: kubernetes.container_logs
+            type: logs
+          exclude_files: []
+          exclude_lines: []
+          parsers:
+            - container:
+                format: auto
+                stream: all
+          paths:
+            - /var/log/containers/*${kubernetes.hints.container_id}.log
+          prospector:
+            scanner:
+              symlinks: true
+          tags: []
+      data_stream.namespace: default
diff --git a/dev-tools/mage/crossbuild.go b/dev-tools/mage/crossbuild.go
index 56a6860393b..8a193003ab7 100644
--- a/dev-tools/mage/crossbuild.go
+++ b/dev-tools/mage/crossbuild.go
@@ -281,6 +281,15 @@ func (b GolangCrossBuilder) Build() error {
 		verbose = "true"
 	}
 	var args []string
+	// There's a bug on certain debian versions:
+	// https://discuss.linuxcontainers.org/t/debian-jessie-containers-have-extremely-low-performance/1272
+	// basically, apt-get has a bug where it will try to iterate through every possible FD as set by the NOFILE ulimit.
+	// On certain docker installs, docker will set the ulimit to a value > 10^9, which means apt-get will take >1 hour.
+	// This runs across all possible debian platforms, since there's no real harm in it.
+	if strings.Contains(image, "debian") {
+		args = append(args, "--ulimit", "nofile=262144:262144")
+	}
+
 	if runtime.GOOS != "windows" {
 		args = append(args,
 			"--env", "EXEC_UID="+strconv.Itoa(os.Getuid()),
diff --git a/dev-tools/packaging/files/darwin/PkgInfo b/dev-tools/packaging/files/darwin/PkgInfo
new file mode 100644
index 00000000000..bd04210fb49
--- /dev/null
+++ b/dev-tools/packaging/files/darwin/PkgInfo
@@ -0,0 +1 @@
+APPL????
\ No newline at end of file
diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml
index c02c0596e0e..70a47df591d 100644
--- a/dev-tools/packaging/packages.yml
+++ b/dev-tools/packaging/packages.yml
@@ -118,13 +118,7 @@ shared:
       config_mode: 0644
       skip_on_missing: true
 
-  - &agent_binary_files
-    '{{.BeatName}}{{.BinaryExt}}':
-      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
-      mode: 0755
-    'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}':
-      source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}}
-      mode: 0755
+  - &agent_binary_common_files
     LICENSE.txt:
       source: '{{ repo.RootDir }}/LICENSE.txt'
       mode: 0644
@@ -150,18 +144,54 @@ shared:
         {{ commit }}
       mode: 0644
 
-  # Binary package spec (tar.gz for linux/darwin) for community beats.
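
(Aside: the crossbuild.go hunk above works around the slow apt-get by capping NOFILE on the build container. A self-contained sketch of the same conditional follows; the image name is illustrative and the args slice stands in for the docker invocation mage assembles.)

    package main

    import (
    	"fmt"
    	"strings"
    )

    // dockerRunArgs caps the open-file ulimit for Debian-based build images so
    // apt-get does not iterate over a multi-billion file-descriptor limit.
    func dockerRunArgs(image string) []string {
    	args := []string{"run", "--rm"}
    	if strings.Contains(image, "debian") {
    		args = append(args, "--ulimit", "nofile=262144:262144")
    	}
    	return append(args, image)
    }

    func main() {
    	fmt.Println(dockerRunArgs("golang-crossbuild:1.18-main-debian9"))
    }

The packaging spec changes continue below.
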
+ - &agent_binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + 'data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + <<: *agent_binary_common_files + + - &agent_darwin_app_bundle_files + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/Info.plist': + template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/Info.plist.tmpl' + mode: 0644 + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/PkgInfo': + template: '{{ elastic_beats_dir }}/dev-tools/packaging/files/darwin/PkgInfo' + mode: 0644 + + - &agent_darwin_binary_files + '{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + 'data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/{{.BeatName}}{{.BinaryExt}}': + source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + mode: 0755 + <<: *agent_darwin_app_bundle_files + <<: *agent_binary_common_files + + - &agent_components + 'data/{{.BeatName}}-{{ commit_short }}/components': + source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' + mode: 0755 + config_mode: 0644 + skip_on_missing: true + + # Binary package spec (tar.gz for linux) for community beats. - &agent_binary_spec <<: *common files: <<: *agent_binary_files - 'data/{{.BeatName}}-{{ commit_short }}/components': - source: '{{.AgentDropPath}}/{{.GOOS}}-{{.AgentArchName}}.tar.gz/' - mode: 0755 - config_mode: 0644 - skip_on_missing: true + <<: *agent_components + - &agent_darwin_binary_spec + <<: *common + files: + <<: *agent_darwin_binary_files + <<: *agent_components + # Binary package spec (zip for windows) for community beats. 
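
(Aside: the darwin spec above is composed almost entirely from YAML anchors: *agent_binary_common_files and *agent_components are defined once and merged into each package definition with `<<:`. A toy Go demonstration of the anchor/merge mechanism follows; gopkg.in/yaml.v2 is used here only for illustration, the packaging tooling has its own loader, and the document below is invented, not the real packages.yml.)

    package main

    import (
    	"fmt"

    	"gopkg.in/yaml.v2"
    )

    // A toy document showing how a shared mapping is defined once under an
    // anchor and merged into another mapping with the `<<:` merge key.
    const doc = `
    common: &common_files
      LICENSE.txt: license
      NOTICE.txt: notice
    darwin:
      <<: *common_files
      elastic-agent: binary
    `

    func main() {
    	var specs map[string]map[string]string
    	if err := yaml.Unmarshal([]byte(doc), &specs); err != nil {
    		panic(err)
    	}
    	// darwin now contains LICENSE.txt and NOTICE.txt plus its own key.
    	fmt.Println(specs["darwin"])
    }

The windows variant of the binary spec follows below.
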
   - &agent_windows_binary_spec
     <<: *common

@@ -730,11 +760,14 @@ specs:
     - os: darwin
       types: [tgz]
       spec:
-        <<: *agent_binary_spec
+        <<: *agent_darwin_binary_spec
         <<: *elastic_license_for_binaries
         files:
+          'data/{{.BeatName}}-{{ commit_short }}/elastic-agent':
+            template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/elastic-agent.tmpl'
+            mode: 0755
           '{{.BeatName}}{{.BinaryExt}}':
-            source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}}
+            source: data/{{.BeatName}}-{{ commit_short }}/elastic-agent.app/Contents/MacOS/{{.BeatName}}{{.BinaryExt}}
             symlink: true
             mode: 0755
diff --git a/dev-tools/packaging/templates/darwin/Info.plist.tmpl b/dev-tools/packaging/templates/darwin/Info.plist.tmpl
new file mode 100644
index 00000000000..b98202219b5
--- /dev/null
+++ b/dev-tools/packaging/templates/darwin/Info.plist.tmpl
@@ -0,0 +1,20 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+    <key>CFBundleExecutable</key>
+    <string>elastic-agent</string>
+    <key>CFBundleIdentifier</key>
+    <string>co.elastic.elastic-agent</string>
+    <key>CFBundleInfoDictionaryVersion</key>
+    <string>6.0</string>
+    <key>CFBundleName</key>
+    <string>elastic-agent</string>
+    <key>CFBundlePackageType</key>
+    <string>APPL</string>
+    <key>CFBundleShortVersionString</key>
+    <string>{{ beat_version }}</string>
+    <key>CFBundleVersion</key>
+    <string>{{ beat_version }}</string>
+</dict>
+</plist>
diff --git a/dev-tools/packaging/templates/darwin/elastic-agent.tmpl b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl
new file mode 100644
index 00000000000..74c0f238c28
--- /dev/null
+++ b/dev-tools/packaging/templates/darwin/elastic-agent.tmpl
@@ -0,0 +1,11 @@
+#!/bin/sh
+# Fix up the symlink and exit
+
+set -e
+
+symlink="/Library/Elastic/Agent/elastic-agent"
+
+if test -L "$symlink"; then
+    ln -sfn "data/elastic-agent-{{ commit_short }}/elastic-agent.app/Contents/MacOS/elastic-agent" "$symlink"
+fi
+
diff --git a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
index 7ff81be9559..ab16391a611 100644
--- a/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
+++ b/dev-tools/packaging/templates/docker/Dockerfile.elastic-agent.tmpl
@@ -1,245 +1,233 @@
-{{- $beatHome := printf "%s/%s" "/usr/share" .BeatName }}
-{{- $beatBinary := printf "%s/%s" $beatHome .BeatName }}
-{{- $repoInfo := repo }}
-
-# Prepare home in a different stage to avoid creating additional layers on
-# the final image because of permission changes.
-FROM {{ .buildFrom }} AS home - -COPY beat {{ $beatHome }} - -RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ - chown -R root:root {{ $beatHome }} && \ - find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \ - find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \ - find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \ - find {{ $beatHome }}/data -type f -exec chmod 0660 {} \; && \ - rm {{ $beatBinary }} && \ - ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \ - chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \ - chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/*beat && \ - (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/apm-server || true) && \ - (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/elastic-endpoint || true) && \ - find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chown root:root {} \; && \ - find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chmod 0644 {} \; && \ -{{- range $i, $modulesd := .ModulesDirs }} - chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \ -{{- end }} -{{- if contains .image_name "-cloud" }} - mkdir -p /opt/filebeat /opt/metricbeat && \ - tar xf {{ $beatHome }}/data/cloud_downloads/metricbeat-*.tar.gz -C /opt/metricbeat --strip-components=1 && \ - tar xf {{ $beatHome }}/data/cloud_downloads/filebeat-*.tar.gz -C /opt/filebeat --strip-components=1 && \ -{{- end }} - rm -rf {{ $beatHome }}/data/cloud_downloads && \ - true - -FROM {{ .from }} - -ENV BEAT_SETUID_AS={{ .user }} - -{{- if contains .from "ubi-minimal" }} -RUN for iter in {1..10}; do microdnf update -y && microdnf install -y tar gzip findutils shadow-utils && microdnf clean all && exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code) -{{- else }} - -RUN for iter in {1..10}; do \ - apt-get update -y && \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \ - apt-get clean all && \ - exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - (exit $exit_code) -{{- end }} - -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN apt-get update -y && \ - for iter in {1..10}; do \ - DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \ - libglib2.0-0\ - libnss3\ - libnspr4\ - libatk1.0-0\ - libatk-bridge2.0-0\ - libcups2\ - libdrm2\ - libdbus-1-3\ - libxcb1\ - libxkbcommon0\ - libx11-6\ - libxcomposite1\ - libxdamage1\ - libxext6\ - libxfixes3\ - libxrandr2\ - libgbm1\ - libpango-1.0-0\ - libcairo2\ - libasound2\ - libatspi2.0-0\ - libxshmfence1 \ - fonts-noto-core\ - fonts-noto-cjk &&\ - apt-get clean all && \ - exit_code=0 && break || exit_code=$? 
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ - done; \ - (exit $exit_code) -ENV NODE_PATH={{ $beatHome }}/.node -RUN echo \ - $NODE_PATH \ - {{ $beatHome }}/.config \ - {{ $beatHome }}/.synthetics \ - {{ $beatHome }}/.npm \ - {{ $beatHome }}/.cache \ - | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' -{{- end }} - -LABEL \ - org.label-schema.build-date="{{ date }}" \ - org.label-schema.schema-version="1.0" \ - org.label-schema.vendor="{{ .BeatVendor }}" \ - org.label-schema.license="{{ .License }}" \ - org.label-schema.name="{{ .BeatName }}" \ - org.label-schema.version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ - org.label-schema.url="{{ .BeatURL }}" \ - org.label-schema.vcs-url="{{ $repoInfo.RootImportPath }}" \ - org.label-schema.vcs-ref="{{ commit }}" \ - io.k8s.description="{{ .BeatDescription }}" \ - io.k8s.display-name="{{ .BeatName | title }} image" \ - org.opencontainers.image.created="{{ date }}" \ - org.opencontainers.image.licenses="{{ .License }}" \ - org.opencontainers.image.title="{{ .BeatName | title }}" \ - org.opencontainers.image.vendor="{{ .BeatVendor }}" \ - name="{{ .BeatName }}" \ - maintainer="infra@elastic.co" \ - vendor="{{ .BeatVendor }}" \ - version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ - release="1" \ - url="{{ .BeatURL }}" \ - summary="{{ .BeatName }}" \ - license="{{ .License }}" \ - description="{{ .BeatDescription }}" - -ENV ELASTIC_CONTAINER "true" -ENV PATH={{ $beatHome }}:$PATH -ENV GODEBUG="madvdontneed=1" - -# Add an init process, check the checksum to make sure it's a match -RUN set -e ; \ - TINI_BIN=""; \ - TINI_SHA256=""; \ - TINI_VERSION="v0.19.0"; \ - case "$(arch)" in \ - x86_64) \ - TINI_BIN="tini-amd64"; \ - TINI_SHA256="93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c"; \ - ;; \ - aarch64) \ - TINI_BIN="tini-arm64"; \ - TINI_SHA256="07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81"; \ - ;; \ - *) \ - echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ - ;; \ - esac ; \ - curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ - echo "${TINI_SHA256} ${TINI_BIN}" | sha256sum -c - ; \ - mv "${TINI_BIN}" /usr/bin/tini ; \ - chmod +x /usr/bin/tini - -COPY docker-entrypoint /usr/local/bin/docker-entrypoint -RUN chmod 755 /usr/local/bin/docker-entrypoint - -COPY --from=home {{ $beatHome }} {{ $beatHome }} - -# Elastic Agent needs group permissions in the home itself to be able to -# create fleet.yml when running as non-root. -RUN chmod 0770 {{ $beatHome }} - -RUN mkdir /licenses -COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses -COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses - -{{- if contains .image_name "-cloud" }} -COPY --from=home /opt /opt -{{- end }} - - -RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. 
Thus, we use readlink to follow the link and setcap on the actual binary - readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \ -{{- end }} -true - -{{- if eq .user "root" }} -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -{{- end }} -{{- else }} -RUN groupadd --gid 1000 {{ .BeatName }} -RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -RUN chown {{ .user }} $NODE_PATH -{{- end }} -{{- if contains .image_name "-cloud" }} -# Generate folder for a stub command that will be overwritten at runtime -RUN mkdir /app -RUN chown {{ .user }} /app -{{- end }} -{{- end }} - -USER {{ .user }} - -{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }} -# Setup synthetics env vars -ENV ELASTIC_SYNTHETICS_CAPABLE=true -ENV SUITES_DIR={{ $beatHome }}/suites -ENV NODE_VERSION=16.15.0 -ENV PATH="$NODE_PATH/node/bin:$PATH" -# Install the latest version of @elastic/synthetics forcefully ignoring the previously -# cached node_modules, heartbeat then calls the global executable to run test suites -# Setup node -RUN cd {{$beatHome}}/.node \ - && NODE_DOWNLOAD_URL="" \ - && case "$(arch)" in \ - x86_64) \ - NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \ - ;; \ - aarch64) \ - NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-arm64.tar.xz \ - ;; \ - *) \ - echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ - ;; \ - esac \ - && mkdir -p node \ - && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ - && chmod ug+rwX -R $NODE_PATH \ - && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH -{{- end }} - - -{{- range $i, $port := .ExposePorts }} -EXPOSE {{ $port }} -{{- end }} - -# When running under Docker, we must ensure libbeat monitoring pulls cgroup -# metrics from /sys/fs/cgroup//, ignoring any paths found in -# /proc/self/cgroup. -ENV LIBBEAT_MONITORING_CGROUPS_HIERARCHY_OVERRIDE=/ - -WORKDIR {{ $beatHome }} - -{{- if contains .image_name "-cloud" }} -ENTRYPOINT ["/usr/bin/tini", "--"] -CMD ["/app/apm.sh"] -# Generate a stub command that will be overwritten at runtime -RUN echo -e '#!/bin/sh\nexec /usr/local/bin/docker-entrypoint' > /app/apm.sh && \ - chmod 0555 /app/apm.sh -{{- else }} -ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/docker-entrypoint"] -{{- end }} - +{{- $beatHome := printf "%s/%s" "/usr/share" .BeatName }} +{{- $beatBinary := printf "%s/%s" $beatHome .BeatName }} +{{- $repoInfo := repo }} + +# Prepare home in a different stage to avoid creating additional layers on +# the final image because of permission changes. 
+FROM {{ .buildFrom }} AS home + +COPY beat {{ $beatHome }} + +RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/logs && \ + chown -R root:root {{ $beatHome }} && \ + find {{ $beatHome }} -type d -exec chmod 0755 {} \; && \ + find {{ $beatHome }} -type f -exec chmod 0644 {} \; && \ + find {{ $beatHome }}/data -type d -exec chmod 0770 {} \; && \ + find {{ $beatHome }}/data -type f -exec chmod 0660 {} \; && \ + rm {{ $beatBinary }} && \ + ln -s {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/elastic-agent {{ $beatBinary }} && \ + chmod 0755 {{ $beatHome }}/data/elastic-agent-*/elastic-agent && \ + chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/*beat && \ + (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/apm-server || true) && \ + (chmod 0755 {{ $beatHome }}/data/elastic-agent-*/components/elastic-endpoint || true) && \ + find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chown root:root {} \; && \ + find {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components -name "*.yml*" -type f -exec chmod 0644 {} \; && \ +{{- range $i, $modulesd := .ModulesDirs }} + chmod 0775 {{ $beatHome}}/{{ $modulesd }} && \ +{{- end }} +{{- if contains .image_name "-cloud" }} + mkdir -p /opt/filebeat /opt/metricbeat && \ + tar xf {{ $beatHome }}/data/cloud_downloads/metricbeat-*.tar.gz -C /opt/metricbeat --strip-components=1 && \ + tar xf {{ $beatHome }}/data/cloud_downloads/filebeat-*.tar.gz -C /opt/filebeat --strip-components=1 && \ +{{- end }} + rm -rf {{ $beatHome }}/data/cloud_downloads && \ + true + +FROM {{ .from }} + +ENV BEAT_SETUID_AS={{ .user }} + +{{- if contains .from "ubi-minimal" }} +RUN for iter in {1..10}; do microdnf update -y && microdnf install -y tar gzip findutils shadow-utils && microdnf clean all && exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; done; (exit $exit_code) +{{- else }} + +RUN for iter in {1..10}; do \ + apt-get update -y && \ + DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \ + apt-get clean all && \ + exit_code=0 && break || exit_code=$? 
&& echo "apt-get error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) +{{- end }} + +LABEL \ + org.label-schema.build-date="{{ date }}" \ + org.label-schema.schema-version="1.0" \ + org.label-schema.vendor="{{ .BeatVendor }}" \ + org.label-schema.license="{{ .License }}" \ + org.label-schema.name="{{ .BeatName }}" \ + org.label-schema.version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ + org.label-schema.url="{{ .BeatURL }}" \ + org.label-schema.vcs-url="{{ $repoInfo.RootImportPath }}" \ + org.label-schema.vcs-ref="{{ commit }}" \ + io.k8s.description="{{ .BeatDescription }}" \ + io.k8s.display-name="{{ .BeatName | title }} image" \ + org.opencontainers.image.created="{{ date }}" \ + org.opencontainers.image.licenses="{{ .License }}" \ + org.opencontainers.image.title="{{ .BeatName | title }}" \ + org.opencontainers.image.vendor="{{ .BeatVendor }}" \ + name="{{ .BeatName }}" \ + maintainer="infra@elastic.co" \ + vendor="{{ .BeatVendor }}" \ + version="{{ beat_version }}{{if .Snapshot}}-SNAPSHOT{{end}}" \ + release="1" \ + url="{{ .BeatURL }}" \ + summary="{{ .BeatName }}" \ + license="{{ .License }}" \ + description="{{ .BeatDescription }}" + +ENV ELASTIC_CONTAINER "true" +ENV PATH={{ $beatHome }}:$PATH +ENV GODEBUG="madvdontneed=1" + +# Add an init process, check the checksum to make sure it's a match +RUN set -e ; \ + TINI_BIN=""; \ + TINI_SHA256=""; \ + TINI_VERSION="v0.19.0"; \ + case "$(arch)" in \ + x86_64) \ + TINI_BIN="tini-amd64"; \ + TINI_SHA256="93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c"; \ + ;; \ + aarch64) \ + TINI_BIN="tini-arm64"; \ + TINI_SHA256="07952557df20bfd2a95f9bef198b445e006171969499a1d361bd9e6f8e5e0e81"; \ + ;; \ + *) \ + echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \ + ;; \ + esac ; \ + curl --retry 8 -S -L -O "https://github.com/krallin/tini/releases/download/${TINI_VERSION}/${TINI_BIN}" ; \ + echo "${TINI_SHA256} ${TINI_BIN}" | sha256sum -c - ; \ + mv "${TINI_BIN}" /usr/bin/tini ; \ + chmod +x /usr/bin/tini + +COPY docker-entrypoint /usr/local/bin/docker-entrypoint +RUN chmod 755 /usr/local/bin/docker-entrypoint + +COPY --from=home {{ $beatHome }} {{ $beatHome }} + +# Elastic Agent needs group permissions in the home itself to be able to +# create fleet.yml when running as non-root. +RUN chmod 0770 {{ $beatHome }} + +RUN mkdir /licenses +COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses +COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses + +{{- if contains .image_name "-cloud" }} +COPY --from=home /opt /opt +{{- end }} + + +RUN setcap cap_net_raw,cap_setuid+p {{ $beatHome }}/data/elastic-agent-{{ commit_short }}/components/heartbeat && \ +{{- if .linux_capabilities }} +# Since the beat is stored at the other end of a symlink we must follow the symlink first +# For security reasons setcap does not support symlinks. This is smart in the general case +# but in our specific case since we're building a trusted image from trusted binaries this is +# fine. 
+# fine. Thus, we use readlink to follow the link and setcap on the actual binary
+	readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} && \
+{{- end }}
+true
+
+{{- if eq .user "root" }}
+{{- if contains .image_name "-cloud" }}
+# Generate folder for a stub command that will be overwritten at runtime
+RUN mkdir /app
+{{- end }}
+{{- else }}
+RUN groupadd --gid 1000 {{ .BeatName }}
+RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }}
+
+{{- if contains .image_name "-cloud" }}
+# Generate folder for a stub command that will be overwritten at runtime
+RUN mkdir /app
+RUN chown {{ .user }} /app
+{{- end }}
+{{- end }}
+
+{{- if (and (contains .image_name "-complete") (not (contains .from "ubi-minimal"))) }}
+USER root
+ENV NODE_PATH={{ $beatHome }}/.node
+RUN echo \
+    $NODE_PATH \
+    {{ $beatHome }}/.config \
+    {{ $beatHome }}/.synthetics \
+    {{ $beatHome }}/.npm \
+    {{ $beatHome }}/.cache \
+    | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR'
+
+# Setup synthetics env vars
+ENV ELASTIC_SYNTHETICS_CAPABLE=true
+ENV NODE_VERSION=16.15.0
+ENV PATH="$NODE_PATH/node/bin:$PATH"
+# Install the latest version of @elastic/synthetics forcefully ignoring the previously
+# cached node_modules, heartbeat then calls the global executable to run test suites
+# Setup node
+RUN cd {{$beatHome}}/.node \
+    && NODE_DOWNLOAD_URL="" \
+    && case "$(arch)" in \
+        x86_64) \
+            NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-x64.tar.xz \
+            ;; \
+        aarch64) \
+            NODE_DOWNLOAD_URL=https://nodejs.org/dist/v${NODE_VERSION}/node-v${NODE_VERSION}-linux-arm64.tar.xz \
+            ;; \
+        *) \
+            echo >&2 ; echo >&2 "Unsupported architecture \$(arch)" ; echo >&2 ; exit 1 ; \
+            ;; \
+    esac \
+    && mkdir -p node \
+    && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \
+    && chmod ug+rwX -R $NODE_PATH
+
+# Install synthetics as a regular user, installing npm deps as root doesn't work
+RUN chown -R {{ .user }} $NODE_PATH
+USER {{ .user }}
+# If this fails dump the NPM logs
+RUN npm i -g --loglevel verbose -f @elastic/synthetics@stack_release || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1'
+RUN chmod ug+rwX -R $NODE_PATH
+USER root
+
+# Install the deps as needed by the exact version of playwright elastic synthetics uses
+# We don't use npx playwright install-deps because that could pull a newer version
+# Install additional fonts as well
+RUN for iter in {1..10}; do \
+    apt-get update -y && \
+    $NODE_PATH/node/lib/node_modules/@elastic/synthetics/node_modules/.bin/playwright install-deps chromium && \
+    DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes \
+    fonts-noto \
+    fonts-noto-cjk && \
+    exit_code=0 && break || exit_code=$? && echo "apt-get error: retry $iter in 10s" && sleep 10; \
+    done; \
+    (exit $exit_code)
+
+{{- end }}
+USER {{ .user }}
+
+
+{{- range $i, $port := .ExposePorts }}
+EXPOSE {{ $port }}
+{{- end }}
+
+# When running under Docker, we must ensure libbeat monitoring pulls cgroup
+# metrics from /sys/fs/cgroup/<subsystem>/, ignoring any paths found in
+# /proc/self/cgroup.
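
(Aside: the tini installation earlier in this Dockerfile pins a per-architecture SHA-256 and fails the build on mismatch. The same verify-before-trust step, sketched in Go for a generic download; the URL and digest below are placeholders, not the tini values.)

    package main

    import (
    	"crypto/sha256"
    	"encoding/hex"
    	"fmt"
    	"io"
    	"net/http"
    )

    // fetchVerified downloads url and returns the body only if its SHA-256
    // matches wantHex, mirroring the `sha256sum -c` gate in the Dockerfile.
    func fetchVerified(url, wantHex string) ([]byte, error) {
    	resp, err := http.Get(url)
    	if err != nil {
    		return nil, err
    	}
    	defer resp.Body.Close()
    	body, err := io.ReadAll(resp.Body)
    	if err != nil {
    		return nil, err
    	}
    	sum := sha256.Sum256(body)
    	if hex.EncodeToString(sum[:]) != wantHex {
    		return nil, fmt.Errorf("checksum mismatch for %s", url)
    	}
    	return body, nil
    }

    func main() {
    	// Placeholder values for illustration only.
    	if _, err := fetchVerified("https://example.com/tini", "00"); err != nil {
    		fmt.Println(err)
    	}
    }

The ENV line that the cgroup comment above introduces follows below.
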
+ENV LIBBEAT_MONITORING_CGROUPS_HIERARCHY_OVERRIDE=/ + +WORKDIR {{ $beatHome }} + +{{- if contains .image_name "-cloud" }} +ENTRYPOINT ["/usr/bin/tini", "--"] +CMD ["/app/apm.sh"] +# Generate a stub command that will be overwritten at runtime +RUN echo -e '#!/bin/sh\nexec /usr/local/bin/docker-entrypoint' > /app/apm.sh && \ + chmod 0555 /app/apm.sh +{{- else }} +ENTRYPOINT ["/usr/bin/tini", "--", "/usr/local/bin/docker-entrypoint"] +{{- end }} + diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index 06cce5a13b0..d2edf7909cb 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl @@ -181,7 +181,7 @@ RUN cd /usr/share/heartbeat/.node \ && mkdir -p node \ && curl ${NODE_DOWNLOAD_URL} | tar -xJ --strip 1 -C node \ && chmod ug+rwX -R $NODE_PATH \ - && npm i -g -f @elastic/synthetics && chmod ug+rwX -R $NODE_PATH + && npm i -g -f @elastic/synthetics@stack_release && chmod ug+rwX -R $NODE_PATH {{- end }} {{- range $i, $port := .ExposePorts }} diff --git a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl index 3c753caa0fb..e4b4df82e23 100644 --- a/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl +++ b/dev-tools/packaging/templates/ironbank/hardening_manifest.yaml.tmpl @@ -35,7 +35,7 @@ labels: ## This value can be "opensource" or "commercial" mil.dso.ironbank.image.type: "commercial" ## Product the image belongs to for grouping multiple images - mil.dso.ironbank.product.name: "beats" + mil.dso.ironbank.product.name: "elastic-agent" # List of resources to make available to the offline build context resources: diff --git a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl index 083ebb91060..d96f21a8629 100644 --- a/dev-tools/packaging/templates/linux/postinstall.sh.tmpl +++ b/dev-tools/packaging/templates/linux/postinstall.sh.tmpl @@ -3,16 +3,26 @@ set -e symlink="/usr/share/elastic-agent/bin/elastic-agent" -old_agent_dir="$( dirname "$(readlink -f -- "$symlink")" )" +old_agent_dir="" + +# check if $symlink exists for the previous install +# and derive the old agent directory +if test -L "$symlink"; then + resolved_symlink="$(readlink -f -- "$symlink")" + # check if it is resolved to non empty string + if ! [ -z "$resolved_symlink" ]; then + old_agent_dir="$( dirname "$resolved_symlink" )" + fi +fi commit_hash="{{ commit_short }}" -yml_path="$old_agent_dir/state.yml" -enc_path="$old_agent_dir/state.enc" +new_agent_dir="/var/lib/elastic-agent/data/elastic-agent-$commit_hash" -new_agent_dir="$( dirname "$old_agent_dir")/elastic-agent-$commit_hash" - -if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then +# copy the state files if there was a previous agent install +if ! [ -z "$old_agent_dir" ] && ! [ "$old_agent_dir" -ef "$new_agent_dir" ]; then + yml_path="$old_agent_dir/state.yml" + enc_path="$old_agent_dir/state.enc" echo "migrate state from $old_agent_dir to $new_agent_dir" if test -f "$yml_path"; then @@ -24,15 +34,17 @@ if ! [[ "$old_agent_dir" -ef "$new_agent_dir" ]]; then echo "found "$enc_path", copy to "$new_agent_dir"." 
cp "$enc_path" "$new_agent_dir" fi +fi - if test -f "$symlink"; then - echo "found symlink $symlink, unlink" - unlink "$symlink" - fi - - echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" - ln -s "$new_agent_dir/elastic-agent" "$symlink" +# delete symlink if exists +if test -L "$symlink"; then + echo "found symlink $symlink, unlink" + unlink "$symlink" fi +# create symlink to the new agent +echo "create symlink "$symlink" to "$new_agent_dir/elastic-agent"" +ln -s "$new_agent_dir/elastic-agent" "$symlink" + systemctl daemon-reload 2> /dev/null exit 0 diff --git a/go.mod b/go.mod index 148cee40adc..245d331130f 100644 --- a/go.mod +++ b/go.mod @@ -11,7 +11,7 @@ require ( github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 github.com/docker/go-units v0.4.0 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 - github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab + github.com/elastic/elastic-agent-autodiscover v0.2.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 github.com/elastic/elastic-agent-libs v0.2.6 github.com/elastic/elastic-agent-system-metrics v0.4.4 @@ -121,7 +121,6 @@ require ( go.elastic.co/apm/v2 v2.0.0 // indirect go.elastic.co/fastjson v1.1.0 // indirect go.uber.org/atomic v1.9.0 // indirect - go.uber.org/goleak v1.1.12 // indirect go.uber.org/multierr v1.8.0 // indirect golang.org/x/mod v0.5.1 // indirect golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 // indirect diff --git a/go.sum b/go.sum index 47d8d474785..f8fb1ecc1a7 100644 --- a/go.sum +++ b/go.sum @@ -92,6 +92,7 @@ github.com/Microsoft/hcsshim v0.8.14/go.mod h1:NtVKoYxQuTLx6gEq0L96c9Ju4JbRJ4nY2 github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn69iY6URG00= github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600= github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4= +github.com/Microsoft/hcsshim v0.8.24/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg= github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU= github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= @@ -227,6 +228,7 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo= github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= +github.com/containerd/cgroups v1.0.3/go.mod h1:/ofk34relqNjSGyqPrmEULrO4Sc8LJhvJmWbUCUKqj8= github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw= github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE= @@ -240,6 +242,7 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod 
h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= +github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ= github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU= github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI= @@ -281,6 +284,7 @@ github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDG github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8= github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y= +github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ= github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc= github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk= github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg= @@ -376,11 +380,11 @@ github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 h1:uYT+Krd8dsvnhnLK9pe/JHZkYtXEGPfbV4Wt1JPPol0= github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4/go.mod h1:UcNuf4pX/qDVNQr0zybm1NL2YoWik+jKBaINZqQCA40= -github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab h1:Jk6Mfk5BF8gtfE7X0bNCiDGBtwJVxRI79b4wLCAsP+A= -github.com/elastic/elastic-agent-autodiscover v0.0.0-20220404145827-89887023c1ab/go.mod h1:Gg1fsQI+rVms9FJ2DefBSojfPIzgkV8xlyG8fPG0DE8= +github.com/elastic/elastic-agent-autodiscover v0.2.1 h1:Nbeayh3vq2FNm6xaFo34mhUdOu0EVlpj53CqCsbU0E4= +github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+iG9eyyXaQXBbAMHa+Y6Z8hXfcGY= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= -github.com/elastic/elastic-agent-libs v0.0.0-20220303160015-5b4e674da3dd/go.mod h1://82M1l73IHx0wDbS2Tzkq6Fx9fkmytS1KgkIyzvNTM= +github.com/elastic/elastic-agent-libs v0.2.5/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-system-metrics v0.4.4 h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0= @@ -949,7 +953,6 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8 github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod 
h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1403,6 +1406,7 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211020060615-d418f374d309/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4 h1:HVyaeDAYux4pnY+D/SiwmLOR36ewZ4iGQIIrtnuCjFA= @@ -1562,6 +1566,7 @@ golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index d3edb21888e..327138ac67a 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -75,6 +75,7 @@ func New( var configMgr coordinator.ConfigManager var managed *managedConfigManager var compModifiers []coordinator.ComponentsModifier + var composableManaged bool if configuration.IsStandalone(cfg.Fleet) { log.Info("Parsed configuration and determined agent is managed locally") @@ -102,6 +103,7 @@ func New( } else { log.Info("Parsed configuration and determined agent is managed by Fleet") + composableManaged = true compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server)) managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) if err != nil { @@ -111,7 +113,7 @@ func New( } } - composable, err := composable.New(log, rawConfig) + composable, err := composable.New(log, rawConfig, composableManaged) if err != nil { return nil, errors.New(err, "failed to initialize composable controller") } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 0288152f726..09396cf49fc 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ 
b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go
@@ -63,17 +63,18 @@ type stateStore interface {
 }
 
 type fleetGateway struct {
-	log           *logger.Logger
-	client        client.Sender
-	scheduler     scheduler.Scheduler
-	settings      *fleetGatewaySettings
-	agentInfo     agentInfo
-	acker         acker.Acker
-	unauthCounter int
-	stateFetcher  coordinator.StateFetcher
-	stateStore    stateStore
-	errCh         chan error
-	actionCh      chan []fleetapi.Action
+	log                *logger.Logger
+	client             client.Sender
+	scheduler          scheduler.Scheduler
+	settings           *fleetGatewaySettings
+	agentInfo          agentInfo
+	acker              acker.Acker
+	unauthCounter      int
+	checkinFailCounter int
+	stateFetcher       coordinator.StateFetcher
+	stateStore         stateStore
+	errCh              chan error
+	actionCh           chan []fleetapi.Action
 }
 
 // New creates a new fleet gateway
@@ -180,13 +181,25 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee
 	// this mean we are rebooting to change the log level or the system is shutting us down.
 	for ctx.Err() == nil {
 		f.log.Debugf("Checking started")
-		resp, err := f.execute(ctx)
+		resp, took, err := f.execute(ctx)
 		if err != nil {
-			f.log.Errorf("Could not communicate with fleet-server Checking API will retry, error: %s", err)
+			f.checkinFailCounter++
+
+			// Report the first two failures at warn level as they may be recoverable with retries.
+			if f.checkinFailCounter <= 2 {
+				f.log.Warnw("Possible transient error during checkin with fleet-server, retrying",
+					"error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter,
+					"retry_after_ns", bo.NextWait())
+			} else {
+				f.log.Errorw("Cannot check in with fleet-server, retrying",
+					"error.message", err, "request_duration_ns", took, "failed_checkins", f.checkinFailCounter,
+					"retry_after_ns", bo.NextWait())
+			}
+
 			if !bo.Wait() {
 				// Something bad has happened and we log it and we should update our current state.
 				err := errors.New(
-					"execute retry loop was stopped",
+					"checkin retry loop was stopped",
 					errors.TypeNetwork,
 					errors.M(errors.MetaKeyURI, f.client.URI()),
 				)
@@ -197,6 +210,13 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee
 			}
 			continue
 		}
+
+		if f.checkinFailCounter > 0 {
+			// Log at same level as error logs above so subsequent successes are visible when log level is set to 'error'.
+			f.log.Errorf("Checkin request to fleet-server succeeded after %d failures", f.checkinFailCounter)
+		}
+
+		f.checkinFailCounter = 0
 		// Request was successful, return the collected actions.
 		return resp, nil
 	}
@@ -273,7 +293,7 @@ func (f *fleetGateway) convertToCheckinComponents(components []runtime.Component
 	return checkinComponents
 }
 
-func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, error) {
+func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, time.Duration, error) {
 	ecsMeta, err := info.Metadata()
 	if err != nil {
 		f.log.Error(errors.New("failed to load metadata", err))
@@ -301,7 +321,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse,
 		Components: components,
 	}
 
-	resp, err := cmd.Execute(ctx, req)
+	resp, took, err := cmd.Execute(ctx, req)
 	if isUnauth(err) {
 		f.unauthCounter++
 
 		f.log.Warnf("retrieved an invalid api key error '%d' times.
Starting to unenroll the elastic agent.", f.unauthCounter) return &fleetapi.CheckinResponse{ Actions: []fleetapi.Action{&fleetapi.ActionUnenroll{ActionID: "", ActionType: "UNENROLL", IsDetected: true}}, - }, nil + }, took, nil } - return nil, err + return nil, took, err } f.unauthCounter = 0 if err != nil { - return nil, err + return nil, took, err } // Save the latest ackToken @@ -329,7 +349,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } } - return resp, nil + return resp, took, nil } // shouldUnenroll checks if the max number of trying an invalid key is reached diff --git a/internal/pkg/agent/application/info/state.go b/internal/pkg/agent/application/info/state.go index 1a6602f51f8..b9d73504d06 100644 --- a/internal/pkg/agent/application/info/state.go +++ b/internal/pkg/agent/application/info/state.go @@ -5,13 +5,15 @@ package info import ( - "fmt" "os" "path/filepath" - "strings" + "runtime" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/internal/pkg/release" +) + +const ( + darwin = "darwin" ) // RunningInstalled returns true when executing Agent is the installed Agent. @@ -19,22 +21,19 @@ import ( // This verifies the running executable path based on hard-coded paths // for each platform type. func RunningInstalled() bool { - expected := filepath.Join(paths.InstallPath, paths.BinaryName) + expectedPaths := []string{filepath.Join(paths.InstallPath, paths.BinaryName)} + if runtime.GOOS == darwin { + // For the symlink on darwin the execPath is /usr/local/bin/elastic-agent + expectedPaths = append(expectedPaths, paths.ShellWrapperPath) + } execPath, _ := os.Executable() execPath, _ = filepath.Abs(execPath) - execName := filepath.Base(execPath) - execDir := filepath.Dir(execPath) - if IsInsideData(execDir) { - // executable path is being reported as being down inside of data path - // move up to directories to perform the comparison - execDir = filepath.Dir(filepath.Dir(execDir)) - execPath = filepath.Join(execDir, execName) - } - return paths.ArePathsEqual(expected, execPath) -} -// IsInsideData returns true when the exePath is inside of the current Agents data path. -func IsInsideData(exePath string) bool { - expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())) - return strings.HasSuffix(exePath, expectedPath) + execPath = filepath.Join(paths.ExecDir(filepath.Dir(execPath)), filepath.Base(execPath)) + for _, expected := range expectedPaths { + if paths.ArePathsEqual(expected, execPath) { + return true + } + } + return false } diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index 1b6ef95b188..b89a197fdff 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -9,6 +9,7 @@ import ( "fmt" "os" "path/filepath" + "runtime" "strings" "sync" @@ -21,6 +22,8 @@ const ( // AgentLockFileName is the name of the overall Elastic Agent file lock. AgentLockFileName = "agent.lock" tempSubdir = "tmp" + + darwin = "darwin" ) // ExternalInputsPattern is a glob that matches the paths of external configuration files. @@ -190,16 +193,14 @@ func SetInstall(path string) { // initialTop returns the initial top-level path for the binary // // When nested in top-level/data/elastic-agent-${hash}/ the result is top-level/. 
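
(Aside: the fleet gateway change earlier in this patch demotes the first two consecutive checkin failures to warnings and resets the counter on success, so a recovery stays visible even at error level. A reduced sketch of that escalation logic, detached from the real gateway types:)

    package main

    import "fmt"

    // reportCheckinFailure mirrors the escalation in doExecute: the first two
    // consecutive failures are assumed transient (warn), later ones are errors.
    func reportCheckinFailure(failCount int, err error) string {
    	if failCount <= 2 {
    		return fmt.Sprintf("WARN possible transient checkin error (attempt %d): %v", failCount, err)
    	}
    	return fmt.Sprintf("ERROR cannot check in with fleet-server (attempt %d): %v", failCount, err)
    }

    func main() {
    	for i := 1; i <= 4; i++ {
    		fmt.Println(reportCheckinFailure(i, fmt.Errorf("connection refused")))
    	}
    }

The paths changes continue below.
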
+// The agent executable for MacOS is wrapped in the app bundle, so the path to the binary is
+// top-level/data/elastic-agent-${hash}/elastic-agent.app/Contents/MacOS
 func initialTop() string {
-	exePath := retrieveExecutablePath()
-	if insideData(exePath) {
-		return filepath.Dir(filepath.Dir(exePath))
-	}
-	return exePath
+	return ExecDir(retrieveExecutableDir())
 }
 
-// retrieveExecutablePath returns the executing binary, even if the started binary was a symlink
-func retrieveExecutablePath() string {
+// retrieveExecutableDir returns the directory of the executing binary, even if the started binary was a symlink
+func retrieveExecutableDir() string {
 	execPath, err := os.Executable()
 	if err != nil {
 		panic(err)
@@ -211,8 +212,37 @@ func retrieveExecutablePath() string {
 	return filepath.Dir(evalPath)
 }
 
-// insideData returns true when the exePath is inside of the current Agents data path.
-func insideData(exePath string) bool {
-	expectedPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit()))
-	return strings.HasSuffix(exePath, expectedPath)
+// isInsideData returns true when the exePath is inside of the current Agents data path.
+func isInsideData(exeDir string) bool {
+	expectedDir := binaryDir(filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit())))
+	return strings.HasSuffix(exeDir, expectedDir)
+}
+
+// ExecDir returns the "executable" directory which is:
+// 1. The same if the execDir is not inside of the data path
+// 2. Two levels up if the execDir is inside of the data path on non-macOS platforms
+// 3. Five levels up if the execDir is inside of the data path on the macOS platform
+func ExecDir(execDir string) string {
+	if isInsideData(execDir) {
+		execDir = filepath.Dir(filepath.Dir(execDir))
+		if runtime.GOOS == darwin {
+			execDir = filepath.Dir(filepath.Dir(filepath.Dir(execDir)))
+		}
+	}
+	return execDir
+}
+
+// binaryDir returns the application binary directory
+// For macOS it appends the path inside of the app bundle
+// For other platforms it returns the same dir
+func binaryDir(baseDir string) string {
+	if runtime.GOOS == darwin {
+		baseDir = filepath.Join(baseDir, "elastic-agent.app", "Contents", "MacOS")
+	}
+	return baseDir
+}
+
+// BinaryPath returns the application binary path that is the concatenation of the directory and the agentName
+func BinaryPath(baseDir, agentName string) string {
+	return filepath.Join(binaryDir(baseDir), agentName)
 }
diff --git a/internal/pkg/agent/application/paths/common_test.go b/internal/pkg/agent/application/paths/common_test.go
new file mode 100644
index 00000000000..27a9cf80ebd
--- /dev/null
+++ b/internal/pkg/agent/application/paths/common_test.go
@@ -0,0 +1,93 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package paths
+
+import (
+	"fmt"
+	"path/filepath"
+	"runtime"
+	"testing"
+
+	"github.com/google/go-cmp/cmp"
+
+	"github.com/elastic/elastic-agent/internal/pkg/release"
+)
+
+func validTestPath() string {
+	validPath := filepath.Join("data", fmt.Sprintf("elastic-agent-%s", release.ShortCommit()))
+	if runtime.GOOS == darwin {
+		validPath = filepath.Join(validPath, "elastic-agent.app", "Contents", "MacOS")
+	}
+	return validPath
+}
+
+func TestIsInsideData(t *testing.T) {
+	tests := []struct {
+		name    string
+		exePath string
+		res     bool
+	}{
+		{
+			name: "empty",
+		},
+		{
+			name:    "invalid",
+			exePath: "data/elastic-agent",
+		},
+		{
+			name:    "valid",
+			exePath: validTestPath(),
+			res:     true,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			res := isInsideData(tc.exePath)
+			diff := cmp.Diff(tc.res, res)
+			if diff != "" {
+				t.Error(diff)
+			}
+		})
+	}
+}
+
+func TestExecDir(t *testing.T) {
+	base := filepath.Join(string(filepath.Separator), "Library", "Elastic", "Agent")
+	tests := []struct {
+		name    string
+		execDir string
+		resDir  string
+	}{
+		{
+			name: "empty",
+		},
+		{
+			name:    "non-data path",
+			execDir: "data/elastic-agent",
+			resDir:  "data/elastic-agent",
+		},
+		{
+			name:    "valid",
+			execDir: validTestPath(),
+			resDir:  ".",
+		},
+		{
+			name:    "valid abs",
+			execDir: filepath.Join(base, validTestPath()),
+			resDir:  base,
+		},
+	}
+
+	for _, tc := range tests {
+		t.Run(tc.name, func(t *testing.T) {
+			resDir := ExecDir(tc.execDir)
+			diff := cmp.Diff(tc.resDir, resDir)
+			if diff != "" {
+				t.Error(diff)
+			}
+		})
+	}
+}
diff --git a/internal/pkg/agent/application/paths/files.go b/internal/pkg/agent/application/paths/files.go
index 7d35549e840..e6a1bf2eda1 100644
--- a/internal/pkg/agent/application/paths/files.go
+++ b/internal/pkg/agent/application/paths/files.go
@@ -32,6 +32,9 @@ const defaultAgentStateStoreYmlFile = "state.yml"
 // defaultAgentStateStoreFile is the file that will contain the action that can be replayed after restart encrypted.
 const defaultAgentStateStoreFile = "state.enc"
 
+// defaultInputsDPath returns the location of the inputs.d directory.
+const defaultInputsDPath = "inputs.d"
+
 // AgentConfigYmlFile is a name of file used to store agent information
 func AgentConfigYmlFile() string {
 	return filepath.Join(Config(), defaultAgentFleetYmlFile)
@@ -82,3 +85,8 @@ func AgentStateStoreYmlFile() string {
 func AgentStateStoreFile() string {
 	return filepath.Join(Home(), defaultAgentStateStoreFile)
 }
+
+// AgentInputsDPath is the directory that contains the inputs YAML fragments for K8s deployments.
+func AgentInputsDPath() string { + return filepath.Join(Config(), defaultInputsDPath) +} diff --git a/internal/pkg/agent/application/upgrade/artifact/config.go b/internal/pkg/agent/application/upgrade/artifact/config.go index 6db38fa612c..b09c6faf7e7 100644 --- a/internal/pkg/agent/application/upgrade/artifact/config.go +++ b/internal/pkg/agent/application/upgrade/artifact/config.go @@ -9,8 +9,12 @@ import ( "strings" "time" + c "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/transport/httpcommon" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" ) const ( @@ -22,6 +26,10 @@ const ( DefaultSourceURI = "https://artifacts.elastic.co/downloads/" ) +type ConfigReloader interface { + Reload(*Config) error +} + // Config is a configuration used for verifier and downloader type Config struct { // OperatingSystem: operating system [linux, windows, darwin] @@ -49,6 +57,96 @@ type Config struct { httpcommon.HTTPTransportSettings `config:",inline" yaml:",inline"` // Note: use anonymous struct for json inline } +type Reloader struct { + log *logger.Logger + cfg *Config + reloaders []ConfigReloader +} + +func NewReloader(cfg *Config, log *logger.Logger, rr ...ConfigReloader) *Reloader { + return &Reloader{ + cfg: cfg, + log: log, + reloaders: rr, + } +} + +func (r *Reloader) Reload(rawConfig *config.Config) error { + if err := r.reloadConfig(rawConfig); err != nil { + return errors.New(err, "failed to reload config") + } + + if err := r.reloadSourceURI(rawConfig); err != nil { + return errors.New(err, "failed to reload source URI") + } + + for _, reloader := range r.reloaders { + if err := reloader.Reload(r.cfg); err != nil { + return errors.New(err, "failed reloading config") + } + } + + return nil +} + +func (r *Reloader) reloadConfig(rawConfig *config.Config) error { + type reloadConfig struct { + C *Config `json:"agent.download" config:"agent.download"` + } + tmp := &reloadConfig{ + C: DefaultConfig(), + } + if err := rawConfig.Unpack(&tmp); err != nil { + return err + } + + *(r.cfg) = Config{ + OperatingSystem: tmp.C.OperatingSystem, + Architecture: tmp.C.Architecture, + SourceURI: tmp.C.SourceURI, + TargetDirectory: tmp.C.TargetDirectory, + InstallPath: tmp.C.InstallPath, + DropPath: tmp.C.DropPath, + HTTPTransportSettings: tmp.C.HTTPTransportSettings, + } + + return nil +} + +func (r *Reloader) reloadSourceURI(rawConfig *config.Config) error { + type reloadConfig struct { + // SourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ + SourceURI string `json:"agent.download.sourceURI" config:"agent.download.sourceURI"` + + // FleetSourceURI: source of the artifacts, e.g https://artifacts.elastic.co/downloads/ coming from fleet which uses + // different naming. 
+ FleetSourceURI string `json:"agent.download.source_uri" config:"agent.download.source_uri"` + } + cfg := &reloadConfig{} + if err := rawConfig.Unpack(&cfg); err != nil { + return errors.New(err, "failed to unpack config during reload") + } + + var newSourceURI string + if fleetURI := strings.TrimSpace(cfg.FleetSourceURI); fleetURI != "" { + // fleet configuration takes precedence + newSourceURI = fleetURI + } else if sourceURI := strings.TrimSpace(cfg.SourceURI); sourceURI != "" { + newSourceURI = sourceURI + } + + if newSourceURI != "" { + r.log.Infof("Source URI changed from %q to %q", r.cfg.SourceURI, newSourceURI) + r.cfg.SourceURI = newSourceURI + } else { + // source uri unset, reset to default + r.log.Infof("Source URI reset from %q to %q", r.cfg.SourceURI, DefaultSourceURI) + r.cfg.SourceURI = DefaultSourceURI + } + + return nil +} + // DefaultConfig creates a config with pre-set default values. func DefaultConfig() *Config { transport := httpcommon.DefaultHTTPTransportSettings() @@ -100,3 +198,42 @@ func (c *Config) Arch() string { c.Architecture = arch return c.Architecture } + +// Unpack reads a config object into the settings. +func (c *Config) Unpack(cfg *c.C) error { + tmp := struct { + OperatingSystem string `json:"-" config:",ignore"` + Architecture string `json:"-" config:",ignore"` + SourceURI string `json:"sourceURI" config:"sourceURI"` + TargetDirectory string `json:"targetDirectory" config:"target_directory"` + InstallPath string `yaml:"installPath" config:"install_path"` + DropPath string `yaml:"dropPath" config:"drop_path"` + }{ + OperatingSystem: c.OperatingSystem, + Architecture: c.Architecture, + SourceURI: c.SourceURI, + TargetDirectory: c.TargetDirectory, + InstallPath: c.InstallPath, + DropPath: c.DropPath, + } + + if err := cfg.Unpack(&tmp); err != nil { + return err + } + + transport := DefaultConfig().HTTPTransportSettings + if err := cfg.Unpack(&transport); err != nil { + return err + } + + *c = Config{ + OperatingSystem: tmp.OperatingSystem, + Architecture: tmp.Architecture, + SourceURI: tmp.SourceURI, + TargetDirectory: tmp.TargetDirectory, + InstallPath: tmp.InstallPath, + DropPath: tmp.DropPath, + HTTPTransportSettings: transport, + } + return nil +} diff --git a/internal/pkg/agent/application/upgrade/artifact/config_test.go b/internal/pkg/agent/application/upgrade/artifact/config_test.go new file mode 100644 index 00000000000..803154e465f --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/config_test.go @@ -0,0 +1,248 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
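reloadSourceURI above gives Fleet's agent.download.source_uri precedence over agent.download.sourceURI, and falls back to the default when both are empty or whitespace. A standalone sketch of just that selection rule (pickSourceURI is an illustrative helper, not the patch's API), matching the expectations in the config_test.go that follows:

package main

import (
	"fmt"
	"strings"
)

const defaultSourceURI = "https://artifacts.elastic.co/downloads/"

// pickSourceURI mirrors the precedence implemented by reloadSourceURI above:
// the Fleet-provided source_uri wins, then sourceURI, then the default.
func pickSourceURI(fleetURI, legacyURI string) string {
	if v := strings.TrimSpace(fleetURI); v != "" {
		return v
	}
	if v := strings.TrimSpace(legacyURI); v != "" {
		return v
	}
	return defaultSourceURI
}

func main() {
	fmt.Println(pickSourceURI("fleet.uri", "legacy.uri")) // fleet.uri wins
	fmt.Println(pickSourceURI("  ", "legacy.uri"))        // whitespace ignored, legacy.uri
	fmt.Println(pickSourceURI("", ""))                    // default restored
}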
+ +package artifact + +import ( + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-agent/internal/pkg/config" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +func TestReload(t *testing.T) { + type testCase struct { + input string + initialConfig *Config + expectedSourceURI string + expectedTargetDirectory string + expectedInstallDirectory string + expectedDropDirectory string + expectedFingerprint string + expectedTLS bool + expectedTLSEnabled bool + expectedDisableProxy bool + expectedTimeout time.Duration + } + defaultValues := DefaultConfig() + testCases := []testCase{ + { + input: `agent.download: + sourceURI: "testing.uri" + target_directory: "a/b/c" + install_path: "i/p" + drop_path: "d/p" + proxy_disable: true + timeout: 33s + ssl.enabled: true + ssl.ca_trusted_fingerprint: "my_finger_print" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: "a/b/c", + expectedInstallDirectory: "i/p", + expectedDropDirectory: "d/p", + expectedFingerprint: "my_finger_print", + expectedTLS: true, + expectedTLSEnabled: true, + expectedDisableProxy: true, + expectedTimeout: 33 * time.Second, + }, + { + input: `agent.download: + sourceURI: "testing.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + sourceURI: "" +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to empty + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: ``, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when not set + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + sourceURI: " " +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + 
source_uri: " " +`, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: " " + sourceURI: " " +`, + initialConfig: DefaultConfig(), + expectedSourceURI: defaultValues.SourceURI, // fallback to default when set to whitespace + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: ``, + initialConfig: &Config{ + SourceURI: "testing.uri", + HTTPTransportSettings: defaultValues.HTTPTransportSettings, + }, + expectedSourceURI: defaultValues.SourceURI, + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: " " + sourceURI: "testing.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: "testing.uri" + sourceURI: " " +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + { + input: `agent.download: + source_uri: "testing.uri" + sourceURI: "another.uri" +`, + initialConfig: DefaultConfig(), + expectedSourceURI: "testing.uri", + expectedTargetDirectory: defaultValues.TargetDirectory, + expectedInstallDirectory: defaultValues.InstallPath, + expectedDropDirectory: defaultValues.DropPath, + expectedFingerprint: "", + expectedTLS: defaultValues.TLS != nil, + expectedTLSEnabled: false, + expectedDisableProxy: defaultValues.Proxy.Disable, + expectedTimeout: defaultValues.Timeout, + }, + } + + l, _ := logger.NewTesting("t") + for _, tc := range testCases { + cfg := tc.initialConfig + reloader := NewReloader(cfg, l) + + c, err := config.NewConfigFrom(tc.input) + require.NoError(t, err) + + require.NoError(t, reloader.Reload(c)) + + require.Equal(t, tc.expectedSourceURI, cfg.SourceURI) + require.Equal(t, 
tc.expectedTargetDirectory, cfg.TargetDirectory) + require.Equal(t, tc.expectedInstallDirectory, cfg.InstallPath) + require.Equal(t, tc.expectedDropDirectory, cfg.DropPath) + require.Equal(t, tc.expectedTimeout, cfg.Timeout) + + require.Equal(t, tc.expectedDisableProxy, cfg.Proxy.Disable) + + if tc.expectedTLS { + require.NotNil(t, cfg.TLS) + require.Equal(t, tc.expectedTLSEnabled, *cfg.TLS.Enabled) + require.Equal(t, tc.expectedFingerprint, cfg.TLS.CATrustedFingerprint) + } else { + require.Nil(t, cfg.TLS) + } + } +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go index 84e353ff661..b5de15fc9a8 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/downloader.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) // Downloader is a downloader with a predefined set of downloaders. @@ -50,3 +51,17 @@ func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version return "", err } + +func (e *Downloader) Reload(c *artifact.Config) error { + for _, d := range e.dd { + reloadable, ok := d.(download.Reloader) + if !ok { + continue + } + + if err := reloadable.Reload(c); err != nil { + return errors.New(err, "failed reloading artifact config for composed downloader") + } + } + return nil +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go index 26c714d8c52..8930c2a1ba6 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/composed/verifier.go @@ -5,12 +5,11 @@ package composed import ( - "errors" - "github.com/hashicorp/go-multierror" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) // Verifier is a verifier with a predefined set of verifiers. 
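The composed downloader's Reload above fans the new config out to each child via a type assertion, silently skipping children that are not reloadable; the composed Verifier in the next hunk applies the identical pattern. A generic standalone sketch of this opt-in fan-out (all names here are illustrative):

package main

import "fmt"

type Config struct{ SourceURI string }

// Reloader is the opt-in capability, mirroring download.Reloader.
type Reloader interface{ Reload(*Config) error }

type static struct{}                // a child without reload support
type dynamic struct{ uri string }   // a child that accepts new config

func (d *dynamic) Reload(c *Config) error { d.uri = c.SourceURI; return nil }

// reloadAll fans a config out to every child that implements Reloader.
func reloadAll(children []interface{}, c *Config) error {
	for _, child := range children {
		r, ok := child.(Reloader)
		if !ok {
			continue // not reloadable: skip, don't fail
		}
		if err := r.Reload(c); err != nil {
			return fmt.Errorf("reload failed: %w", err)
		}
	}
	return nil
}

func main() {
	d := &dynamic{}
	_ = reloadAll([]interface{}{static{}, d}, &Config{SourceURI: "new.uri"})
	fmt.Println(d.uri) // new.uri
}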
@@ -54,3 +53,17 @@ func (e *Verifier) Verify(a artifact.Artifact, version string) error { return err } + +func (e *Verifier) Reload(c *artifact.Config) error { + for _, v := range e.vv { + reloadable, ok := v.(download.Reloader) + if !ok { + continue + } + + if err := reloadable.Reload(c); err != nil { + return errors.New(err, "failed reloading artifact config for composed verifier") + } + } + return nil +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go index 5ef423825a9..7e7ca63ed23 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/http/downloader.go @@ -73,6 +73,23 @@ func NewDownloaderWithClient(log progressLogger, config *artifact.Config, client } } +func (e *Downloader) Reload(c *artifact.Config) error { + // reload client + client, err := c.HTTPTransportSettings.Client( + httpcommon.WithAPMHTTPInstrumentation(), + ) + if err != nil { + return errors.New(err, "http.downloader: failed to generate client out of config") + } + + client.Transport = withHeaders(client.Transport, headers) + + e.client = *client + e.config = c + + return nil +} + // Download fetches the package from configured source. // Returns absolute path to downloaded package and an error. func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (_ string, err error) { @@ -81,7 +98,9 @@ func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version defer func() { if err != nil { for _, path := range downloadedFiles { - os.Remove(path) + if err := os.Remove(path); err != nil { + e.log.Warnf("failed to cleanup %s: %v", path, err) + } } } }() @@ -171,12 +190,14 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f resp, err := e.client.Do(req.WithContext(ctx)) if err != nil { - return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } defer resp.Body.Close() if resp.StatusCode != 200 { - return "", errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(fmt.Sprintf("call to '%s' returned unsuccessful status code: %d", sourceURI, resp.StatusCode), errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } fileSize := -1 @@ -193,7 +214,8 @@ func (e *Downloader) downloadFile(ctx context.Context, artifactName, filename, f if err != nil { reportCancel() dp.ReportFailed(err) - return "", errors.New(err, "fetching package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) + // return path, file already exists and needs to be cleaned up + return fullPath, errors.New(err, "copying fetched package failed", errors.TypeNetwork, errors.M(errors.MetaKeyURI, sourceURI)) } reportCancel() dp.ReportComplete() diff --git a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go index 4568c0f2cdd..46590f4e5db 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go 
+++ b/internal/pkg/agent/application/upgrade/artifact/download/http/verifier.go @@ -60,6 +60,24 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte) (*Veri return v, nil } +func (v *Verifier) Reload(c *artifact.Config) error { + // reload client + client, err := c.HTTPTransportSettings.Client( + httpcommon.WithAPMHTTPInstrumentation(), + httpcommon.WithModRoundtripper(func(rt http.RoundTripper) http.RoundTripper { + return withHeaders(rt, headers) + }), + ) + if err != nil { + return errors.New(err, "http.verifier: failed to generate client out of config") + } + + v.client = *client + v.config = c + + return nil +} + // Verify checks downloaded package on preconfigured // location against a key stored on elastic.co website. func (v *Verifier) Verify(a artifact.Artifact, version string) error { diff --git a/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go b/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go new file mode 100644 index 00000000000..3b2239740c7 --- /dev/null +++ b/internal/pkg/agent/application/upgrade/artifact/download/reloadable.go @@ -0,0 +1,14 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package download + +import ( + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" +) + +// Reloader is an interface allowing to reload artifact config +type Reloader interface { + Reload(*artifact.Config) error +} diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go index b858fca0fc3..2a09c65e522 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/downloader.go @@ -5,19 +5,26 @@ package snapshot import ( + "context" "encoding/json" "fmt" "strings" + "github.com/elastic/elastic-agent-libs/transport/httpcommon" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" - - "github.com/elastic/elastic-agent-libs/transport/httpcommon" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/release" "github.com/elastic/elastic-agent/pkg/core/logger" ) +type Downloader struct { + downloader download.Downloader + versionOverride string +} + // NewDownloader creates a downloader which first checks local directory // and then fallbacks to remote if configured. 
func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride string) (download.Downloader, error) { @@ -25,7 +32,36 @@ func NewDownloader(log *logger.Logger, config *artifact.Config, versionOverride if err != nil { return nil, err } - return http.NewDownloader(log, cfg) + + httpDownloader, err := http.NewDownloader(log, cfg) + if err != nil { + return nil, errors.New(err, "failed to create snapshot downloader") + } + + return &Downloader{ + downloader: httpDownloader, + versionOverride: versionOverride, + }, nil +} + +func (e *Downloader) Reload(c *artifact.Config) error { + reloader, ok := e.downloader.(artifact.ConfigReloader) + if !ok { + return nil + } + + cfg, err := snapshotConfig(c, e.versionOverride) + if err != nil { + return errors.New(err, "snapshot.downloader: failed to generate snapshot config") + } + + return reloader.Reload(cfg) +} + +// Download fetches the package from configured source. +// Returns absolute path to downloaded package and an error. +func (e *Downloader) Download(ctx context.Context, a artifact.Artifact, version string) (string, error) { + return e.downloader.Download(ctx, a, version) } func snapshotConfig(config *artifact.Config, versionOverride string) (*artifact.Config, error) { @@ -35,12 +71,13 @@ func snapshotConfig(config *artifact.Config, versionOverride string) (*artifact. } return &artifact.Config{ - OperatingSystem: config.OperatingSystem, - Architecture: config.Architecture, - SourceURI: snapshotURI, - TargetDirectory: config.TargetDirectory, - InstallPath: config.InstallPath, - DropPath: config.DropPath, + OperatingSystem: config.OperatingSystem, + Architecture: config.Architecture, + SourceURI: snapshotURI, + TargetDirectory: config.TargetDirectory, + InstallPath: config.InstallPath, + DropPath: config.DropPath, + HTTPTransportSettings: config.HTTPTransportSettings, }, nil } diff --git a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go index 31ad26a0474..c114775cbdb 100644 --- a/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go +++ b/internal/pkg/agent/application/upgrade/artifact/download/snapshot/verifier.go @@ -8,8 +8,14 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download" "github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade/artifact/download/http" + "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) +type Verifier struct { + verifier download.Verifier + versionOverride string +} + // NewVerifier creates a downloader which first checks local directory // and then fallbacks to remote if configured. func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte, versionOverride string) (download.Verifier, error) { @@ -17,5 +23,32 @@ func NewVerifier(config *artifact.Config, allowEmptyPgp bool, pgp []byte, versio if err != nil { return nil, err } - return http.NewVerifier(cfg, allowEmptyPgp, pgp) + v, err := http.NewVerifier(cfg, allowEmptyPgp, pgp) + if err != nil { + return nil, errors.New(err, "failed to create snapshot verifier") + } + + return &Verifier{ + verifier: v, + versionOverride: versionOverride, + }, nil +} + +// Verify checks the package from configured source. 
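The snapshot Downloader above is a thin decorator: Download is pure delegation, while Reload re-derives the snapshot config before forwarding it to the wrapped http downloader. A reduced standalone sketch of that shape (deriveSnapshot stands in for the patch's snapshotConfig; all types here are illustrative):

package main

import (
	"context"
	"fmt"
)

type Config struct{ SourceURI string }

// httpDownloader stands in for the wrapped http.Downloader.
type httpDownloader struct{ cfg *Config }

func (h *httpDownloader) Download(_ context.Context, version string) (string, error) {
	return "/tmp/elastic-agent-" + version + ".tar.gz", nil
}

func (h *httpDownloader) Reload(c *Config) error { h.cfg = c; return nil }

// snapshotDownloader decorates the inner downloader: Download delegates,
// Reload first rewrites the config for the snapshot repository.
type snapshotDownloader struct {
	inner           *httpDownloader
	versionOverride string
}

func (s *snapshotDownloader) Download(ctx context.Context, version string) (string, error) {
	return s.inner.Download(ctx, version)
}

// deriveSnapshot stands in for the patch's snapshotConfig helper.
func (s *snapshotDownloader) deriveSnapshot(c *Config) *Config {
	return &Config{SourceURI: c.SourceURI + s.versionOverride + "/"}
}

func (s *snapshotDownloader) Reload(c *Config) error {
	return s.inner.Reload(s.deriveSnapshot(c))
}

func main() {
	s := &snapshotDownloader{inner: &httpDownloader{}, versionOverride: "8.4.0-SNAPSHOT"}
	_ = s.Reload(&Config{SourceURI: "https://snapshots.example/downloads/"})
	fmt.Println(s.inner.cfg.SourceURI) // rewritten before reaching the inner downloader
}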
+func (e *Verifier) Verify(a artifact.Artifact, version string) error {
+	return e.verifier.Verify(a, version)
+}
+
+func (e *Verifier) Reload(c *artifact.Config) error {
+	reloader, ok := e.verifier.(artifact.ConfigReloader)
+	if !ok {
+		return nil
+	}
+
+	cfg, err := snapshotConfig(c, e.versionOverride)
+	if err != nil {
+		return errors.New(err, "snapshot.verifier: failed to generate snapshot config")
+	}
+
+	return reloader.Reload(cfg)
 }
diff --git a/internal/pkg/agent/application/upgrade/cleanup.go b/internal/pkg/agent/application/upgrade/cleanup.go
index 5e0618dfe78..2581e30a1d9 100644
--- a/internal/pkg/agent/application/upgrade/cleanup.go
+++ b/internal/pkg/agent/application/upgrade/cleanup.go
@@ -13,11 +13,15 @@ import (
 	"github.com/hashicorp/go-multierror"
 
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+	"github.com/elastic/elastic-agent/pkg/core/logger"
 )
 
-// preUpgradeCleanup will remove files that do not have the passed version number from the downloads directory.
-func preUpgradeCleanup(version string) error {
-	files, err := os.ReadDir(paths.Downloads())
+// cleanNonMatchingVersionsFromDownloads will remove files that do not have the passed version number from the downloads directory.
+func cleanNonMatchingVersionsFromDownloads(log *logger.Logger, version string) error {
+	downloadsPath := paths.Downloads()
+	log.Debugw("Cleaning up non-matching downloaded versions", "version", version, "downloads.path", downloadsPath)
+
+	files, err := os.ReadDir(downloadsPath)
 	if err != nil {
 		return fmt.Errorf("unable to read directory %q: %w", paths.Downloads(), err)
 	}
diff --git a/internal/pkg/agent/application/upgrade/cleanup_test.go b/internal/pkg/agent/application/upgrade/cleanup_test.go
index 736a9c42b3d..1170c26946d 100644
--- a/internal/pkg/agent/application/upgrade/cleanup_test.go
+++ b/internal/pkg/agent/application/upgrade/cleanup_test.go
@@ -9,7 +9,9 @@ import (
 	"path/filepath"
 	"testing"
 
+	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+	"github.com/elastic/elastic-agent/pkg/core/logger"
 
 	"github.com/stretchr/testify/require"
 )
@@ -31,7 +33,8 @@ func setupDir(t *testing.T) {
 
 func TestPreUpgradeCleanup(t *testing.T) {
 	setupDir(t)
-	err := preUpgradeCleanup("8.4.0")
+	log := newErrorLogger(t)
+	err := cleanNonMatchingVersionsFromDownloads(log, "8.4.0")
 	require.NoError(t, err)
 
 	files, err := os.ReadDir(paths.Downloads())
@@ -42,3 +45,14 @@ func TestPreUpgradeCleanup(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, []byte("hello, world!"), p)
 }
+
+func newErrorLogger(t *testing.T) *logger.Logger {
+	t.Helper()
+
+	loggerCfg := logger.DefaultLoggingConfig()
+	loggerCfg.Level = logp.ErrorLevel
+
+	log, err := logger.NewFromConfig("", loggerCfg, false)
+	require.NoError(t, err)
+	return log
+}
diff --git a/internal/pkg/agent/application/upgrade/rollback.go b/internal/pkg/agent/application/upgrade/rollback.go
index 8ce6958beae..b4f6014fb3d 100644
--- a/internal/pkg/agent/application/upgrade/rollback.go
+++ b/internal/pkg/agent/application/upgrade/rollback.go
@@ -31,33 +31,35 @@ const (
 )
 
 // Rollback rollbacks to previous version which was functioning before upgrade.
-func Rollback(ctx context.Context, prevHash, currentHash string) error { +func Rollback(ctx context.Context, log *logger.Logger, prevHash string, currentHash string) error { // change symlink - if err := ChangeSymlink(ctx, prevHash); err != nil { + if err := ChangeSymlink(ctx, log, prevHash); err != nil { return err } // revert active commit - if err := UpdateActiveCommit(prevHash); err != nil { + if err := UpdateActiveCommit(log, prevHash); err != nil { return err } // Restart + log.Info("Restarting the agent after rollback") if err := restartAgent(ctx); err != nil { return err } // cleanup everything except version we're rolling back into - return Cleanup(prevHash, true) + return Cleanup(log, prevHash, true) } // Cleanup removes all artifacts and files related to a specified version. -func Cleanup(currentHash string, removeMarker bool) error { +func Cleanup(log *logger.Logger, currentHash string, removeMarker bool) error { + log.Debugw("Cleaning up upgrade", "hash", currentHash, "remove_marker", removeMarker) <-time.After(afterRestartDelay) // remove upgrade marker if removeMarker { - if err := CleanMarker(); err != nil { + if err := CleanMarker(log); err != nil { return err } } @@ -74,7 +76,9 @@ func Cleanup(currentHash string, removeMarker bool) error { } // remove symlink to avoid upgrade failures, ignore error - _ = os.Remove(prevSymlinkPath()) + prevSymlink := prevSymlinkPath() + log.Debugw("Removing previous symlink path", "file.path", prevSymlinkPath()) + _ = os.Remove(prevSymlink) dirPrefix := fmt.Sprintf("%s-", agentName) currentDir := fmt.Sprintf("%s-%s", agentName, currentHash) @@ -88,6 +92,7 @@ func Cleanup(currentHash string, removeMarker bool) error { } hashedDir := filepath.Join(paths.Data(), dir) + log.Debugw("Removing hashed data directory", "file.path", hashedDir) if cleanupErr := install.RemovePath(hashedDir); cleanupErr != nil { err = multierror.Append(err, cleanupErr) } @@ -113,6 +118,7 @@ func InvokeWatcher(log *logger.Logger) error { } }() + log.Debugw("Starting upgrade watcher", "path", cmd.Path, "args", cmd.Args, "env", cmd.Env, "dir", cmd.Dir) return cmd.Start() } diff --git a/internal/pkg/agent/application/upgrade/service_darwin.go b/internal/pkg/agent/application/upgrade/service_darwin.go index 2bdb435147b..58709dd3e53 100644 --- a/internal/pkg/agent/application/upgrade/service_darwin.go +++ b/internal/pkg/agent/application/upgrade/service_darwin.go @@ -14,7 +14,6 @@ import ( "fmt" "os" "os/exec" - "path/filepath" "regexp" "strconv" "strings" @@ -50,13 +49,13 @@ func (p *darwinPidProvider) Close() {} func (p *darwinPidProvider) PID(ctx context.Context) (int, error) { piders := []func(context.Context) (int, error){ - p.piderFromCmd(ctx, "launchctl", "list", paths.ServiceName), + p.piderFromCmd("launchctl", "list", paths.ServiceName), } // if release is specifically built to be upgradeable (using DEV flag) // we dont require to run as a service and will need sudo fallback if release.Upgradeable() { - piders = append(piders, p.piderFromCmd(ctx, "sudo", "launchctl", "list", paths.ServiceName)) + piders = append(piders, p.piderFromCmd("sudo", "launchctl", "list", paths.ServiceName)) } var pidErrors error @@ -72,7 +71,7 @@ func (p *darwinPidProvider) PID(ctx context.Context) (int, error) { return 0, pidErrors } -func (p *darwinPidProvider) piderFromCmd(ctx context.Context, name string, args ...string) func(context.Context) (int, error) { +func (p *darwinPidProvider) piderFromCmd(name string, args ...string) func(context.Context) (int, error) { return 
func(context.Context) (int, error) { listCmd := exec.Command(name, args...) listCmd.SysProcAttr = &syscall.SysProcAttr{ @@ -115,8 +114,8 @@ func (p *darwinPidProvider) piderFromCmd(ctx context.Context, name string, args } func invokeCmd(topPath string) *exec.Cmd { - homeExePath := filepath.Join(topPath, agentName) - + // paths.BinaryPath properly derives the newPath depending on the platform. The path to the binary for macOS is inside of the app bundle. + homeExePath := paths.BinaryPath(topPath, agentName) cmd := exec.Command(homeExePath, watcherSubcommand, "--path.config", paths.Config(), "--path.home", paths.Top(), diff --git a/internal/pkg/agent/application/upgrade/step_download.go b/internal/pkg/agent/application/upgrade/step_download.go index 926e310fda3..510fd00cb8f 100644 --- a/internal/pkg/agent/application/upgrade/step_download.go +++ b/internal/pkg/agent/application/upgrade/step_download.go @@ -40,6 +40,10 @@ func (u *Upgrader) downloadArtifact(ctx context.Context, version, sourceURI stri } } + u.log.Debugw("Downloading upgrade artifact", "version", version, + "source_uri", settings.SourceURI, "drop_path", settings.DropPath, + "target_path", settings.TargetDirectory, "install_path", settings.InstallPath) + verifier, err := newVerifier(version, u.log, &settings) if err != nil { return "", errors.New(err, "initiating verifier") diff --git a/internal/pkg/agent/application/upgrade/step_mark.go b/internal/pkg/agent/application/upgrade/step_mark.go index 7757ff6a9a1..fa337e3907a 100644 --- a/internal/pkg/agent/application/upgrade/step_mark.go +++ b/internal/pkg/agent/application/upgrade/step_mark.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/fleetapi" "github.com/elastic/elastic-agent/internal/pkg/release" + "github.com/elastic/elastic-agent/pkg/core/logger" ) const markerFilename = ".update-marker" @@ -91,7 +92,7 @@ func newMarkerSerializer(m *UpdateMarker) *updateMarkerSerializer { } // markUpgrade marks update happened so we can handle grace period -func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi.ActionUpgrade) error { +func (u *Upgrader) markUpgrade(_ context.Context, log *logger.Logger, hash string, action *fleetapi.ActionUpgrade) error { prevVersion := release.Version() prevHash := release.Commit() if len(prevHash) > hashLen { @@ -112,11 +113,12 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi. } markerPath := markerFilePath() + log.Infow("Writing upgrade marker file", "file.path", markerPath, "hash", marker.Hash, "prev_hash", prevHash) if err := ioutil.WriteFile(markerPath, markerBytes, 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to create update marker file", errors.M(errors.MetaKeyPath, markerPath)) } - if err := UpdateActiveCommit(hash); err != nil { + if err := UpdateActiveCommit(log, hash); err != nil { return err } @@ -124,8 +126,9 @@ func (u *Upgrader) markUpgrade(_ context.Context, hash string, action *fleetapi. } // UpdateActiveCommit updates active.commit file to point to active version. 
-func UpdateActiveCommit(hash string) error { +func UpdateActiveCommit(log *logger.Logger, hash string) error { activeCommitPath := filepath.Join(paths.Top(), agentCommitFile) + log.Infow("Updating active commit", "file.path", activeCommitPath, "hash", hash) if err := ioutil.WriteFile(activeCommitPath, []byte(hash), 0600); err != nil { return errors.New(err, errors.TypeFilesystem, "failed to update active commit", errors.M(errors.MetaKeyPath, activeCommitPath)) } @@ -134,8 +137,9 @@ func UpdateActiveCommit(hash string) error { } // CleanMarker removes a marker from disk. -func CleanMarker() error { +func CleanMarker(log *logger.Logger) error { markerFile := markerFilePath() + log.Debugw("Removing marker file", "file.path", markerFile) if err := os.Remove(markerFile); !os.IsNotExist(err) { return err } diff --git a/internal/pkg/agent/application/upgrade/step_relink.go b/internal/pkg/agent/application/upgrade/step_relink.go index 9c998262ecd..13c49693062 100644 --- a/internal/pkg/agent/application/upgrade/step_relink.go +++ b/internal/pkg/agent/application/upgrade/step_relink.go @@ -14,23 +14,32 @@ import ( "github.com/elastic/elastic-agent-libs/file" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +const ( + windows = "windows" + exe = ".exe" ) // ChangeSymlink updates symlink paths to match current version. -func ChangeSymlink(ctx context.Context, targetHash string) error { +func ChangeSymlink(ctx context.Context, log *logger.Logger, targetHash string) error { // create symlink to elastic-agent-{hash} hashedDir := fmt.Sprintf("%s-%s", agentName, targetHash) symlinkPath := filepath.Join(paths.Top(), agentName) - newPath := filepath.Join(paths.Top(), "data", hashedDir, agentName) + + // paths.BinaryPath properly derives the binary directory depending on the platform. The path to the binary for macOS is inside of the app bundle. + newPath := paths.BinaryPath(filepath.Join(paths.Top(), "data", hashedDir), agentName) // handle windows suffixes - if runtime.GOOS == "windows" { - symlinkPath += ".exe" - newPath += ".exe" + if runtime.GOOS == windows { + symlinkPath += exe + newPath += exe } prevNewPath := prevSymlinkPath() + log.Infow("Changing symlink", "symlink_path", symlinkPath, "new_path", newPath, "prev_path", prevNewPath) // remove symlink to avoid upgrade failures if err := os.Remove(prevNewPath); !os.IsNotExist(err) { @@ -49,7 +58,7 @@ func prevSymlinkPath() string { agentPrevName := agentName + ".prev" // handle windows suffixes - if runtime.GOOS == "windows" { + if runtime.GOOS == windows { agentPrevName = agentName + ".exe.prev" } diff --git a/internal/pkg/agent/application/upgrade/step_unpack.go b/internal/pkg/agent/application/upgrade/step_unpack.go index 108593c5083..45d007e55f4 100644 --- a/internal/pkg/agent/application/upgrade/step_unpack.go +++ b/internal/pkg/agent/application/upgrade/step_unpack.go @@ -8,10 +8,8 @@ import ( "archive/tar" "archive/zip" "compress/gzip" - "context" "fmt" "io" - "io/ioutil" "os" "path/filepath" "runtime" @@ -21,27 +19,31 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" + "github.com/elastic/elastic-agent/pkg/core/logger" ) // unpack unpacks archive correctly, skips root (symlink, config...) 
unpacks data/* -func (u *Upgrader) unpack(ctx context.Context, version, archivePath string) (string, error) { +func (u *Upgrader) unpack(version, archivePath string) (string, error) { // unpack must occur in directory that holds the installation directory // or the extraction will be double nested var hash string var err error - if runtime.GOOS == "windows" { - hash, err = unzip(version, archivePath) + if runtime.GOOS == windows { + hash, err = unzip(u.log, archivePath) } else { - hash, err = untar(version, archivePath) + hash, err = untar(u.log, version, archivePath) } + if err != nil { + u.log.Errorw("Failed to unpack upgrade artifact", "error.message", err, "version", version, "file.path", archivePath, "hash", hash) return "", err } + u.log.Infow("Unpacked upgrade artifact", "version", version, "file.path", archivePath, "hash", hash) return hash, nil } -func unzip(version, archivePath string) (string, error) { +func unzip(log *logger.Logger, archivePath string) (string, error) { var hash, rootDir string r, err := zip.OpenReader(archivePath) if err != nil { @@ -65,7 +67,7 @@ func unzip(version, archivePath string) (string, error) { //get hash fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := ioutil.ReadAll(rc) + hashBytes, err := io.ReadAll(rc) if err != nil || len(hashBytes) < hashLen { return err } @@ -82,9 +84,11 @@ func unzip(version, archivePath string) (string, error) { path := filepath.Join(paths.Data(), strings.TrimPrefix(fileName, "data/")) if f.FileInfo().IsDir() { - os.MkdirAll(path, f.Mode()) + log.Debugw("Unpacking directory", "archive", "zip", "file.path", path) + _ = os.MkdirAll(path, f.Mode()) } else { - os.MkdirAll(filepath.Dir(path), f.Mode()) + log.Debugw("Unpacking file", "archive", "zip", "file.path", path) + _ = os.MkdirAll(filepath.Dir(path), f.Mode()) f, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, f.Mode()) if err != nil { return err @@ -95,6 +99,7 @@ func unzip(version, archivePath string) (string, error) { } }() + //nolint:gosec // legacy if _, err = io.Copy(f, rc); err != nil { return err } @@ -119,7 +124,7 @@ func unzip(version, archivePath string) (string, error) { return hash, nil } -func untar(version, archivePath string) (string, error) { +func untar(log *logger.Logger, version string, archivePath string) (string, error) { r, err := os.Open(archivePath) if err != nil { return "", errors.New(fmt.Sprintf("artifact for 'elastic-agent' version '%s' could not be found at '%s'", version, archivePath), errors.TypeFilesystem, errors.M(errors.MetaKeyPath, archivePath)) @@ -157,7 +162,7 @@ func untar(version, archivePath string) (string, error) { fileName := strings.TrimPrefix(f.Name, fileNamePrefix) if fileName == agentCommitFile { - hashBytes, err := ioutil.ReadAll(tr) + hashBytes, err := io.ReadAll(tr) if err != nil || len(hashBytes) < hashLen { return "", err } @@ -183,6 +188,7 @@ func untar(version, archivePath string) (string, error) { mode := fi.Mode() switch { case mode.IsRegular(): + log.Debugw("Unpacking file", "archive", "tar", "file.path", abs) // just to be sure, it should already be created by Dir type if err := os.MkdirAll(filepath.Dir(abs), 0755); err != nil { return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) @@ -193,6 +199,7 @@ func untar(version, archivePath string) (string, error) { return "", errors.New(err, "TarInstaller: creating file "+abs, errors.TypeFilesystem, 
errors.M(errors.MetaKeyPath, abs)) } + //nolint:gosec // legacy _, err = io.Copy(wf, tr) if closeErr := wf.Close(); closeErr != nil && err == nil { err = closeErr @@ -201,6 +208,7 @@ func untar(version, archivePath string) (string, error) { return "", fmt.Errorf("TarInstaller: error writing to %s: %w", abs, err) } case mode.IsDir(): + log.Debugw("Unpacking directory", "archive", "tar", "file.path", abs) if err := os.MkdirAll(abs, 0755); err != nil { return "", errors.New(err, "TarInstaller: creating directory for file "+abs, errors.TypeFilesystem, errors.M(errors.MetaKeyPath, abs)) } diff --git a/internal/pkg/agent/application/upgrade/upgrade.go b/internal/pkg/agent/application/upgrade/upgrade.go index 31f48d8d0d0..e4ef8c6066f 100644 --- a/internal/pkg/agent/application/upgrade/upgrade.go +++ b/internal/pkg/agent/application/upgrade/upgrade.go @@ -7,7 +7,6 @@ package upgrade import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "strings" @@ -113,26 +112,27 @@ func (u *Upgrader) Upgradeable() bool { // Upgrade upgrades running agent, function returns shutdown callback that must be called by reexec. func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string, action *fleetapi.ActionUpgrade) (_ reexec.ShutdownCallbackFn, err error) { + u.log.Infow("Upgrading agent", "version", version, "source_uri", sourceURI) span, ctx := apm.StartSpan(ctx, "upgrade", "app.internal") defer span.End() - err = preUpgradeCleanup(u.agentInfo.Version()) + err = cleanNonMatchingVersionsFromDownloads(u.log, u.agentInfo.Version()) if err != nil { - u.log.Errorf("Unable to clean downloads dir %q before update: %v", paths.Downloads(), err) + u.log.Errorw("Unable to clean downloads before update", "error.message", err, "downloads.path", paths.Downloads()) } sourceURI = u.sourceURI(sourceURI) archivePath, err := u.downloadArtifact(ctx, version, sourceURI) if err != nil { - // Run the same preUpgradeCleanup task to get rid of any newly downloaded files + // Run the same pre-upgrade cleanup task to get rid of any newly downloaded files // This may have an issue if users are upgrading to the same version number. 
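Per the comment above, re-running the downloads cleanup after a failed download removes the freshly fetched files because cleanup keeps only entries naming the running version. A rough standalone sketch of that filtering, assuming a simple substring match on the file name (the real matching in cleanup.go is not shown in this hunk and may differ):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// cleanNonMatching removes every file in dir whose name does not mention
// version. Illustrative only: a stand-in for the patch's cleanup helper.
func cleanNonMatching(dir, version string) error {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return fmt.Errorf("unable to read directory %q: %w", dir, err)
	}
	for _, e := range entries {
		if strings.Contains(e.Name(), version) {
			continue // keep artifacts for the running version
		}
		if err := os.RemoveAll(filepath.Join(dir, e.Name())); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	dir, _ := os.MkdirTemp("", "downloads")
	defer os.RemoveAll(dir)
	_ = os.WriteFile(filepath.Join(dir, "elastic-agent-8.4.0.tgz"), nil, 0o600)
	_ = os.WriteFile(filepath.Join(dir, "elastic-agent-8.3.0.tgz"), nil, 0o600)
	_ = cleanNonMatching(dir, "8.4.0")
	left, _ := os.ReadDir(dir)
	for _, e := range left {
		fmt.Println(e.Name()) // only the 8.4.0 artifact remains
	}
}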
- if dErr := preUpgradeCleanup(u.agentInfo.Version()); dErr != nil { - u.log.Errorf("Unable to remove file after verification failure: %v", dErr) + if dErr := cleanNonMatchingVersionsFromDownloads(u.log, u.agentInfo.Version()); dErr != nil { + u.log.Errorw("Unable to remove file after verification failure", "error.message", dErr) } return nil, err } - newHash, err := u.unpack(ctx, version, archivePath) + newHash, err := u.unpack(version, archivePath) if err != nil { return nil, err } @@ -146,31 +146,35 @@ func (u *Upgrader) Upgrade(ctx context.Context, version string, sourceURI string return nil, nil } - if err := copyActionStore(newHash); err != nil { + if err := copyActionStore(u.log, newHash); err != nil { return nil, errors.New(err, "failed to copy action store") } - if err := ChangeSymlink(ctx, newHash); err != nil { - rollbackInstall(ctx, newHash) + if err := ChangeSymlink(ctx, u.log, newHash); err != nil { + u.log.Errorw("Rolling back: changing symlink failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } - if err := u.markUpgrade(ctx, newHash, action); err != nil { - rollbackInstall(ctx, newHash) + if err := u.markUpgrade(ctx, u.log, newHash, action); err != nil { + u.log.Errorw("Rolling back: marking upgrade failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } if err := InvokeWatcher(u.log); err != nil { - rollbackInstall(ctx, newHash) + u.log.Errorw("Rolling back: starting watcher failed", "error.message", err) + rollbackInstall(ctx, u.log, newHash) return nil, err } cb := shutdownCallback(u.log, paths.Home(), release.Version(), version, release.TrimCommit(newHash)) // Clean everything from the downloads dir + u.log.Debugw("Removing downloads directory", "file.path", paths.Downloads()) err = os.RemoveAll(paths.Downloads()) if err != nil { - u.log.Errorf("Unable to clean downloads dir %q after update: %v", paths.Downloads(), err) + u.log.Errorw("Unable to clean downloads after update", "error.message", err, "file.path", paths.Downloads()) } return cb, nil @@ -212,20 +216,21 @@ func (u *Upgrader) sourceURI(retrievedURI string) string { return u.settings.SourceURI } -func rollbackInstall(ctx context.Context, hash string) { +func rollbackInstall(ctx context.Context, log *logger.Logger, hash string) { os.RemoveAll(filepath.Join(paths.Data(), fmt.Sprintf("%s-%s", agentName, hash))) - _ = ChangeSymlink(ctx, release.ShortCommit()) + _ = ChangeSymlink(ctx, log, release.ShortCommit()) } -func copyActionStore(newHash string) error { +func copyActionStore(log *logger.Logger, newHash string) error { // copies legacy action_store.yml, state.yml and state.enc encrypted file if exists storePaths := []string{paths.AgentActionStoreFile(), paths.AgentStateStoreYmlFile(), paths.AgentStateStoreFile()} + newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) + log.Debugw("Copying action store", "new_home_path", newHome) for _, currentActionStorePath := range storePaths { - newHome := filepath.Join(filepath.Dir(paths.Home()), fmt.Sprintf("%s-%s", agentName, newHash)) newActionStorePath := filepath.Join(newHome, filepath.Base(currentActionStorePath)) - - currentActionStore, err := ioutil.ReadFile(currentActionStorePath) + log.Debugw("Copying action store path", "from", currentActionStorePath, "to", newActionStorePath) + currentActionStore, err := os.ReadFile(currentActionStorePath) if os.IsNotExist(err) { // nothing to copy continue @@ -234,7 +239,7 @@ func copyActionStore(newHash string) error { 
return err } - if err := ioutil.WriteFile(newActionStorePath, currentActionStore, 0600); err != nil { + if err := os.WriteFile(newActionStorePath, currentActionStore, 0600); err != nil { return err } } diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 74e574c4806..baa21918695 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -649,8 +649,13 @@ func performGET(cfg setupConfig, client *kibana.Client, path string, response in for i := 0; i < cfg.Kibana.RetryMaxCount; i++ { code, result, err := client.Connection.Request("GET", path, nil, nil, nil) if err != nil || code != 200 { - err = fmt.Errorf("http GET request to %s%s fails: %w. Response: %s", - client.Connection.URL, path, err, truncateString(result)) + if err != nil { + err = fmt.Errorf("http GET request to %s%s fails: %w. Response: %s", + client.Connection.URL, path, err, truncateString(result)) + } else { + err = fmt.Errorf("http GET request to %s%s fails. StatusCode: %d Response: %s", + client.Connection.URL, path, code, truncateString(result)) + } fmt.Fprintf(writer, "%s failed: %s\n", msg, err) <-time.After(cfg.Kibana.RetrySleepDuration) continue @@ -668,8 +673,13 @@ func performPOST(cfg setupConfig, client *kibana.Client, path string, writer io. for i := 0; i < cfg.Kibana.RetryMaxCount; i++ { code, result, err := client.Connection.Request("POST", path, nil, nil, nil) if err != nil || code >= 400 { - err = fmt.Errorf("http POST request to %s%s fails: %w. Response: %s", - client.Connection.URL, path, err, truncateString(result)) + if err != nil { + err = fmt.Errorf("http POST request to %s%s fails: %w. Response: %s", + client.Connection.URL, path, err, truncateString(result)) + } else { + err = fmt.Errorf("http POST request to %s%s fails. 
StatusCode: %d Response: %s",
+					client.Connection.URL, path, code, truncateString(result))
+			}
 			lastErr = err
 			fmt.Fprintf(writer, "%s failed: %s\n", msg, err)
 			<-time.After(cfg.Kibana.RetrySleepDuration)
diff --git a/internal/pkg/agent/cmd/upgrade.go b/internal/pkg/agent/cmd/upgrade.go
index 83128b970e8..5e5d75aeeba 100644
--- a/internal/pkg/agent/cmd/upgrade.go
+++ b/internal/pkg/agent/cmd/upgrade.go
@@ -36,8 +36,6 @@ func newUpgradeCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Comman
 }
 
 func upgradeCmd(streams *cli.IOStreams, cmd *cobra.Command, args []string) error {
-	fmt.Fprintln(streams.Out, "The upgrade process of Elastic Agent is currently EXPERIMENTAL and should not be used in production")
-
 	version := args[0]
 	sourceURI, _ := cmd.Flags().GetString("source-uri")
diff --git a/internal/pkg/agent/cmd/watch.go b/internal/pkg/agent/cmd/watch.go
index 64bd604cd85..353017b714e 100644
--- a/internal/pkg/agent/cmd/watch.go
+++ b/internal/pkg/agent/cmd/watch.go
@@ -15,6 +15,7 @@ import (
 
 	"github.com/spf13/cobra"
 
+	"github.com/elastic/elastic-agent-libs/logp"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/filelock"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
 	"github.com/elastic/elastic-agent/internal/pkg/agent/application/upgrade"
@@ -40,8 +41,14 @@ func newWatchCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command
 		Short: "Watch watches Elastic Agent for failures and initiates rollback.",
 		Long:  `Watch watches Elastic Agent for failures and initiates rollback.`,
 		Run: func(_ *cobra.Command, _ []string) {
-			if err := watchCmd(); err != nil {
-				fmt.Fprintf(streams.Err, "Error: %v\n%s\n", err, troubleshootMessage())
+			log, err := configuredLogger()
+			if err != nil {
+				fmt.Fprintf(streams.Err, "Error configuring logger: %v\n%s\n", err, troubleshootMessage())
+				os.Exit(1)
+			}
+			if err := watchCmd(log); err != nil {
+				log.Errorw("Watch command failed", "error.message", err)
+				fmt.Fprintf(streams.Err, "Watch command failed: %v\n%s\n", err, troubleshootMessage())
 				os.Exit(1)
 			}
 		},
@@ -50,12 +56,7 @@ func newWatchCommandWithArgs(_ []string, streams *cli.IOStreams) *cobra.Command
 	return cmd
 }
 
-func watchCmd() error {
-	log, err := configuredLogger()
-	if err != nil {
-		return err
-	}
-
+func watchCmd(log *logp.Logger) error {
 	marker, err := upgrade.LoadMarker()
 	if err != nil {
 		log.Error("failed to load marker", err)
@@ -88,7 +89,7 @@ func watchCmd() error {
 	// if we're not within grace and marker is still there it might mean
 	// that cleanup was not performed ok, cleanup everything except current version
 	// hash is the same as hash of agent which initiated watcher.
-	if err := upgrade.Cleanup(release.ShortCommit(), true); err != nil {
+	if err := upgrade.Cleanup(log, release.ShortCommit(), true); err != nil {
 		log.Error("rollback failed", err)
 	}
 	// exit nicely
@@ -97,8 +98,8 @@
 	ctx := context.Background()
 	if err := watch(ctx, tilGrace, log); err != nil {
-		log.Debugf("Error detected proceeding to rollback: %v", err)
-		err = upgrade.Rollback(ctx, marker.PrevHash, marker.Hash)
+		log.Errorf("Error detected proceeding to rollback: %v", err)
+		err = upgrade.Rollback(ctx, log, marker.PrevHash, marker.Hash)
 		if err != nil {
 			log.Error("rollback failed", err)
 		}
@@ -109,7 +110,7 @@
 	// in windows it might leave self untouched, this will get cleaned up
 	// later at the start, because for windows we leave marker untouched.
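The watcher logic above and below reduces to one decision: if the grace period has already elapsed, clean up and exit; otherwise watch until it elapses and roll back on error. A compressed standalone sketch of that flow (durations and the always-failing watch stub are illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// watch would block until the grace period ends, returning an error if the
// newly upgraded agent misbehaves. Stubbed here to always fail.
func watch(ctx context.Context, tilGrace time.Duration) error {
	return errors.New("agent crashed during grace period")
}

func watcherFlow(updatedOn time.Time, gracePeriod time.Duration) {
	tilGrace := time.Until(updatedOn.Add(gracePeriod))
	if tilGrace <= 0 {
		fmt.Println("grace elapsed: clean up old versions, exit nicely")
		return
	}
	if err := watch(context.Background(), tilGrace); err != nil {
		fmt.Println("error detected, proceeding to rollback:", err)
		return
	}
	fmt.Println("grace passed cleanly: clean up the previous version")
}

func main() {
	watcherFlow(time.Now(), 10*time.Minute)
}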
removeMarker := !isWindows() - err = upgrade.Cleanup(marker.Hash, removeMarker) + err = upgrade.Cleanup(log, marker.Hash, removeMarker) if err != nil { log.Error("rollback failed", err) } diff --git a/internal/pkg/agent/control/server/listener_windows.go b/internal/pkg/agent/control/server/listener_windows.go index 69d211502ea..73fd3b97d95 100644 --- a/internal/pkg/agent/control/server/listener_windows.go +++ b/internal/pkg/agent/control/server/listener_windows.go @@ -10,6 +10,7 @@ package server import ( "net" "os/user" + "strings" "github.com/pkg/errors" @@ -18,9 +19,14 @@ import ( "github.com/elastic/elastic-agent/pkg/core/logger" ) +const ( + NTAUTHORITY_SYSTEM = "S-1-5-18" + ADMINISTRATORS_GROUP = "S-1-5-32-544" +) + // createListener creates a named pipe listener on Windows -func createListener(_ *logger.Logger) (net.Listener, error) { - sd, err := securityDescriptor() +func createListener(log *logger.Logger) (net.Listener, error) { + sd, err := securityDescriptor(log) if err != nil { return nil, err } @@ -31,7 +37,7 @@ func cleanupListener(_ *logger.Logger) { // nothing to do on windows } -func securityDescriptor() (string, error) { +func securityDescriptor(log *logger.Logger) (string, error) { u, err := user.Current() if err != nil { return "", errors.Wrap(err, "failed to get current user") @@ -42,11 +48,42 @@ func securityDescriptor() (string, error) { // String definition: https://docs.microsoft.com/en-us/windows/win32/secauthz/ace-strings // Give generic read/write access to the specified user. descriptor := "D:P(A;;GA;;;" + u.Uid + ")" - if u.Username == "NT AUTHORITY\\SYSTEM" { + + if isAdmin, err := isWindowsAdmin(u); err != nil { + // do not fail, agent would end up in a loop, continue with limited permissions + log.Warnf("failed to detect admin: %w", err) + } else if isAdmin { // running as SYSTEM, include Administrators group so Administrators can talk over // the named pipe to the running Elastic Agent system process // https://support.microsoft.com/en-us/help/243330/well-known-security-identifiers-in-windows-operating-systems - descriptor += "(A;;GA;;;S-1-5-32-544)" // Administrators group + descriptor += "(A;;GA;;;" + ADMINISTRATORS_GROUP + ")" } return descriptor, nil } + +func isWindowsAdmin(u *user.User) (bool, error) { + if u.Username == "NT AUTHORITY\\SYSTEM" { + return true, nil + } + + if equalsSystemGroup(u.Uid) || equalsSystemGroup(u.Gid) { + return true, nil + } + + groups, err := u.GroupIds() + if err != nil { + return false, errors.Wrap(err, "failed to get current user groups") + } + + for _, groupSid := range groups { + if equalsSystemGroup(groupSid) { + return true, nil + } + } + + return false, nil +} + +func equalsSystemGroup(s string) bool { + return strings.EqualFold(s, NTAUTHORITY_SYSTEM) || strings.EqualFold(s, ADMINISTRATORS_GROUP) +} diff --git a/internal/pkg/agent/install/install.go b/internal/pkg/agent/install/install.go index 58f3fa73312..431fd1db931 100644 --- a/internal/pkg/agent/install/install.go +++ b/internal/pkg/agent/install/install.go @@ -6,17 +6,20 @@ package install import ( "fmt" - "io/ioutil" "os" "path/filepath" + "runtime" "github.com/otiai10/copy" - "github.com/elastic/elastic-agent/internal/pkg/agent/application/info" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" ) +const ( + darwin = "darwin" +) + // Install installs Elastic Agent persistently on the system including creating and starting its service. 
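The named-pipe ACL above is assembled as an SDDL string: a generic-all (GA) ACE for the current user's SID, plus one for the Administrators group when the agent runs privileged. A standalone sketch of that string construction (the plain-user SID below is hypothetical; the well-known SIDs mirror NTAUTHORITY_SYSTEM and ADMINISTRATORS_GROUP above):

package main

import (
	"fmt"
	"strings"
)

const (
	ntAuthoritySystem   = "S-1-5-18"     // well-known SID for NT AUTHORITY\SYSTEM
	administratorsGroup = "S-1-5-32-544" // well-known SID for BUILTIN\Administrators
)

// securityDescriptor builds the SDDL string granting GA access to the given
// user SID, and to Administrators when any SID indicates a privileged caller.
func securityDescriptor(userSID string, groupSIDs []string) string {
	descriptor := "D:P(A;;GA;;;" + userSID + ")"
	for _, sid := range append([]string{userSID}, groupSIDs...) {
		if strings.EqualFold(sid, ntAuthoritySystem) || strings.EqualFold(sid, administratorsGroup) {
			descriptor += "(A;;GA;;;" + administratorsGroup + ")"
			break
		}
	}
	return descriptor
}

func main() {
	// A SYSTEM service gets the Administrators ACE appended.
	fmt.Println(securityDescriptor(ntAuthoritySystem, nil))
	// A plain user does not.
	fmt.Println(securityDescriptor("S-1-5-21-1-2-3-1001", nil))
}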
func Install(cfgFile string) error { dir, err := findDirectory() @@ -53,15 +56,39 @@ func Install(cfgFile string) error { // place shell wrapper, if present on platform if paths.ShellWrapperPath != "" { - err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755) - if err == nil { - err = ioutil.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755) - } - if err != nil { - return errors.New( - err, - fmt.Sprintf("failed to write shell wrapper (%s)", paths.ShellWrapperPath), - errors.M("destination", paths.ShellWrapperPath)) + // Install symlink for darwin instead of the wrapper script. + // Elastic Agent should be the first process that launchd starts in order to be able to grant + // Full-Disk Access (FDA) to the agent and its child processes. + // This is specifically important for osquery FDA permissions at the moment. + if runtime.GOOS == darwin { + // Check if previous shell wrapper or symlink exists and remove it so it can be overwritten + if _, err := os.Lstat(paths.ShellWrapperPath); err == nil { + if err := os.Remove(paths.ShellWrapperPath); err != nil { + return errors.New( + err, + fmt.Sprintf("failed to remove (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } + } + err = os.Symlink("/Library/Elastic/Agent/elastic-agent", paths.ShellWrapperPath) + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to create elastic-agent symlink (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } + } else { + err = os.MkdirAll(filepath.Dir(paths.ShellWrapperPath), 0755) + if err == nil { + //nolint: gosec // this is intended to be an executable shell script, not changing the permissions for the linter + err = os.WriteFile(paths.ShellWrapperPath, []byte(paths.ShellWrapper), 0755) + } + if err != nil { + return errors.New( + err, + fmt.Sprintf("failed to write shell wrapper (%s)", paths.ShellWrapperPath), + errors.M("destination", paths.ShellWrapperPath)) + } } } @@ -146,12 +173,7 @@ func findDirectory() (string, error) { if err != nil { return "", err } - sourceDir := filepath.Dir(execPath) - if info.IsInsideData(sourceDir) { - // executable path is being reported as being down inside of data path - // move up to directories to perform the copy - sourceDir = filepath.Dir(filepath.Dir(sourceDir)) - } + sourceDir := paths.ExecDir(filepath.Dir(execPath)) err = verifyDirectory(sourceDir) if err != nil { return "", err diff --git a/internal/pkg/agent/vars/vars.go b/internal/pkg/agent/vars/vars.go index b685583895f..65c0ef2ae1f 100644 --- a/internal/pkg/agent/vars/vars.go +++ b/internal/pkg/agent/vars/vars.go @@ -22,7 +22,7 @@ func WaitForVariables(ctx context.Context, l *logger.Logger, cfg *config.Config, var cancel context.CancelFunc var vars []*transpiler.Vars - composable, err := composable.New(l, cfg) + composable, err := composable.New(l, cfg, false) if err != nil { return nil, fmt.Errorf("failed to create composable controller: %w", err) } diff --git a/internal/pkg/agent/vault/vault_darwin.c b/internal/pkg/agent/vault/vault_darwin.c index c2bb85bb354..b5c1777dac1 100644 --- a/internal/pkg/agent/vault/vault_darwin.c +++ b/internal/pkg/agent/vault/vault_darwin.c @@ -209,10 +209,10 @@ OSStatus RemoveKeychainItem(SecKeychainRef keychain, const char *name, const cha char* GetOSStatusMessage(OSStatus status) { CFStringRef s = SecCopyErrorMessageString(status, NULL); char *p; - int n; - n = CFStringGetLength(s)*8; - p = malloc(n); - CFStringGetCString(s, p, n, kCFStringEncodingUTF8); + int n; + n =
CFStringGetLength(s)*8; + p = malloc(n); + CFStringGetCString(s, p, n, kCFStringEncodingUTF8); CFRelease(s); - return p; + return p; } diff --git a/internal/pkg/composable/context.go b/internal/pkg/composable/context.go index 97767f4a5d5..3a805efd249 100644 --- a/internal/pkg/composable/context.go +++ b/internal/pkg/composable/context.go @@ -14,7 +14,7 @@ import ( ) // ContextProviderBuilder creates a new context provider based on the given config and returns it. -type ContextProviderBuilder func(log *logger.Logger, config *config.Config) (corecomp.ContextProvider, error) +type ContextProviderBuilder func(log *logger.Logger, config *config.Config, managed bool) (corecomp.ContextProvider, error) // MustAddContextProvider adds a new ContextProviderBuilder and panics if it AddContextProvider returns an error. func (r *providerRegistry) MustAddContextProvider(name string, builder ContextProviderBuilder) { @@ -24,6 +24,7 @@ func (r *providerRegistry) MustAddContextProvider(name string, builder ContextPr } } +//nolint:dupl,goimports,nolintlint // false positive // AddContextProvider adds a new ContextProviderBuilder func (r *providerRegistry) AddContextProvider(name string, builder ContextProviderBuilder) error { r.lock.Lock() @@ -32,11 +33,14 @@ func (r *providerRegistry) AddContextProvider(name string, builder ContextProvid if name == "" { return fmt.Errorf("provider name is required") } + if strings.ToLower(name) != name { return fmt.Errorf("provider name must be lowercase") } + _, contextExists := r.contextProviders[name] _, dynamicExists := r.dynamicProviders[name] + if contextExists || dynamicExists { return fmt.Errorf("provider '%s' is already registered", name) } diff --git a/internal/pkg/composable/controller.go b/internal/pkg/composable/controller.go index babd1230586..0af5a0d93e8 100644 --- a/internal/pkg/composable/controller.go +++ b/internal/pkg/composable/controller.go @@ -46,7 +46,7 @@ type controller struct { } // New creates a new controller. 
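
For orientation, the ContextProviderBuilder change above threads a new managed flag into every builder, reporting whether the agent runs under Fleet management. A compressed sketch of a provider that satisfies the new signature (illustrative names; the corecomp import path is assumed from the alias used in this patch):

    package noop

    import (
        corecomp "github.com/elastic/elastic-agent/internal/pkg/core/composable"
        "github.com/elastic/elastic-agent/internal/pkg/config"
        "github.com/elastic/elastic-agent/pkg/core/logger"
    )

    // noopProvider keeps the flag so Run can branch on it later.
    type noopProvider struct{ managed bool }

    // Run would publish mappings through comm; a real provider blocks here
    // until comm's context is cancelled.
    func (p *noopProvider) Run(_ corecomp.ContextProviderComm) error { return nil }

    // Builder matches the new signature: logger, raw provider config, managed flag.
    func Builder(_ *logger.Logger, _ *config.Config, managed bool) (corecomp.ContextProvider, error) {
        return &noopProvider{managed: managed}, nil
    }

Registration itself is unchanged, e.g. composable.Providers.MustAddContextProvider("noop", Builder).
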
-func New(log *logger.Logger, c *config.Config) (Controller, error) { +func New(log *logger.Logger, c *config.Config, managed bool) (Controller, error) { l := log.Named("composable") var providersCfg Config @@ -65,7 +65,7 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { // explicitly disabled; skipping continue } - provider, err := builder(l, pCfg) + provider, err := builder(l, pCfg, managed) if err != nil { return nil, errors.New(err, fmt.Sprintf("failed to build provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) } @@ -82,7 +82,7 @@ func New(log *logger.Logger, c *config.Config) (Controller, error) { // explicitly disabled; skipping continue } - provider, err := builder(l.Named(strings.Join([]string{"providers", name}, ".")), pCfg) + provider, err := builder(l.Named(strings.Join([]string{"providers", name}, ".")), pCfg, managed) if err != nil { return nil, errors.New(err, fmt.Sprintf("failed to build provider '%s'", name), errors.TypeConfig, errors.M("provider", name)) } diff --git a/internal/pkg/composable/controller_test.go b/internal/pkg/composable/controller_test.go index a8c3ec7df93..d4fdbb8fdfc 100644 --- a/internal/pkg/composable/controller_test.go +++ b/internal/pkg/composable/controller_test.go @@ -78,7 +78,7 @@ func TestController(t *testing.T) { log, err := logger.New("", false) require.NoError(t, err) - c, err := composable.New(log, cfg) + c, err := composable.New(log, cfg, false) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) diff --git a/internal/pkg/composable/dynamic.go b/internal/pkg/composable/dynamic.go index c83c2ccc2e2..22ff438fe71 100644 --- a/internal/pkg/composable/dynamic.go +++ b/internal/pkg/composable/dynamic.go @@ -34,7 +34,7 @@ type DynamicProvider interface { } // DynamicProviderBuilder creates a new dynamic provider based on the given config and returns it. -type DynamicProviderBuilder func(log *logger.Logger, config *config.Config) (DynamicProvider, error) +type DynamicProviderBuilder func(log *logger.Logger, config *config.Config, managed bool) (DynamicProvider, error) // MustAddDynamicProvider adds a new DynamicProviderBuilder and panics if it AddDynamicProvider returns an error. 
func (r *providerRegistry) MustAddDynamicProvider(name string, builder DynamicProviderBuilder) { @@ -44,28 +44,29 @@ func (r *providerRegistry) MustAddDynamicProvider(name string, builder DynamicPr } } +//nolint:dupl,goimports,nolintlint // false positive // AddDynamicProvider adds a new DynamicProviderBuilder -func (r *providerRegistry) AddDynamicProvider(name string, builder DynamicProviderBuilder) error { +func (r *providerRegistry) AddDynamicProvider(providerName string, builder DynamicProviderBuilder) error { r.lock.Lock() defer r.lock.Unlock() - if name == "" { - return fmt.Errorf("provider name is required") + if providerName == "" { + return fmt.Errorf("provider name is required") } - if strings.ToLower(name) != name { - return fmt.Errorf("provider name must be lowercase") + if strings.ToLower(providerName) != providerName { + return fmt.Errorf("provider name must be lowercase") } - _, contextExists := r.contextProviders[name] - _, dynamicExists := r.dynamicProviders[name] + _, contextExists := r.contextProviders[providerName] + _, dynamicExists := r.dynamicProviders[providerName] if contextExists || dynamicExists { - return fmt.Errorf("provider '%s' is already registered", name) + return fmt.Errorf("provider '%s' is already registered", providerName) } if builder == nil { - return fmt.Errorf("provider '%s' cannot be registered with a nil factory", name) + return fmt.Errorf("provider '%s' cannot be registered with a nil factory", providerName) } - r.dynamicProviders[name] = builder - r.logger.Debugf("Registered provider: %s", name) + r.dynamicProviders[providerName] = builder + r.logger.Debugf("Registered provider: %s", providerName) return nil } diff --git a/internal/pkg/composable/providers/agent/agent.go b/internal/pkg/composable/providers/agent/agent.go index ed8eb956afe..2fb5bb284e5 100644 --- a/internal/pkg/composable/providers/agent/agent.go +++ b/internal/pkg/composable/providers/agent/agent.go @@ -42,6 +42,6 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, _ bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/agent/agent_test.go b/internal/pkg/composable/providers/agent/agent_test.go index f3c6904b05c..cd15e8058ea 100644 --- a/internal/pkg/composable/providers/agent/agent_test.go +++ b/internal/pkg/composable/providers/agent/agent_test.go @@ -20,7 +20,7 @@ func TestContextProvider(t *testing.T) { testutils.InitStorage(t) builder, _ := composable.Providers.GetContextProvider("agent") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/docker/docker.go b/internal/pkg/composable/providers/docker/docker.go index 8647677e6e8..fa58b00a880 100644 --- a/internal/pkg/composable/providers/docker/docker.go +++ b/internal/pkg/composable/providers/docker/docker.go @@ -102,7 +102,7 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { } // DynamicProviderBuilder builds the dynamic provider.
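
Dynamic providers follow the same pattern but emit one keyed mapping per discovered entity through comm.AddOrUpdate, ranked by a priority. A stripped-down dynamic provider against the new three-argument builder signature (a sketch; the single static mapping is purely illustrative):

    package static

    import (
        "github.com/elastic/elastic-agent/internal/pkg/composable"
        "github.com/elastic/elastic-agent/internal/pkg/config"
        "github.com/elastic/elastic-agent/pkg/core/logger"
    )

    type staticProvider struct{}

    // Run emits a single mapping and returns; long-lived providers keep
    // watching their source and call AddOrUpdate/Remove as entities change.
    func (p *staticProvider) Run(comm composable.DynamicProviderComm) error {
        mapping := map[string]interface{}{
            "service": map[string]interface{}{"name": "demo"},
        }
        // nil processors; priority 0 is arbitrary here (the kubernetes provider
        // uses PodPriority/ContainerPriority to rank overlapping mappings).
        return comm.AddOrUpdate("demo-id", 0, mapping, nil)
    }

    // Builder satisfies the new DynamicProviderBuilder signature.
    func Builder(_ *logger.Logger, _ *config.Config, _ bool) (composable.DynamicProvider, error) {
        return &staticProvider{}, nil
    }
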
-func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (composable.DynamicProvider, error) { var cfg Config if c == nil { c = config.New() @@ -146,7 +146,7 @@ func generateData(event bus.Event) (*dockerContainerData, error) { "image": container.Image, "labels": processorLabelMap, }, - "to": "container", + "target": "container", }, }, }, diff --git a/internal/pkg/composable/providers/docker/docker_test.go b/internal/pkg/composable/providers/docker/docker_test.go index d0b5c69ba4d..a035fe06a58 100644 --- a/internal/pkg/composable/providers/docker/docker_test.go +++ b/internal/pkg/composable/providers/docker/docker_test.go @@ -53,7 +53,7 @@ func TestGenerateData(t *testing.T) { "co_elastic_logs/disable": "true", }, }, - "to": "container", + "target": "container", }, }, } diff --git a/internal/pkg/composable/providers/env/env.go b/internal/pkg/composable/providers/env/env.go index 6f65120de48..ac6ef4be446 100644 --- a/internal/pkg/composable/providers/env/env.go +++ b/internal/pkg/composable/providers/env/env.go @@ -31,7 +31,7 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, _ bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/env/env_test.go b/internal/pkg/composable/providers/env/env_test.go index f41f6200697..a03f37ee577 100644 --- a/internal/pkg/composable/providers/env/env_test.go +++ b/internal/pkg/composable/providers/env/env_test.go @@ -17,7 +17,7 @@ import ( func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("env") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/host/host.go b/internal/pkg/composable/providers/host/host.go index cc98021e77b..b722a5f4c69 100644 --- a/internal/pkg/composable/providers/host/host.go +++ b/internal/pkg/composable/providers/host/host.go @@ -77,7 +77,7 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. 
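
The "to" to "target" rename in the docker hunk above matters because the emitted entry feeds the add_fields processor, which copies everything under "fields" into each event below the key named by "target". The generated processor is therefore shaped like this (placeholder values):

    processors := []map[string]interface{}{
        {
            "add_fields": map[string]interface{}{
                "fields": map[string]interface{}{ // copied into every event
                    "id":    "1234",
                    "name":  "mycontainer",
                    "image": "busybox",
                },
                "target": "container", // events gain container.id, container.name, ...
            },
        },
    }
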
-func ContextProviderBuilder(log *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(log *logger.Logger, c *config.Config, _ bool) (corecomp.ContextProvider, error) { p := &contextProvider{ logger: log, fetcher: getHostInfo, diff --git a/internal/pkg/composable/providers/host/host_test.go b/internal/pkg/composable/providers/host/host_test.go index 869f6a82050..7cf2f208abd 100644 --- a/internal/pkg/composable/providers/host/host_test.go +++ b/internal/pkg/composable/providers/host/host_test.go @@ -33,10 +33,10 @@ func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("host") log, err := logger.New("host_test", false) require.NoError(t, err) - provider, err := builder(log, c) + provider, err := builder(log, c, true) require.NoError(t, err) - hostProvider := provider.(*contextProvider) + hostProvider, _ := provider.(*contextProvider) hostProvider.fetcher = returnHostMapping() require.Equal(t, 100*time.Millisecond, hostProvider.CheckInterval) diff --git a/internal/pkg/composable/providers/kubernetes/config.go b/internal/pkg/composable/providers/kubernetes/config.go index 9bec67b66b8..4a97b417c59 100644 --- a/internal/pkg/composable/providers/kubernetes/config.go +++ b/internal/pkg/composable/providers/kubernetes/config.go @@ -7,6 +7,8 @@ package kubernetes import ( "time" + "github.com/elastic/elastic-agent-libs/config" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" "github.com/elastic/elastic-agent-libs/logp" @@ -34,6 +36,9 @@ type Config struct { LabelsDedot bool `config:"labels.dedot"` AnnotationsDedot bool `config:"annotations.dedot"` + + Hints *config.C `config:"hints"` + Prefix string `config:"prefix"` } // Resources config section for resources' config blocks @@ -56,6 +61,7 @@ func (c *Config) InitDefaults() { c.LabelsDedot = true c.AnnotationsDedot = true c.AddResourceMetadata = metadata.GetDefaultResourceMetadataConfig() + c.Prefix = "co.elastic" } // Validate ensures correctness of config diff --git a/internal/pkg/composable/providers/kubernetes/hints.go b/internal/pkg/composable/providers/kubernetes/hints.go new file mode 100644 index 00000000000..98bde12f54d --- /dev/null +++ b/internal/pkg/composable/providers/kubernetes/hints.go @@ -0,0 +1,260 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package kubernetes + +import ( + "fmt" + "regexp" + "strings" + + "github.com/elastic/elastic-agent-autodiscover/utils" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +const ( + integration = "package" + datastreams = "data_streams" + + host = "host" + period = "period" + timeout = "timeout" + metricspath = "metrics_path" + username = "username" + password = "password" + stream = "stream" // this is the container stream: stdout/stderr +) + +type hintsBuilder struct { + Key string + + logger *logp.Logger +} + +func (m *hintsBuilder) getIntegration(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, integration) +} + +func (m *hintsBuilder) getDataStreams(hints mapstr.M) []string { + ds := utils.GetHintAsList(hints, m.Key, datastreams) + return ds +} + +func (m *hintsBuilder) getHost(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, host) +} + +func (m *hintsBuilder) getStreamHost(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, host) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getPeriod(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, period) +} + +func (m *hintsBuilder) getStreamPeriod(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, period) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getTimeout(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, timeout) +} + +func (m *hintsBuilder) getStreamTimeout(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, timeout) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getMetricspath(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, metricspath) +} + +func (m *hintsBuilder) getStreamMetricspath(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, metricspath) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getUsername(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, username) +} + +func (m *hintsBuilder) getStreamUsername(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, username) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getPassword(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, password) +} + +func (m *hintsBuilder) getStreamPassword(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, password) + return utils.GetHintString(hints, m.Key, key) +} + +func (m *hintsBuilder) getContainerStream(hints mapstr.M) string { + return utils.GetHintString(hints, m.Key, stream) +} + +func (m *hintsBuilder) getStreamContainerStream(hints mapstr.M, streamName string) string { + key := fmt.Sprintf("%v.%v", streamName, stream) + return utils.GetHintString(hints, m.Key, key) +} + +// Replace hints like `'${kubernetes.pod.ip}:6379'` with the actual values from the resource metadata. 
+// So if you replace the `${kubernetes.pod.ip}` part with the value from the Pod's metadata +// you end up with something like `10.28.90.345:6379` +func (m *hintsBuilder) getFromMeta(value string, kubeMeta mapstr.M) string { + if value == "" { + return "" + } + r := regexp.MustCompile(`\${([^{}]+)}`) + matches := r.FindAllString(value, -1) + for _, match := range matches { + key := strings.TrimSuffix(strings.TrimPrefix(match, "${kubernetes."), "}") + val, err := kubeMeta.GetValue(key) + if err != nil { + m.logger.Debugf("cannot retrieve key from k8smeta: %v", key) + return "" + } + hintVal, ok := val.(string) + if !ok { + m.logger.Debugf("cannot convert value into string: %v", val) + return "" + } + value = strings.Replace(value, match, hintVal, -1) + } + return value +} + +// GenerateHintsMapping takes the hints map extracted from the annotations and constructs the final +// hints mapping to be emitted. +func GenerateHintsMapping(hints mapstr.M, kubeMeta mapstr.M, logger *logp.Logger, containerID string) mapstr.M { + builder := hintsBuilder{ + Key: "hints", // consider making this configurable + logger: logger, + } + + hintsMapping := mapstr.M{} + integration := builder.getIntegration(hints) + if integration == "" { + return hintsMapping + } + integrationHints := mapstr.M{} + + if containerID != "" { + _, _ = hintsMapping.Put("container_id", containerID) + // Add the default container log fallback to enable any template which defines + // a log input with a `"${kubernetes.hints.container_logs.enabled} == true"` condition + _, _ = integrationHints.Put("container_logs.enabled", true) + } + + // TODO: add support for processors + // Processors should be data_stream specific. + // Add a basic processor as a base like: + //- add_fields: + // target: kubernetes + // fields: + // hints: true + // Blocked by https://github.com/elastic/elastic-agent/issues/735 + + integrationHost := builder.getFromMeta(builder.getHost(hints), kubeMeta) + if integrationHost != "" { + _, _ = integrationHints.Put(host, integrationHost) + } + integrationPeriod := builder.getFromMeta(builder.getPeriod(hints), kubeMeta) + if integrationPeriod != "" { + _, _ = integrationHints.Put(period, integrationPeriod) + } + integrationTimeout := builder.getFromMeta(builder.getTimeout(hints), kubeMeta) + if integrationTimeout != "" { + _, _ = integrationHints.Put(timeout, integrationTimeout) + } + integrationMetricsPath := builder.getFromMeta(builder.getMetricspath(hints), kubeMeta) + if integrationMetricsPath != "" { + _, _ = integrationHints.Put(metricspath, integrationMetricsPath) + } + integrationUsername := builder.getFromMeta(builder.getUsername(hints), kubeMeta) + if integrationUsername != "" { + _, _ = integrationHints.Put(username, integrationUsername) + } + integrationPassword := builder.getFromMeta(builder.getPassword(hints), kubeMeta) + if integrationPassword != "" { + _, _ = integrationHints.Put(password, integrationPassword) + } + integrationContainerStream := builder.getFromMeta(builder.getContainerStream(hints), kubeMeta) + if integrationContainerStream != "" { + _, _ = integrationHints.Put(stream, integrationContainerStream) + } + + dataStreams := builder.getDataStreams(hints) + if len(dataStreams) == 0 { + _, _ = integrationHints.Put("enabled", true) + } + for _, dataStream := range dataStreams { + streamHints := mapstr.M{ + "enabled": true, + } + if integrationPeriod != "" { + _, _ = streamHints.Put(period, integrationPeriod) + } + if integrationHost != "" { + _, _ = streamHints.Put(host, integrationHost) + } + if
integrationTimeout != "" { + _, _ = streamHints.Put(timeout, integrationTimeout) + } + if integrationMetricsPath != "" { + _, _ = streamHints.Put(metricspath, integrationMetricsPath) + } + if integrationUsername != "" { + _, _ = streamHints.Put(username, integrationUsername) + } + if integrationPassword != "" { + _, _ = streamHints.Put(password, integrationPassword) + } + if integrationContainerStream != "" { + _, _ = streamHints.Put(stream, integrationContainerStream) + } + + streamPeriod := builder.getFromMeta(builder.getStreamPeriod(hints, dataStream), kubeMeta) + if streamPeriod != "" { + _, _ = streamHints.Put(period, streamPeriod) + } + streamHost := builder.getFromMeta(builder.getStreamHost(hints, dataStream), kubeMeta) + if streamHost != "" { + _, _ = streamHints.Put(host, streamHost) + } + streamTimeout := builder.getFromMeta(builder.getStreamTimeout(hints, dataStream), kubeMeta) + if streamTimeout != "" { + _, _ = streamHints.Put(timeout, streamTimeout) + } + streamMetricsPath := builder.getFromMeta(builder.getStreamMetricspath(hints, dataStream), kubeMeta) + if streamMetricsPath != "" { + _, _ = streamHints.Put(metricspath, streamMetricsPath) + } + streamUsername := builder.getFromMeta(builder.getStreamUsername(hints, dataStream), kubeMeta) + if streamUsername != "" { + _, _ = streamHints.Put(username, streamUsername) + } + streamPassword := builder.getFromMeta(builder.getStreamPassword(hints, dataStream), kubeMeta) + if streamPassword != "" { + _, _ = streamHints.Put(password, streamPassword) + } + streamContainerStream := builder.getFromMeta(builder.getStreamContainerStream(hints, dataStream), kubeMeta) + if streamContainerStream != "" { + _, _ = streamHints.Put(stream, streamContainerStream) + } + _, _ = integrationHints.Put(dataStream, streamHints) + + } + + _, _ = hintsMapping.Put(integration, integrationHints) + + return hintsMapping +} diff --git a/internal/pkg/composable/providers/kubernetes/hints_test.go b/internal/pkg/composable/providers/kubernetes/hints_test.go new file mode 100644 index 00000000000..04c25575f26 --- /dev/null +++ b/internal/pkg/composable/providers/kubernetes/hints_test.go @@ -0,0 +1,368 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package kubernetes + +import ( + "testing" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +func TestGenerateHintsMapping(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "info, key, keyspace", + "host": "${kubernetes.pod.ip}:6379", + "info": mapstr.M{"period": "1m", "timeout": "41s"}, + "key": mapstr.M{"period": "10m"}, + "package": "redis", + "password": "password", + "username": "username", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "redis": mapstr.M{ + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + "period": "42s", + "info": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "1m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "41s", + }, "key": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "10m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, "keyspace": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "42s", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithDefaults(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "host": "${kubernetes.pod.ip}:6379", + 
"package": "redis", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "redis": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithContainerID(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "info, key, keyspace", + "host": "${kubernetes.pod.ip}:6379", + "info": mapstr.M{"period": "1m", "timeout": "41s"}, + "key": mapstr.M{"period": "10m"}, + "package": "redis", + "password": "password", + "username": "username", + "metrics_path": "/metrics", + "timeout": "42s", + "period": "42s", + }, + } + + expected := mapstr.M{ + "container_id": "asdfghjklqwerty", + "redis": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "host": "127.0.0.5:6379", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + "period": "42s", + "info": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "1m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "41s", + }, "key": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "10m", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, "keyspace": mapstr.M{ + "enabled": true, + "host": "127.0.0.5:6379", + "period": "42s", + "metrics_path": "/metrics", + "username": "username", + "password": "password", + "timeout": "42s", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "asdfghjklqwerty") + + assert.Equal(t, expected, hintsMapping) +} + +func TestGenerateHintsMappingWithLogStream(t *testing.T) { + logger := getLogger() + pod := &kubernetes.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Name: "testpod", + UID: types.UID(uid), + Namespace: "testns", + Labels: map[string]string{ + "foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + Annotations: map[string]string{ + "app": "production", + }, + }, + TypeMeta: metav1.TypeMeta{ + Kind: "Pod", + APIVersion: "v1", + }, + Spec: kubernetes.PodSpec{ + NodeName: "testnode", + }, + Status: kubernetes.PodStatus{PodIP: "127.0.0.5"}, + } + + mapping := map[string]interface{}{ + "namespace": pod.GetNamespace(), + "pod": mapstr.M{ + "uid": string(pod.GetUID()), + "name": pod.GetName(), + "ip": pod.Status.PodIP, + }, + "namespace_annotations": mapstr.M{ + "nsa": "nsb", + }, + "labels": mapstr.M{ + 
"foo": "bar", + "with-dash": "dash-value", + "with/slash": "some/path", + }, + "annotations": mapstr.M{ + "app": "production", + }, + } + hints := mapstr.M{ + "hints": mapstr.M{ + "data_streams": "access, error", + "access": mapstr.M{"stream": "stdout"}, + "error": mapstr.M{"stream": "stderr"}, + "package": "apache", + }, + } + + expected := mapstr.M{ + "container_id": "asdfghjkl", + "apache": mapstr.M{ + "container_logs": mapstr.M{ + "enabled": true, + }, + "access": mapstr.M{ + "enabled": true, + "stream": "stdout", + }, "error": mapstr.M{ + "enabled": true, + "stream": "stderr", + }, + }, + } + + hintsMapping := GenerateHintsMapping(hints, mapping, logger, "asdfghjkl") + + assert.Equal(t, expected, hintsMapping) +} diff --git a/internal/pkg/composable/providers/kubernetes/kubernetes.go b/internal/pkg/composable/providers/kubernetes/kubernetes.go index 9f43522f2da..73309439a78 100644 --- a/internal/pkg/composable/providers/kubernetes/kubernetes.go +++ b/internal/pkg/composable/providers/kubernetes/kubernetes.go @@ -7,9 +7,11 @@ package kubernetes import ( "fmt" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" + "github.com/elastic/elastic-agent-libs/logp" + k8s "k8s.io/client-go/kubernetes" - "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent/internal/pkg/agent/errors" "github.com/elastic/elastic-agent/internal/pkg/composable" "github.com/elastic/elastic-agent/internal/pkg/config" @@ -34,12 +36,13 @@ func init() { } type dynamicProvider struct { - logger *logger.Logger - config *Config + logger *logger.Logger + config *Config + managed bool } // DynamicProviderBuilder builds the dynamic provider. -func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (composable.DynamicProvider, error) { var cfg Config if c == nil { c = config.New() @@ -49,11 +52,15 @@ func DynamicProviderBuilder(logger *logger.Logger, c *config.Config) (composable return nil, errors.New(err, "failed to unpack configuration") } - return &dynamicProvider{logger, &cfg}, nil + return &dynamicProvider{logger, &cfg, managed}, nil } // Run runs the kubernetes context provider. 
func (p *dynamicProvider) Run(comm composable.DynamicProviderComm) error { + if p.config.Hints.Enabled() { + betalogger := logp.NewLogger("cfgwarn") + betalogger.Warnf("BETA: Hints' feature is beta.") + } eventers := make([]Eventer, 0, 3) if p.config.Resources.Pod.Enabled { eventer, err := p.watchResource(comm, "pod") @@ -153,19 +160,19 @@ func (p *dynamicProvider) newEventer( client k8s.Interface) (Eventer, error) { switch resourceType { case "pod": - eventer, err := NewPodEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewPodEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } return eventer, nil case nodeScope: - eventer, err := NewNodeEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewNodeEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } return eventer, nil case "service": - eventer, err := NewServiceEventer(comm, p.config, p.logger, client, p.config.Scope) + eventer, err := NewServiceEventer(comm, p.config, p.logger, client, p.config.Scope, p.managed) if err != nil { return nil, err } diff --git a/internal/pkg/composable/providers/kubernetes/node.go b/internal/pkg/composable/providers/kubernetes/node.go index a1539afb9c1..0e5aebc8931 100644 --- a/internal/pkg/composable/providers/kubernetes/node.go +++ b/internal/pkg/composable/providers/kubernetes/node.go @@ -43,7 +43,8 @@ func NewNodeEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-node", client, &kubernetes.Node{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, diff --git a/internal/pkg/composable/providers/kubernetes/node_test.go b/internal/pkg/composable/providers/kubernetes/node_test.go index ab19e7d2ce2..8415304b00b 100644 --- a/internal/pkg/composable/providers/kubernetes/node_test.go +++ b/internal/pkg/composable/providers/kubernetes/node_test.go @@ -93,11 +93,6 @@ func TestGenerateNodeData(t *testing.T) { type nodeMeta struct{} // Generate generates node metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. 
prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (n *nodeMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetes/pod.go b/internal/pkg/composable/providers/kubernetes/pod.go index a8b11b06585..27c9b53bec2 100644 --- a/internal/pkg/composable/providers/kubernetes/pod.go +++ b/internal/pkg/composable/providers/kubernetes/pod.go @@ -9,6 +9,8 @@ import ( "sync" "time" + "github.com/elastic/elastic-agent-autodiscover/utils" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" c "github.com/elastic/elastic-agent-libs/config" @@ -23,15 +25,16 @@ import ( ) type pod struct { - logger *logp.Logger - cleanupTimeout time.Duration - comm composable.DynamicProviderComm - scope string - config *Config - metagen metadata.MetaGen watcher kubernetes.Watcher nodeWatcher kubernetes.Watcher + comm composable.DynamicProviderComm + metagen metadata.MetaGen namespaceWatcher kubernetes.Watcher + config *Config + logger *logp.Logger + scope string + managed bool + cleanupTimeout time.Duration // Mutex used by configuration updates not triggered by the main watcher, // to avoid race conditions between cross updates and deletions. @@ -51,7 +54,8 @@ func NewPodEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-pod", client, &kubernetes.Pod{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, @@ -95,6 +99,7 @@ func NewPodEventer( watcher: watcher, nodeWatcher: nodeWatcher, namespaceWatcher: namespaceWatcher, + managed: managed, } watcher.AddEventHandler(p) @@ -149,10 +154,32 @@ func (p *pod) emitRunning(pod *kubernetes.Pod) { data := generatePodData(pod, p.metagen, namespaceAnnotations) data.mapping["scope"] = p.scope - // Emit the pod - // We emit Pod + containers to ensure that configs matching Pod only - // get Pod metadata (not specific to any container) - _ = p.comm.AddOrUpdate(data.uid, PodPriority, data.mapping, data.processors) + + if p.config.Hints.Enabled() { // This is "hints based autodiscovery flow" + if !p.managed { + if ann, ok := data.mapping["annotations"]; ok { + annotations, _ := ann.(mapstr.M) + hints := utils.GenerateHints(annotations, "", p.config.Prefix) + if len(hints) > 0 { + p.logger.Debugf("Extracted hints are :%v", hints) + hintsMapping := GenerateHintsMapping(hints, data.mapping, p.logger, "") + p.logger.Debugf("Generated hints mappings are :%v", hintsMapping) + _ = p.comm.AddOrUpdate( + data.uid, + PodPriority, + map[string]interface{}{"hints": hintsMapping}, + data.processors, + ) + } + } + } + } else { // This is the "template-based autodiscovery" flow + // emit normal mapping to be used in dynamic variable resolution + // Emit the pod + // We emit Pod + containers to ensure that configs matching Pod only + // get Pod metadata (not specific to any container) + _ = p.comm.AddOrUpdate(data.uid, PodPriority, data.mapping, data.processors) + } // Emit all containers in the pod // We should deal with init containers stopping after initialization @@ -160,7 +187,7 @@ func (p *pod) emitRunning(pod *kubernetes.Pod) { } func (p *pod) emitContainers(pod *kubernetes.Pod, namespaceAnnotations mapstr.M) { - generateContainerData(p.comm, pod, p.metagen, namespaceAnnotations) + generateContainerData(p.comm, 
pod, p.metagen, namespaceAnnotations, p.logger, p.managed, p.config) } func (p *pod) emitStopped(pod *kubernetes.Pod) { @@ -240,6 +267,12 @@ func generatePodData( _ = safemapstr.Put(annotations, k, v) } k8sMapping["annotations"] = annotations + // Pass labels(not dedoted) to all events so that they can be used in templating. + labels := mapstr.M{} + for k, v := range pod.GetObjectMeta().GetLabels() { + _ = safemapstr.Put(labels, k, v) + } + k8sMapping["labels"] = labels processors := []map[string]interface{}{} // meta map includes metadata that go under kubernetes.* @@ -265,7 +298,10 @@ func generateContainerData( comm composable.DynamicProviderComm, pod *kubernetes.Pod, kubeMetaGen metadata.MetaGen, - namespaceAnnotations mapstr.M) { + namespaceAnnotations mapstr.M, + logger *logp.Logger, + managed bool, + config *Config) { containers := kubernetes.GetContainersInPod(pod) @@ -275,6 +311,12 @@ func generateContainerData( _ = safemapstr.Put(annotations, k, v) } + // Pass labels to all events so that it can be used in templating. + labels := mapstr.M{} + for k, v := range pod.GetObjectMeta().GetLabels() { + _ = safemapstr.Put(labels, k, v) + } + for _, c := range containers { // If it doesn't have an ID, container doesn't exist in // the runtime, emit only an event if we are stopping, so @@ -299,8 +341,9 @@ func generateContainerData( if len(namespaceAnnotations) != 0 { k8sMapping["namespace_annotations"] = namespaceAnnotations } - // add annotations to be discoverable by templates + // add annotations and labels to be discoverable by templates k8sMapping["annotations"] = annotations + k8sMapping["labels"] = labels //container ECS fields cmeta := mapstr.M{ @@ -344,7 +387,28 @@ func generateContainerData( _, _ = containerMeta.Put("port", fmt.Sprintf("%v", port.ContainerPort)) _, _ = containerMeta.Put("port_name", port.Name) k8sMapping["container"] = containerMeta - _ = comm.AddOrUpdate(eventID, ContainerPriority, k8sMapping, processors) + + if config.Hints.Enabled() { // This is "hints based autodiscovery flow" + if !managed { + if ann, ok := k8sMapping["annotations"]; ok { + annotations, _ := ann.(mapstr.M) + hints := utils.GenerateHints(annotations, "", config.Prefix) + if len(hints) > 0 { + logger.Debugf("Extracted hints are :%v", hints) + hintsMapping := GenerateHintsMapping(hints, k8sMapping, logger, c.ID) + logger.Debugf("Generated hints mappings are :%v", hintsMapping) + _ = comm.AddOrUpdate( + eventID, + PodPriority, + map[string]interface{}{"hints": hintsMapping}, + processors, + ) + } + } + } + } else { // This is the "template-based autodiscovery" flow + _ = comm.AddOrUpdate(eventID, ContainerPriority, k8sMapping, processors) + } } } else { k8sMapping["container"] = containerMeta diff --git a/internal/pkg/composable/providers/kubernetes/pod_test.go b/internal/pkg/composable/providers/kubernetes/pod_test.go index 95361fd2ce0..7409ad1a3ea 100644 --- a/internal/pkg/composable/providers/kubernetes/pod_test.go +++ b/internal/pkg/composable/providers/kubernetes/pod_test.go @@ -9,17 +9,28 @@ import ( "fmt" "testing" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + "github.com/stretchr/testify/assert" + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent-autodiscover/kubernetes" "github.com/elastic/elastic-agent-autodiscover/kubernetes/metadata" "github.com/elastic/elastic-agent-libs/mapstr" - - v1 "k8s.io/api/core/v1" - metav1 
"k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/types" + "github.com/elastic/elastic-agent/internal/pkg/config" ) +func getLogger() *logger.Logger { + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.ErrorLevel + l, _ := logger.NewFromConfig("", loggerCfg, false) + return l +} + func TestGeneratePodData(t *testing.T) { pod := &kubernetes.Pod{ ObjectMeta: metav1.ObjectMeta{ @@ -153,13 +164,21 @@ func TestGenerateContainerPodData(t *testing.T) { context.TODO(), providerDataChan, } + logger := getLogger() + var cfg Config + c := config.New() + _ = c.Unpack(&cfg) generateContainerData( &comm, pod, &podMeta{}, mapstr.M{ "nsa": "nsb", - }) + }, + logger, + true, + &cfg, + ) mapping := map[string]interface{}{ "namespace": pod.GetNamespace(), @@ -274,13 +293,21 @@ func TestEphemeralContainers(t *testing.T) { context.TODO(), providerDataChan, } + + logger := getLogger() + var cfg Config + c := config.New() + _ = c.Unpack(&cfg) generateContainerData( &comm, pod, &podMeta{}, mapstr.M{ "nsa": "nsb", - }) + }, + logger, + true, + &cfg) mapping := map[string]interface{}{ "namespace": pod.GetNamespace(), @@ -366,11 +393,6 @@ func (t *MockDynamicComm) Remove(id string) { type podMeta struct{} // Generate generates pod metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (p *podMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetes/service.go b/internal/pkg/composable/providers/kubernetes/service.go index 49c20627734..4060c12e646 100644 --- a/internal/pkg/composable/providers/kubernetes/service.go +++ b/internal/pkg/composable/providers/kubernetes/service.go @@ -43,7 +43,8 @@ func NewServiceEventer( cfg *Config, logger *logp.Logger, client k8s.Interface, - scope string) (Eventer, error) { + scope string, + managed bool) (Eventer, error) { watcher, err := kubernetes.NewNamedWatcher("agent-service", client, &kubernetes.Service{}, kubernetes.WatchOptions{ SyncTimeout: cfg.SyncPeriod, Node: cfg.Node, diff --git a/internal/pkg/composable/providers/kubernetes/service_test.go b/internal/pkg/composable/providers/kubernetes/service_test.go index 69e945ee1cd..1943e3cfcdb 100644 --- a/internal/pkg/composable/providers/kubernetes/service_test.go +++ b/internal/pkg/composable/providers/kubernetes/service_test.go @@ -107,11 +107,6 @@ func TestGenerateServiceData(t *testing.T) { type svcMeta struct{} // Generate generates svc metadata from a resource object -// Metadata map is in the following form: -// { -// "kubernetes": {}, -// "some.ecs.field": "asdf" -// } // All Kubernetes fields that need to be stored under kubernetes. 
prefix are populated by // GenerateK8s method while fields that are part of ECS are generated by GenerateECS method func (s *svcMeta) Generate(obj kubernetes.Resource, opts ...metadata.FieldOptions) mapstr.M { diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/config.go b/internal/pkg/composable/providers/kubernetesleaderelection/config.go index d92d35566a2..7ccc2f9a799 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/config.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/config.go @@ -8,10 +8,12 @@ import "github.com/elastic/elastic-agent-autodiscover/kubernetes" // Config for kubernetes_leaderelection provider type Config struct { - KubeConfig string `config:"kube_config"` - KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` + KubeConfig string `config:"kube_config"` + // Name of the leaderelection lease LeaderLease string `config:"leader_lease"` + + KubeClientOptions kubernetes.KubeClientOptions `config:"kube_client_options"` } // InitDefaults initializes the default values for the config. diff --git a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go index d0d773d1663..1fc6c7e958d 100644 --- a/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go +++ b/internal/pkg/composable/providers/kubernetesleaderelection/kubernetes_leaderelection.go @@ -33,7 +33,7 @@ type contextProvider struct { } // ContextProviderBuilder builds the provider. -func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { var cfg Config if c == nil { c = config.New() diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go index d6e8190c13a..543d0cd6b28 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets.go @@ -36,7 +36,7 @@ type contextProviderK8sSecrets struct { } // ContextProviderBuilder builds the context provider. 
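
For orientation, the secrets provider below resolves policy references of the form ${kubernetes_secrets.<namespace>.<secret>.<key>} at variable-substitution time. A hedged sketch of a direct fetch, mirroring the tests further down (the key layout and the Fetch return shape are assumptions, not confirmed by this patch):

    p, _ := ContextProviderBuilder(logger, cfg, true)
    fp, _ := p.(*contextProviderK8sSecrets)
    // namespace "default", secret "redis-secret", data key "password" (all hypothetical)
    val, found := fp.Fetch("kubernetes_secrets.default.redis-secret.password")
    _, _ = val, found
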
-func ContextProviderBuilder(logger *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(logger *logger.Logger, c *config.Config, managed bool) (corecomp.ContextProvider, error) { var cfg Config if c == nil { c = config.New() diff --git a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go index 388f33074bb..f633a9f062e 100644 --- a/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go +++ b/internal/pkg/composable/providers/kubernetessecrets/kubernetes_secrets_test.go @@ -51,7 +51,7 @@ func Test_K8sSecretsProvider_Fetch(t *testing.T) { cfg, err := config.NewConfigFrom(map[string]string{"a": "b"}) require.NoError(t, err) - p, err := ContextProviderBuilder(logger, cfg) + p, err := ContextProviderBuilder(logger, cfg, true) require.NoError(t, err) fp, _ := p.(*contextProviderK8sSecrets) @@ -106,7 +106,7 @@ func Test_K8sSecretsProvider_FetchWrongSecret(t *testing.T) { cfg, err := config.NewConfigFrom(map[string]string{"a": "b"}) require.NoError(t, err) - p, err := ContextProviderBuilder(logger, cfg) + p, err := ContextProviderBuilder(logger, cfg, true) require.NoError(t, err) fp, _ := p.(*contextProviderK8sSecrets) diff --git a/internal/pkg/composable/providers/local/local.go b/internal/pkg/composable/providers/local/local.go index b44affc78df..b54e6142ee0 100644 --- a/internal/pkg/composable/providers/local/local.go +++ b/internal/pkg/composable/providers/local/local.go @@ -32,7 +32,7 @@ func (c *contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, c *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, c *config.Config, _ bool) (corecomp.ContextProvider, error) { p := &contextProvider{} if c != nil { err := c.Unpack(p) diff --git a/internal/pkg/composable/providers/local/local_test.go b/internal/pkg/composable/providers/local/local_test.go index 6afe29251d5..dfec629b88a 100644 --- a/internal/pkg/composable/providers/local/local_test.go +++ b/internal/pkg/composable/providers/local/local_test.go @@ -26,7 +26,7 @@ func TestContextProvider(t *testing.T) { }) require.NoError(t, err) builder, _ := composable.Providers.GetContextProvider("local") - provider, err := builder(nil, cfg) + provider, err := builder(nil, cfg, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic.go b/internal/pkg/composable/providers/localdynamic/localdynamic.go index 0fd81738976..9d9f5c501ae 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic.go @@ -41,7 +41,7 @@ func (c *dynamicProvider) Run(comm composable.DynamicProviderComm) error { } // DynamicProviderBuilder builds the dynamic provider. 
-func DynamicProviderBuilder(_ *logger.Logger, c *config.Config) (composable.DynamicProvider, error) { +func DynamicProviderBuilder(_ *logger.Logger, c *config.Config, _ bool) (composable.DynamicProvider, error) { p := &dynamicProvider{} if c != nil { err := c.Unpack(p) diff --git a/internal/pkg/composable/providers/localdynamic/localdynamic_test.go b/internal/pkg/composable/providers/localdynamic/localdynamic_test.go index a20b37852d9..8cc0a44ccd7 100644 --- a/internal/pkg/composable/providers/localdynamic/localdynamic_test.go +++ b/internal/pkg/composable/providers/localdynamic/localdynamic_test.go @@ -60,7 +60,7 @@ func TestContextProvider(t *testing.T) { }) require.NoError(t, err) builder, _ := composable.Providers.GetDynamicProvider("local_dynamic") - provider, err := builder(nil, cfg) + provider, err := builder(nil, cfg, true) require.NoError(t, err) comm := ctesting.NewDynamicComm(context.Background()) diff --git a/internal/pkg/composable/providers/path/path.go b/internal/pkg/composable/providers/path/path.go index 05af5bcd0b0..389a21fe6bc 100644 --- a/internal/pkg/composable/providers/path/path.go +++ b/internal/pkg/composable/providers/path/path.go @@ -14,7 +14,7 @@ import ( ) func init() { - composable.Providers.MustAddContextProvider("path", ContextProviderBuilder) + composable.Providers.AddContextProvider("path", ContextProviderBuilder) } type contextProvider struct{} @@ -34,6 +34,6 @@ func (*contextProvider) Run(comm corecomp.ContextProviderComm) error { } // ContextProviderBuilder builds the context provider. -func ContextProviderBuilder(_ *logger.Logger, _ *config.Config) (corecomp.ContextProvider, error) { +func ContextProviderBuilder(_ *logger.Logger, _ *config.Config, _ bool) (corecomp.ContextProvider, error) { return &contextProvider{}, nil } diff --git a/internal/pkg/composable/providers/path/path_test.go b/internal/pkg/composable/providers/path/path_test.go index 14f263e56db..094865d3fbd 100644 --- a/internal/pkg/composable/providers/path/path_test.go +++ b/internal/pkg/composable/providers/path/path_test.go @@ -18,7 +18,7 @@ import ( func TestContextProvider(t *testing.T) { builder, _ := composable.Providers.GetContextProvider("path") - provider, err := builder(nil, nil) + provider, err := builder(nil, nil, true) require.NoError(t, err) comm := ctesting.NewContextComm(context.Background()) diff --git a/internal/pkg/composable/testing/dynamic.go b/internal/pkg/composable/testing/dynamic.go index bfa48dff57d..99b499835cd 100644 --- a/internal/pkg/composable/testing/dynamic.go +++ b/internal/pkg/composable/testing/dynamic.go @@ -81,6 +81,7 @@ func (t *DynamicComm) Previous(id string) (DynamicState, bool) { return prev, ok } +//nolint:prealloc,goimports,nolintlint // false positive // PreviousIDs returns the previous set mapping ID. func (t *DynamicComm) PreviousIDs() []string { t.lock.Lock() @@ -100,6 +101,7 @@ func (t *DynamicComm) Current(id string) (DynamicState, bool) { return curr, ok } +//nolint:prealloc,goimports,nolintlint // false positive // CurrentIDs returns the current set mapping ID. func (t *DynamicComm) CurrentIDs() []string { t.lock.Lock() diff --git a/internal/pkg/core/backoff/backoff.go b/internal/pkg/core/backoff/backoff.go index 06723e7db9a..c97eaae199d 100644 --- a/internal/pkg/core/backoff/backoff.go +++ b/internal/pkg/core/backoff/backoff.go @@ -4,11 +4,16 @@ package backoff +import "time" + // Backoff defines the interface for backoff strategies. type Backoff interface { // Wait blocks for a duration of time governed by the backoff strategy. 
Wait() bool + // NextWait returns the duration of the next call to Wait(). + NextWait() time.Duration + // Reset resets the backoff duration to an initial value governed by the backoff strategy. Reset() } diff --git a/internal/pkg/core/backoff/backoff_test.go b/internal/pkg/core/backoff/backoff_test.go index 88498ff5a58..12332eb15f2 100644 --- a/internal/pkg/core/backoff/backoff_test.go +++ b/internal/pkg/core/backoff/backoff_test.go @@ -14,14 +14,9 @@ import ( type factory func(<-chan struct{}) Backoff -func TestBackoff(t *testing.T) { - t.Run("test close channel", testCloseChannel) - t.Run("test unblock after some time", testUnblockAfterInit) -} - -func testCloseChannel(t *testing.T) { - init := 2 * time.Second - max := 5 * time.Minute +func TestCloseChannel(t *testing.T) { + init := 2 * time.Millisecond + max := 5 * time.Second tests := map[string]factory{ "ExpBackoff": func(done <-chan struct{}) Backoff { @@ -42,9 +37,9 @@ func testCloseChannel(t *testing.T) { } } -func testUnblockAfterInit(t *testing.T) { - init := 1 * time.Second - max := 5 * time.Minute +func TestUnblockAfterInit(t *testing.T) { + init := 1 * time.Millisecond + max := 5 * time.Second tests := map[string]factory{ "ExpBackoff": func(done <-chan struct{}) Backoff { @@ -68,3 +63,36 @@ func testUnblockAfterInit(t *testing.T) { }) } } + +func TestNextWait(t *testing.T) { + init := time.Millisecond + max := 5 * time.Second + + tests := map[string]factory{ + "ExpBackoff": func(done <-chan struct{}) Backoff { + return NewExpBackoff(done, init, max) + }, + "EqualJitterBackoff": func(done <-chan struct{}) Backoff { + return NewEqualJitterBackoff(done, init, max) + }, + } + + for name, f := range tests { + t.Run(name, func(t *testing.T) { + c := make(chan struct{}) + b := f(c) + + startWait := b.NextWait() + assert.Equal(t, startWait, b.NextWait(), "next wait not stable") + + startedAt := time.Now() + b.Wait() + waitDuration := time.Now().Sub(startedAt) + nextWait := b.NextWait() + + t.Logf("actualWait: %s startWait: %s nextWait: %s", waitDuration, startWait, nextWait) + assert.Less(t, startWait, nextWait, "wait value did not increase") + assert.GreaterOrEqual(t, waitDuration, startWait, "next wait duration <= actual wait duration") + }) + } +} diff --git a/internal/pkg/core/backoff/equal_jitter.go b/internal/pkg/core/backoff/equal_jitter.go index d87077397cd..671201f5892 100644 --- a/internal/pkg/core/backoff/equal_jitter.go +++ b/internal/pkg/core/backoff/equal_jitter.go @@ -16,8 +16,9 @@ type EqualJitterBackoff struct { duration time.Duration done <-chan struct{} - init time.Duration - max time.Duration + init time.Duration + max time.Duration + nextRand time.Duration last time.Time } @@ -29,6 +30,7 @@ func NewEqualJitterBackoff(done <-chan struct{}, init, max time.Duration) Backof done: done, init: init, max: max, + nextRand: time.Duration(rand.Int63n(int64(init))), //nolint:gosec } } @@ -38,13 +40,18 @@ func (b *EqualJitterBackoff) Reset() { b.duration = b.init * 2 } +func (b *EqualJitterBackoff) NextWait() time.Duration { + // Make sure we have always some minimal back off and jitter. + temp := b.duration / 2 + return temp + b.nextRand +} + // Wait block until either the timer is completed or channel is done. func (b *EqualJitterBackoff) Wait() bool { - // Make sure we have always some minimal back off and jitter. - temp := int64(b.duration / 2) - backoff := time.Duration(temp + rand.Int63n(temp)) + backoff := b.NextWait() // increase duration for next wait. 
+ b.nextRand = time.Duration(rand.Int63n(int64(b.duration))) b.duration *= 2 if b.duration > b.max { b.duration = b.max diff --git a/internal/pkg/core/backoff/exponential.go b/internal/pkg/core/backoff/exponential.go index 81224b95eb5..51b5b4e0cb5 100644 --- a/internal/pkg/core/backoff/exponential.go +++ b/internal/pkg/core/backoff/exponential.go @@ -36,18 +36,23 @@ func (b *ExpBackoff) Reset() { b.duration = b.init } +func (b *ExpBackoff) NextWait() time.Duration { + nextWait := b.duration + nextWait *= 2 + if nextWait > b.max { + nextWait = b.max + } + return nextWait +} + // Wait block until either the timer is completed or channel is done. func (b *ExpBackoff) Wait() bool { - backoff := b.duration - b.duration *= 2 - if b.duration > b.max { - b.duration = b.max - } + b.duration = b.NextWait() select { case <-b.done: return false - case <-time.After(backoff): + case <-time.After(b.duration): b.last = time.Now() return true } diff --git a/internal/pkg/crypto/io.go b/internal/pkg/crypto/io.go index 738a216774a..2012bdf1b5c 100644 --- a/internal/pkg/crypto/io.go +++ b/internal/pkg/crypto/io.go @@ -21,11 +21,11 @@ import ( // Option is the default options used to generate the encrypt and decrypt writer. // NOTE: the defined options need to be same for both the Reader and the writer. type Option struct { + Generator bytesGen IterationsCount int KeyLength int SaltLength int IVLength int - Generator bytesGen // BlockSize must be a factor of aes.BlockSize BlockSize int @@ -180,7 +180,6 @@ func (w *Writer) Write(b []byte) (int, error) { } func (w *Writer) writeBlock(b []byte) error { - // randomly generate the salt and the initialization vector, this information will be saved // on disk in the file as part of the header iv, err := w.generator(w.option.IVLength) @@ -189,12 +188,14 @@ func (w *Writer) writeBlock(b []byte) error { return w.err } + // nolint: errcheck // Ignore the error at this point. w.writer.Write(iv) encodedBytes := w.gcm.Seal(nil, iv, b, nil) l := make([]byte, 4) binary.LittleEndian.PutUint32(l, uint32(len(encodedBytes))) + // nolint: errcheck // Ignore the error at this point. 
w.writer.Write(l)
 
 	_, err = w.writer.Write(encodedBytes)
@@ -325,7 +326,7 @@ func (r *Reader) consumeBlock() error {
 	}
 
 	encodedBytes := make([]byte, l)
-	_, err = io.ReadAtLeast(r.reader, encodedBytes, int(l))
+	_, err = io.ReadAtLeast(r.reader, encodedBytes, l)
 	if err != nil {
 		r.err = errors.Wrapf(err, "fail read the block of %d bytes", l)
 	}
@@ -364,7 +365,6 @@ func (r *Reader) Close() error {
 func randomBytes(length int) ([]byte, error) {
 	r := make([]byte, length)
 	_, err := rand.Read(r)
-
 	if err != nil {
 		return nil, err
 	}
diff --git a/internal/pkg/fleetapi/acker/retrier/retrier.go b/internal/pkg/fleetapi/acker/retrier/retrier.go
index 747fe93645d..b007bfdf064 100644
--- a/internal/pkg/fleetapi/acker/retrier/retrier.go
+++ b/internal/pkg/fleetapi/acker/retrier/retrier.go
@@ -32,19 +32,19 @@ type Option func(*Retrier)
 
 // Retrier implements retrier for actions acks
 type Retrier struct {
-	log   *logger.Logger
 	acker BatchAcker // AckBatch provider
+	log   *logger.Logger
 
-	initialRetryInterval time.Duration // initial retry interval
-	maxRetryInterval     time.Duration // max retry interval
-	maxRetries           int           // configurable maxNumber of retries per action
+	doneCh chan struct{} // signal channel when retry loop is done
+	kickCh chan struct{} // signal channel to kickoff retry loop if not running
 
 	actions []fleetapi.Action // pending actions
-	mx      sync.Mutex
-	kickCh  chan struct{} // signal channel to kickoff retry loop if not running
+	maxRetryInterval     time.Duration // max retry interval
+	maxRetries           int           // configurable maxNumber of retries per action
+	initialRetryInterval time.Duration // initial retry interval
 
-	doneCh chan struct{} // signal channel when retry loop is done
+	mx sync.Mutex
 }
 
 // New creates new instance of retrier
@@ -173,7 +173,6 @@ func (r *Retrier) runRetries(ctx context.Context) {
 		default:
 		}
 		r.log.Debug("ack retrier: exit retry loop")
-
 }
 
 func (r *Retrier) updateRetriesMap(retries map[string]int, actions []fleetapi.Action, resp *fleetapi.AckResponse) (failed []fleetapi.Action) {
diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go
index 9c2cd1513e1..33bcd3dab55 100644
--- a/internal/pkg/fleetapi/checkin_cmd.go
+++ b/internal/pkg/fleetapi/checkin_cmd.go
@@ -38,12 +38,11 @@ type CheckinComponent struct {
 
 // CheckinRequest consists of multiple events reported to fleet ui.
 type CheckinRequest struct {
-	Status   string              `json:"status"`
-	AckToken string              `json:"ack_token,omitempty"`
-	Events   []SerializableEvent `json:"events"`
-	Metadata *info.ECSMeta       `json:"local_metadata,omitempty"`
-	Message  string              `json:"message"`    // V2 Agent message
-	Components []CheckinComponent `json:"components"` // V2 Agent components
+	Status     string             `json:"status"`
+	AckToken   string             `json:"ack_token,omitempty"`
+	Metadata   *info.ECSMeta      `json:"local_metadata,omitempty"`
+	Message    string             `json:"message"`    // V2 Agent message
+	Components []CheckinComponent `json:"components"` // V2 Agent components
 }
 
 // SerializableEvent is a representation of the event to be send to the Fleet Server API via the checkin
@@ -96,23 +95,26 @@ func NewCheckinCmd(info agentInfo, client client.Sender) *CheckinCmd {
 	}
 }
 
-// Execute enroll the Agent in the Fleet Server.
-func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRequest) (*CheckinResponse, error) {
+// Execute performs a checkin against the Fleet Server. It returns the decoded checkin response, a duration indicating
+// how long the request took, and an error.
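+// The duration covers only the round trip to fleet-server; encoding the request
+// and decoding the response are excluded from the measurement. A caller can use
+// it directly, e.g. (sketch; warnThreshold is illustrative):
+//
+//	resp, took, err := cmd.Execute(ctx, &req)
+//	if err == nil && took > warnThreshold { /* log a slow checkin */ }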
+func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRequest) (*CheckinResponse, time.Duration, error) {
 	if err := r.Validate(); err != nil {
-		return nil, err
+		return nil, 0, err
 	}
 
 	b, err := json.Marshal(r)
 	if err != nil {
-		return nil, errors.New(err,
+		return nil, 0, errors.New(err,
 			"fail to encode the checkin request",
 			errors.TypeUnexpected)
 	}
 
 	cp := fmt.Sprintf(checkingPath, e.info.AgentID())
+	sendStart := time.Now()
 	resp, err := e.client.Send(ctx, "POST", cp, nil, nil, bytes.NewBuffer(b))
+	sendDuration := time.Now().Sub(sendStart)
 	if err != nil {
-		return nil, errors.New(err,
+		return nil, sendDuration, errors.New(err,
 			"fail to checkin to fleet-server",
 			errors.TypeNetwork,
 			errors.M(errors.MetaKeyURI, cp))
@@ -120,26 +122,26 @@ func (e *CheckinCmd) Execute(ctx context.Context, r *CheckinRe
 	defer resp.Body.Close()
 
 	if resp.StatusCode != http.StatusOK {
-		return nil, client.ExtractError(resp.Body)
+		return nil, sendDuration, client.ExtractError(resp.Body)
 	}
 
 	rs, err := ioutil.ReadAll(resp.Body)
 	if err != nil {
-		return nil, errors.New(err, "failed to read checkin response")
+		return nil, sendDuration, errors.New(err, "failed to read checkin response")
 	}
 
 	checkinResponse := &CheckinResponse{}
 	decoder := json.NewDecoder(bytes.NewReader(rs))
 	if err := decoder.Decode(checkinResponse); err != nil {
-		return nil, errors.New(err,
+		return nil, sendDuration, errors.New(err,
 			"fail to decode checkin response",
 			errors.TypeNetwork,
 			errors.M(errors.MetaKeyURI, cp))
 	}
 
 	if err := checkinResponse.Validate(); err != nil {
-		return nil, err
+		return nil, sendDuration, err
 	}
 
-	return checkinResponse, nil
+	return checkinResponse, sendDuration, nil
 }
diff --git a/internal/pkg/fleetapi/checkin_cmd_test.go b/internal/pkg/fleetapi/checkin_cmd_test.go
index 2d9aef2741a..56726bb5559 100644
--- a/internal/pkg/fleetapi/checkin_cmd_test.go
+++ b/internal/pkg/fleetapi/checkin_cmd_test.go
@@ -11,6 +11,7 @@ import (
 	"io/ioutil"
 	"net/http"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -25,6 +26,7 @@ func (*agentinfo) AgentID() string { return "id" }
 
 func TestCheckin(t *testing.T) {
 	const withAPIKey = "secret"
+	const requestDelay = time.Millisecond
 	ctx := context.Background()
 	agentInfo := &agentinfo{}
 
@@ -39,6 +41,8 @@ func TestCheckin(t *testing.T) {
 			mux.HandleFunc(path, authHandler(func(w http.ResponseWriter, r *http.Request) {
 				w.WriteHeader(http.StatusInternalServerError)
 				fmt.Fprint(w, raw)
+				// Introduce a small delay to test the request time measurement.
+				time.Sleep(requestDelay)
 			}, withAPIKey))
 			return mux
 		}, withAPIKey,
@@ -47,8 +51,10 @@ func TestCheckin(t *testing.T) {
 
 			request := CheckinRequest{}
 
-			_, err := cmd.Execute(ctx, &request)
+			_, took, err := cmd.Execute(ctx, &request)
 			require.Error(t, err)
+			// Ensure the request took at least as long as the artificial delay.
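+			// The handler sleeps before returning, so even this error response
+			// cannot complete faster than requestDelay.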
+ require.GreaterOrEqual(t, took, requestDelay) }, )) @@ -96,7 +102,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 1, len(r.Actions)) @@ -157,7 +163,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 2, len(r.Actions)) @@ -173,7 +179,7 @@ func TestCheckin(t *testing.T) { }, )) - t.Run("When we receive no action", withServerWithAuthClient( + t.Run("When we receive no action with delay", withServerWithAuthClient( func(t *testing.T) *http.ServeMux { raw := `{ "actions": [] }` mux := http.NewServeMux() @@ -189,7 +195,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) @@ -223,7 +229,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{Metadata: testMetadata()} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) @@ -257,7 +263,7 @@ func TestCheckin(t *testing.T) { request := CheckinRequest{} - r, err := cmd.Execute(ctx, &request) + r, _, err := cmd.Execute(ctx, &request) require.NoError(t, err) require.Equal(t, 0, len(r.Actions)) diff --git a/magefile.go b/magefile.go index 084aa62dc08..ed633505e49 100644 --- a/magefile.go +++ b/magefile.go @@ -10,14 +10,12 @@ package main import ( "context" "fmt" - "io" "os" "os/exec" "path/filepath" "runtime" "strconv" "strings" - "sync" "time" "github.com/hashicorp/go-multierror" @@ -53,7 +51,7 @@ const ( externalArtifacts = "EXTERNAL" configFile = "elastic-agent.yml" agentDropPath = "AGENT_DROP_PATH" - specSuffix = ".spec.yml" // TODO: change after beat ignores yml config + specSuffix = ".spec.yml" checksumFilename = "checksum.yml" ) @@ -97,48 +95,6 @@ type Demo mg.Namespace // Dev runs package and build for dev purposes. type Dev mg.Namespace -// Notice regenerates the NOTICE.txt file. 
-func Notice() error { - fmt.Println(">> Generating NOTICE") - fmt.Println(">> fmt - go mod tidy") - err := sh.RunV("go", "mod", "tidy", "-v") - if err != nil { - return errors.Wrap(err, "failed running go mod tidy, please fix the issues reported") - } - fmt.Println(">> fmt - go mod download") - err = sh.RunV("go", "mod", "download") - if err != nil { - return errors.Wrap(err, "failed running go mod download, please fix the issues reported") - } - fmt.Println(">> fmt - go list") - str, err := sh.Output("go", "list", "-m", "-json", "all") - if err != nil { - return errors.Wrap(err, "failed running go list, please fix the issues reported") - } - fmt.Println(">> fmt - go run") - cmd := exec.Command("go", "run", "go.elastic.co/go-licence-detector", "-includeIndirect", "-rules", "dev-tools/notice/rules.json", "-overrides", "dev-tools/notice/overrides.json", "-noticeTemplate", "dev-tools/notice/NOTICE.txt.tmpl", - "-noticeOut", "NOTICE.txt", "-depsOut", "\"\"") - stdin, err := cmd.StdinPipe() - if err != nil { - return errors.Wrap(err, "failed running go run, please fix the issues reported") - } - var wg sync.WaitGroup - wg.Add(1) - go func() { - defer stdin.Close() - defer wg.Done() - if _, err := io.WriteString(stdin, str); err != nil { - fmt.Println(err) - } - }() - wg.Wait() - _, err = cmd.CombinedOutput() - if err != nil { - return errors.Wrap(err, "failed combined output, please fix the issues reported") - } - return nil -} - func CheckNoChanges() error { fmt.Println(">> fmt - go run") err := sh.RunV("go", "mod", "tidy", "-v") diff --git a/specs/heartbeat.spec.yml b/specs/heartbeat.spec.yml index 0b7da1c9048..ba6a08934b8 100644 --- a/specs/heartbeat.spec.yml +++ b/specs/heartbeat.spec.yml @@ -1,47 +1,44 @@ -version: 2 -inputs: - - name: synthetics/synthetics - description: "Synthetics Browser Monitor" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${HEARTBEAT_GOGC:100}" - - name: synthetics/http - description: "Synthetics HTTP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: synthetics/icmp - description: "Synthetics ICMP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: synthetics/tcp - description: "Synthetics TCP Monitor" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: synthetics/synthetics + description: "Synthetics Browser Monitor" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${HEARTBEAT_GOGC:100}" + - name: synthetics/http + description: "Synthetics HTTP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/icmp + description: "Synthetics ICMP Monitor" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: synthetics/tcp + description: "Synthetics TCP Monitor" + platforms: 
*platforms
+    outputs: *outputs
+    command:
+      args: *args
diff --git a/testing/environments/snapshot.yml b/testing/environments/snapshot.yml
index b8b6792b912..1c415537ad4 100644
--- a/testing/environments/snapshot.yml
+++ b/testing/environments/snapshot.yml
@@ -3,7 +3,7 @@ version: '2.3'
 services:
   elasticsearch:
-    image: docker.elastic.co/elasticsearch/elasticsearch:8.4.0-d058e92f-SNAPSHOT
+    image: docker.elastic.co/elasticsearch/elasticsearch:8.6.0-cae815eb-SNAPSHOT
     # When extend is used it merges healthcheck.tests, see:
     # https://github.com/docker/compose/issues/8962
    # healthcheck:
@@ -42,7 +42,7 @@ services:
       - ./docker/logstash/pki:/etc/pki:ro
 
   kibana:
-    image: docker.elastic.co/kibana/kibana:8.4.0-d058e92f-SNAPSHOT
+    image: docker.elastic.co/kibana/kibana:8.6.0-cae815eb-SNAPSHOT
    environment:
       - "ELASTICSEARCH_USERNAME=kibana_system_user"
       - "ELASTICSEARCH_PASSWORD=testing"
diff --git a/version/docs/version.asciidoc b/version/docs/version.asciidoc
index 9d0056a0c38..0485d65c441 100644
--- a/version/docs/version.asciidoc
+++ b/version/docs/version.asciidoc
@@ -1,6 +1,6 @@
 :stack-version: 8.3.0
 :doc-branch: main
-:go-version: 1.17.10
+:go-version: 1.18.7
 :release-state: unreleased
 :python: 3.7
 :docker: 1.12

From 94204971e5f9d6aabaaea0e4941de7532bf781f1 Mon Sep 17 00:00:00 2001
From: Alex K <8418476+fearful-symmetry@users.noreply.github.com>
Date: Mon, 24 Oct 2022 14:34:05 -0700
Subject: [PATCH 29/49] Add input name alias for cloudbeat integrations (#1596)

* add name alias for cloudbeat

* add anchors for yaml fields

* add EKS input
---
 specs/cloudbeat.spec.yml | 18 +++++++++++++++---
 1 file changed, 15 insertions(+), 3 deletions(-)

diff --git a/specs/cloudbeat.spec.yml b/specs/cloudbeat.spec.yml
index 0cd100c28d6..1ecbe47e330 100644
--- a/specs/cloudbeat.spec.yml
+++ b/specs/cloudbeat.spec.yml
@@ -2,7 +2,7 @@ version: 2
 inputs:
   - name: cloudbeat
     description: "Cloudbeat"
-    platforms:
+    platforms: &platforms
       - linux/amd64
       - linux/arm64
       - darwin/amd64
       - darwin/arm64
       - windows/amd64
       - container/amd64
       - container/arm64
-    outputs:
+    outputs: &outputs
       - elasticsearch
       - kafka
       - logstash
       - redis
     command:
-      args:
+      args: &args
         - "-E"
         - "management.enabled=true"
         - "-E"
         - "setup.ilm.enabled=false"
         - "-E"
         - "setup.template.enabled=false"
         - "-E"
         - "gc_percent=${CLOUDBEAT_GOGC:100}"
+  - name: cloudbeat/cis_k8s
+    description: "CIS Kubernetes monitoring"
+    platforms: *platforms
+    outputs: *outputs
+    command:
+      args: *args
+  - name: cloudbeat/cis_eks
+    description: "CIS elastic Kubernetes monitoring"
+    platforms: *platforms
+    outputs: *outputs
+    command:
+      args: *args
\ No newline at end of file

From 96e071e16f49194ab1c6a01a7e88707986afbad2 Mon Sep 17 00:00:00 2001
From: Michel Laterman <82832767+michel-laterman@users.noreply.github.com>
Date: Mon, 24 Oct 2022 15:11:23 -0700
Subject: [PATCH 30/49] Change the state reporter to include a local flag. (#1308)

* Change the state reporter to include a local flag.

Change the state reporter to use a local flag that determines if local
errors are included in the resulting state.
Assume that configMgr errors are all local - this affects mainly the
fleet_gateway.
Allow the gateway to report an error if a checkin fails. When a checkin
fails the local state reported through the status command and liveness
endpoint will include the error, but checkins to fleet-server will not.

* Add ActionsError() method to config manager

Add a new ActionsError() method to the config managers. For the
non-managed instances it will return a nil channel.
For the managed instances it will return the dispatcher error queue
directly. Have the coordinator gather from this channel as it does for
the others and treat any errors as non-local.

* Fix linter
---
 .../handlers/handler_action_application.go    |  2 +-
 .../application/coordinator/coordinator.go    | 20 ++++--
 .../agent/application/coordinator/handler.go  |  2 +-
 .../application/fleet_server_bootstrap.go     |  4 ++
 .../gateway/fleet/fleet_gateway.go            |  3 +-
 .../gateway/fleet/fleet_gateway_test.go       |  2 +-
 .../pkg/agent/application/managed_mode.go     | 61 ++++++++-----------
 internal/pkg/agent/application/once.go        |  6 ++
 internal/pkg/agent/application/periodic.go    |  6 ++
 internal/pkg/agent/control/server/server.go   |  4 +-
 10 files changed, 66 insertions(+), 44 deletions(-)

diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go
index d83d536dfc0..552427a16b0 100644
--- a/internal/pkg/agent/application/actions/handlers/handler_action_application.go
+++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go
@@ -48,7 +48,7 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.A
 		return fmt.Errorf("invalid type, expected ActionApp and received %T", a)
 	}
 
-	state := h.coord.State()
+	state := h.coord.State(false)
 	unit, ok := findUnitFromInputType(state, action.InputType)
 	if !ok {
 		// If the matching action is not found ack the action with the error for action result document
diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go
index 6c68e5cf610..21ba1ccf359 100644
--- a/internal/pkg/agent/application/coordinator/coordinator.go
+++ b/internal/pkg/agent/application/coordinator/coordinator.go
@@ -123,6 +123,11 @@ type ErrorReporter interface {
 type ConfigManager interface {
 	Runner
 
+	// ActionErrors returns the error channel for actions.
+	// May return errors for fleet-managed agents.
+	// Will always be empty for standalone agents.
+	ActionErrors() <-chan error
+
 	// Watch returns the chanel to watch for configuration changes.
 	Watch() <-chan ConfigChange
 }
@@ -149,7 +154,7 @@ type State struct {
 
 // StateFetcher provides an interface to fetch the current state of the coordinator.
 type StateFetcher interface {
 	// State returns the current state of the coordinator.
-	State() State
+	State(bool) State
 }
 
 // Coordinator manages the entire state of the Elastic Agent.
@@ -169,6 +174,7 @@ type Coordinator struct {
 	runtimeMgrErr error
 	configMgr     ConfigManager
 	configMgrErr  error
+	actionsErr    error
 	varsMgr       VarsManager
 	varsMgrErr    error
@@ -199,7 +205,8 @@ func New(logger *logger.Logger, agentInfo *info.AgentInfo, specs component.Runti
 }
 
 // State returns the current state for the coordinator.
+// local indicates if local configMgr errors should be reported as part of the state.
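+// Status reporting paths inside the agent (control server, liveness handler,
+// diagnostics) pass true; the fleet gateway and action handlers pass false so
+// that checkin failures are not echoed back to fleet-server.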
+func (c *Coordinator) State(local bool) (s State) { s.State = c.state.state s.Message = c.state.message s.Components = c.runtimeMgr.State() @@ -215,9 +222,12 @@ func (c *Coordinator) State() (s State) { if c.runtimeMgrErr != nil { s.State = agentclient.Failed s.Message = c.runtimeMgrErr.Error() - } else if c.configMgrErr != nil { + } else if local && c.configMgrErr != nil { s.State = agentclient.Failed s.Message = c.configMgrErr.Error() + } else if c.actionsErr != nil { + s.State = agentclient.Failed + s.Message = c.actionsErr.Error() } else if c.varsMgrErr != nil { s.State = agentclient.Failed s.Message = c.varsMgrErr.Error() @@ -441,7 +451,7 @@ func (c *Coordinator) DiagnosticHooks() diagnostics.Hooks { Description: "current state of running components by the Elastic Agent", ContentType: "application/yaml", Hook: func(_ context.Context) []byte { - s := c.State() + s := c.State(true) o, err := yaml.Marshal(s) if err != nil { return []byte(fmt.Sprintf("error: %q", err)) @@ -521,6 +531,8 @@ func (c *Coordinator) runner(ctx context.Context) error { c.runtimeMgrErr = runtimeErr case configErr := <-c.configMgr.Errors(): c.configMgrErr = configErr + case actionsErr := <-c.configMgr.ActionErrors(): + c.actionsErr = actionsErr case varsErr := <-c.varsMgr.Errors(): c.varsMgrErr = varsErr case change := <-configWatcher.Watch(): diff --git a/internal/pkg/agent/application/coordinator/handler.go b/internal/pkg/agent/application/coordinator/handler.go index 22130d1a776..eba0d830e36 100644 --- a/internal/pkg/agent/application/coordinator/handler.go +++ b/internal/pkg/agent/application/coordinator/handler.go @@ -24,7 +24,7 @@ type LivenessResponse struct { // Response code is 200 for a healthy agent, and 503 otherwise. // Response body is a JSON object that contains the agent ID, status, message, and the last status update time. 
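+// Since the handler asks for local state, local-only failures such as a
+// failed fleet checkin are reflected in the liveness response.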
func (c *Coordinator) ServeHTTP(wr http.ResponseWriter, req *http.Request) { - s := c.State() + s := c.State(true) lr := LivenessResponse{ ID: c.agentInfo.AgentID(), Status: s.State.String(), diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 369c63bf53b..cf2bebef69e 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -115,6 +115,10 @@ func (m *fleetServerBootstrapManager) Errors() <-chan error { return m.errCh } +func (m *fleetServerBootstrapManager) ActionErrors() <-chan error { + return nil +} + func (m *fleetServerBootstrapManager) Watch() <-chan coordinator.ConfigChange { return m.ch } diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index 09396cf49fc..e7b994acfc6 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -208,6 +208,7 @@ func (f *fleetGateway) doExecute(ctx context.Context, bo backoff.Backoff) (*flee f.errCh <- err return nil, err } + f.errCh <- err continue } @@ -306,7 +307,7 @@ func (f *fleetGateway) execute(ctx context.Context) (*fleetapi.CheckinResponse, } // get current state - state := f.stateFetcher.State() + state := f.stateFetcher.State(false) // convert components into checkin components structure components := f.convertToCheckinComponents(state.Components) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go index 076453f1374..7dd69bd752d 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway_test.go @@ -399,7 +399,7 @@ func (testAgentInfo) AgentID() string { return "agent-secret" } type emptyStateFetcher struct{} -func (e *emptyStateFetcher) State() coordinator.State { +func (e *emptyStateFetcher) State(_ bool) coordinator.State { return coordinator.State{} } diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index ca50495dcb6..32cff92f8e4 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -40,6 +40,7 @@ type managedConfigManager struct { store storage.Store stateStore *store.StateStore actionQueue *queue.ActionQueue + dispatcher *dispatcher.ActionDispatcher runtime *runtime.Manager coord *coordinator.Coordinator @@ -73,6 +74,11 @@ func newManagedConfigManager( return nil, fmt.Errorf("unable to initialize action queue: %w", err) } + actionDispatcher, err := dispatcher.New(log, handlers.NewDefault(log), actionQueue) + if err != nil { + return nil, fmt.Errorf("unable to initialize action dispatcher: %w", err) + } + return &managedConfigManager{ log: log, agentInfo: agentInfo, @@ -81,6 +87,7 @@ func newManagedConfigManager( store: storeSaver, stateStore: stateStore, actionQueue: actionQueue, + dispatcher: actionDispatcher, runtime: runtime, ch: make(chan coordinator.ConfigChange), errCh: make(chan error), @@ -108,11 +115,8 @@ func (m *managedConfigManager) Run(ctx context.Context) error { gatewayCtx, gatewayCancel := context.WithCancel(ctx) defer gatewayCancel() - // Create the actionDispatcher. 
- actionDispatcher, policyChanger, err := newManagedActionDispatcher(m, gatewayCancel) - if err != nil { - return err - } + // Initialize the actionDispatcher. + policyChanger := m.initDispatcher(gatewayCancel) // Create ackers to enqueue/retry failed acks ack, err := fleet.NewAcker(m.log, m.agentInfo, m.client) @@ -139,18 +143,6 @@ func (m *managedConfigManager) Run(ctx context.Context) error { close(retrierRun) }() - // Gather errors from the dispatcher and pass to the error channel. - go func() { - for { - select { - case <-ctx.Done(): - return - case err := <-actionDispatcher.Errors(): - m.errCh <- err // err is one or more failures from dispatching an action - } - } - }() - actions := m.stateStore.Actions() stateRestored := false if len(actions) > 0 && !m.wasUnenrolled() { @@ -158,7 +150,7 @@ func (m *managedConfigManager) Run(ctx context.Context) error { // persisted action on disk we should be able to ask Fleet to get the latest configuration. // But at the moment this is not possible because the policy change was acked. m.log.Info("restoring current policy from disk") - actionDispatcher.Dispatch(ctx, actionAcker, actions...) + m.dispatcher.Dispatch(ctx, actionAcker, actions...) stateRestored = true } @@ -221,7 +213,7 @@ func (m *managedConfigManager) Run(ctx context.Context) error { case <-ctx.Done(): return case actions := <-gateway.Actions(): - actionDispatcher.Dispatch(ctx, actionAcker, actions...) + m.dispatcher.Dispatch(ctx, actionAcker, actions...) } } }() @@ -230,6 +222,12 @@ func (m *managedConfigManager) Run(ctx context.Context) error { return gatewayRunner.Err() } +// ActionErrors returns the error channel for actions. +// May return errors for fleet managed errors. +func (m *managedConfigManager) ActionErrors() <-chan error { + return m.dispatcher.Errors() +} + func (m *managedConfigManager) Errors() <-chan error { return m.errCh } @@ -299,12 +297,7 @@ func fleetServerRunning(state runtime.ComponentState) bool { return false } -func newManagedActionDispatcher(m *managedConfigManager, canceller context.CancelFunc) (*dispatcher.ActionDispatcher, *handlers.PolicyChange, error) { - actionDispatcher, err := dispatcher.New(m.log, handlers.NewDefault(m.log), m.actionQueue) - if err != nil { - return nil, nil, err - } - +func (m *managedConfigManager) initDispatcher(canceller context.CancelFunc) *handlers.PolicyChange { policyChanger := handlers.NewPolicyChange( m.log, m.agentInfo, @@ -313,17 +306,17 @@ func newManagedActionDispatcher(m *managedConfigManager, canceller context.Cance m.ch, ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionPolicyChange{}, policyChanger, ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionPolicyReassign{}, handlers.NewPolicyReassign(m.log), ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionUnenroll{}, handlers.NewUnenroll( m.log, @@ -333,12 +326,12 @@ func newManagedActionDispatcher(m *managedConfigManager, canceller context.Cance ), ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionUpgrade{}, handlers.NewUpgrade(m.log, m.coord), ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionSettings{}, handlers.NewSettings( m.log, @@ -347,7 +340,7 @@ func newManagedActionDispatcher(m *managedConfigManager, canceller context.Cance ), ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionCancel{}, handlers.NewCancel( m.log, @@ -355,15 +348,15 @@ func newManagedActionDispatcher(m 
*managedConfigManager, canceller context.Cance ), ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionApp{}, handlers.NewAppAction(m.log, m.coord), ) - actionDispatcher.MustRegister( + m.dispatcher.MustRegister( &fleetapi.ActionUnknown{}, handlers.NewUnknown(m.log), ) - return actionDispatcher, policyChanger, nil + return policyChanger } diff --git a/internal/pkg/agent/application/once.go b/internal/pkg/agent/application/once.go index fca0ed3e741..c9ea64f1744 100644 --- a/internal/pkg/agent/application/once.go +++ b/internal/pkg/agent/application/once.go @@ -50,6 +50,12 @@ func (o *once) Errors() <-chan error { return o.errCh } +// ActionErrors returns the error channel for actions. +// Returns nil channel. +func (o *once) ActionErrors() <-chan error { + return nil +} + func (o *once) Watch() <-chan coordinator.ConfigChange { return o.ch } diff --git a/internal/pkg/agent/application/periodic.go b/internal/pkg/agent/application/periodic.go index 3c4e2ed4d63..8e6bb6d5af1 100644 --- a/internal/pkg/agent/application/periodic.go +++ b/internal/pkg/agent/application/periodic.go @@ -51,6 +51,12 @@ func (p *periodic) Errors() <-chan error { return p.errCh } +// ActionErrors returns the error channel for actions. +// Returns nil channel. +func (p *periodic) ActionErrors() <-chan error { + return nil +} + func (p *periodic) Watch() <-chan coordinator.ConfigChange { return p.ch } diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 160ec2d6b41..03e19618aeb 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -107,7 +107,7 @@ func (s *Server) Version(_ context.Context, _ *cproto.Empty) (*cproto.VersionRes func (s *Server) State(_ context.Context, _ *cproto.Empty) (*cproto.StateResponse, error) { var err error - state := s.coord.State() + state := s.coord.State(true) components := make([]*cproto.ComponentState, 0, len(state.Components)) for _, comp := range state.Components { units := make([]*cproto.ComponentUnitState, 0, len(comp.State.Units)) @@ -166,7 +166,7 @@ func (s *Server) Restart(_ context.Context, _ *cproto.Empty) (*cproto.RestartRes func (s *Server) Upgrade(ctx context.Context, request *cproto.UpgradeRequest) (*cproto.UpgradeResponse, error) { err := s.coord.Upgrade(ctx, request.Version, request.SourceURI, nil) if err != nil { - return &cproto.UpgradeResponse{ //nolint:nilerr // returns err as response + return &cproto.UpgradeResponse{ Status: cproto.ActionStatus_FAILURE, Error: err.Error(), }, nil From fc3eba3501a0695a477a46331293331873bb850c Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Tue, 25 Oct 2022 12:54:54 -0400 Subject: [PATCH 31/49] Service runtime V2 (#1529) * Service V2 runtime * Implements service runtime component for V2. * Extends endpoint spec with some additional attributes for service start/stop/status checks and creds discovery. The creds discovery logic is taken from V1, cleaned up and extracted into its own file, added utz. * Implements service uninstall * Refactors pkg/core/process/process.go adds additional options that are needed for the service_command implementation. 
* Changes ComponentsModifier to access raw config, needed for the EndpointComponentModifier * Injects host.id into configuration, needed for Endpoint * Injects fleet and policy.revision configuration into the Endpoint input configuration * Bumps the version to 8.6.0 to make it consistent with current beats V2 branch * Addresses linter complains on affected files * Remove the service watcher, all the start/stopping logic * Add changelog * Fix typo * Send STOPPING only upon teardown * Wait for check-in with timeout before sending stopping on teardown * Fix the service loop routine blocking on channel after stopped * Addressed code review comments * Make linter happy * Try to fix make check-ci * Spellcheck runtime README.md * Remove .Stop timeout from the spec as it is no longer used * Addressed code review feedback --- NOTICE.txt | 224 ++++++++- .../1666095433-service_runtime_v2.yaml | 5 + go.mod | 8 +- go.sum | 12 +- internal/pkg/agent/application/application.go | 10 +- .../application/coordinator/coordinator.go | 4 +- .../application/fleet_server_bootstrap.go | 41 +- .../agent/application/info/inject_config.go | 3 + internal/pkg/agent/cmd/container.go | 9 +- internal/pkg/agent/cmd/enroll_cmd.go | 14 +- internal/pkg/agent/configuration/settings.go | 1 + internal/pkg/agent/install/uninstall.go | 8 +- .../pkg/agent/storage/store/state_store.go | 1 - pkg/component/component.go | 6 + pkg/component/runtime/README.md | 28 ++ pkg/component/runtime/command.go | 15 +- pkg/component/runtime/conn_info_server.go | 88 ++++ .../runtime/conn_info_server_test.go | 260 +++++++++++ pkg/component/runtime/runtime.go | 10 +- pkg/component/runtime/service.go | 438 ++++++++++++++++++ pkg/component/runtime/service_command.go | 114 +++++ pkg/component/runtime/service_command_test.go | 183 ++++++++ pkg/component/runtime/state.go | 18 +- pkg/component/spec.go | 12 + pkg/component/spec_test.go | 2 + pkg/core/process/process.go | 135 ++++-- specs/endpoint-security.spec.yml | 5 +- 27 files changed, 1576 insertions(+), 78 deletions(-) create mode 100644 changelog/fragments/1666095433-service_runtime_v2.yaml create mode 100644 pkg/component/runtime/README.md create mode 100644 pkg/component/runtime/conn_info_server.go create mode 100644 pkg/component/runtime/conn_info_server_test.go create mode 100644 pkg/component/runtime/service.go create mode 100644 pkg/component/runtime/service_command.go create mode 100644 pkg/component/runtime/service_command_test.go diff --git a/NOTICE.txt b/NOTICE.txt index f23805c5d87..7bc5103d040 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -602,6 +602,218 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/go-units@v0.4.0/ limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/dolmen-go/contextio +Version: v0.0.0-20200217195037-68fc5150bcd5 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/dolmen-go/contextio@v0.0.0-20200217195037-68fc5150bcd5/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/elastic/e2e-testing Version: v1.99.2-0.20220117192005-d3365c99b9c4 @@ -1695,11 +1907,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-licenser@v0. -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-sysinfo -Version: v1.7.1 +Version: v1.8.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.7.1/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.8.1/LICENSE.txt: Apache License @@ -1907,11 +2119,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-sysinfo@v1.7 -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-ucfg -Version: v0.8.5 +Version: v0.8.6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-ucfg@v0.8.5/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-ucfg@v0.8.6/LICENSE: Apache License Version 2.0, January 2004 @@ -5230,11 +5442,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.0.0-20220422013727-9388b58f7150 +Version: v0.0.0-20220715151400-c0bba94af5f8 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20220422013727-9388b58f7150/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20220715151400-c0bba94af5f8/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. diff --git a/changelog/fragments/1666095433-service_runtime_v2.yaml b/changelog/fragments/1666095433-service_runtime_v2.yaml new file mode 100644 index 00000000000..f54aa7e5d9c --- /dev/null +++ b/changelog/fragments/1666095433-service_runtime_v2.yaml @@ -0,0 +1,5 @@ +kind: feature +summary: Service runtime for V2 +description: Service runtime for V2, tailored specifically for Endpoint service. 
+pr: 1529 +issue: 1069 diff --git a/go.mod b/go.mod index 245d331130f..df1845dff01 100644 --- a/go.mod +++ b/go.mod @@ -10,14 +10,15 @@ require ( github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e github.com/coreos/go-systemd/v22 v22.3.3-0.20220203105225-a9a7ef127534 github.com/docker/go-units v0.4.0 + github.com/dolmen-go/contextio v0.0.0-20200217195037-68fc5150bcd5 github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 github.com/elastic/elastic-agent-autodiscover v0.2.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 github.com/elastic/elastic-agent-libs v0.2.6 github.com/elastic/elastic-agent-system-metrics v0.4.4 github.com/elastic/go-licenser v0.4.0 - github.com/elastic/go-sysinfo v1.7.1 - github.com/elastic/go-ucfg v0.8.5 + github.com/elastic/go-sysinfo v1.8.1 + github.com/elastic/go-ucfg v0.8.6 github.com/gofrs/flock v0.8.1 github.com/gofrs/uuid v4.2.0+incompatible github.com/google/go-cmp v0.5.6 @@ -47,7 +48,7 @@ require ( golang.org/x/crypto v0.0.0-20210817164053-32db794688a5 golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 + golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 golang.org/x/tools v0.1.9 google.golang.org/grpc v1.46.0 google.golang.org/protobuf v1.28.0 @@ -152,6 +153,7 @@ require ( replace ( github.com/Microsoft/go-winio => github.com/bi-zone/go-winio v0.4.15 + github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/dop251/goja_nodejs => github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v1.4.8-0.20211018144411-a81f2b630e7c diff --git a/go.sum b/go.sum index f8fb1ecc1a7..73ded2d2cf3 100644 --- a/go.sum +++ b/go.sum @@ -374,6 +374,8 @@ github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDD github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dolmen-go/contextio v0.0.0-20200217195037-68fc5150bcd5 h1:BzN9o4IS1Hj+AM5qDggsfMDQGFXau5KagipEFmnyIbc= +github.com/dolmen-go/contextio v0.0.0-20200217195037-68fc5150bcd5/go.mod h1:cxc20xI7fOgsFHWgt+PenlDDnMcrvh7Ocuj5hEFIdEk= github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5/go.mod h1:qssHWj60/X5sZFNxpG4HBPDHVqxNm4DfnCKgrbZOT+s= github.com/dsnet/golib v0.0.0-20171103203638-1ea166775780/go.mod h1:Lj+Z9rebOhdfkVLjJ8T6VcRQv3SXugXy999NBtR9aFY= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -398,11 +400,13 @@ github.com/elastic/go-licenser v0.4.0/go.mod h1:V56wHMpmdURfibNBggaSBfqgPxyT1Tld github.com/elastic/go-structform v0.0.9 h1:HpcS7xljL4kSyUfDJ8cXTJC6rU5ChL1wYb6cx3HLD+o= github.com/elastic/go-structform v0.0.9/go.mod h1:CZWf9aIRYY5SuKSmOhtXScE5uQiLZNqAFnwKR4OrIM4= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= -github.com/elastic/go-sysinfo v1.7.1 h1:Wx4DSARcKLllpKT2TnFVdSUJOsybqMYCNQZq1/wO+s0= github.com/elastic/go-sysinfo v1.7.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= 
+github.com/elastic/go-sysinfo v1.8.1 h1:4Yhj+HdV6WjbCRgGdZpPJ8lZQlXZLKDAeIkmQ/VRvi4= +github.com/elastic/go-sysinfo v1.8.1/go.mod h1:JfllUnzoQV/JRYymbH3dO1yggI3mV2oTKSXsDHM+uIM= github.com/elastic/go-ucfg v0.8.4/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT2QGAEKA= -github.com/elastic/go-ucfg v0.8.5 h1:4GB/rMpuh7qTcSFaxJUk97a/JyvFzhi6t+kaskTTLdM= github.com/elastic/go-ucfg v0.8.5/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT2QGAEKA= +github.com/elastic/go-ucfg v0.8.6 h1:stUeyh2goTgGX+/wb9gzKvTv0YB0231LTpKUgCKj4U0= +github.com/elastic/go-ucfg v0.8.6/go.mod h1:4E8mPOLSUV9hQ7sgLEJ4bvt0KhMuDJa8joDT2QGAEKA= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= @@ -1567,8 +1571,8 @@ golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150 h1:xHms4gcpe1YE7A3yIllJXP16CMAGuqwO2lX1mTyyRRc= -golang.org/x/sys v0.0.0-20220422013727-9388b58f7150/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8 h1:0A+M6Uqn+Eje4kHMK80dtF3JCXC4ykBgQG4Fe06QRhQ= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= diff --git a/internal/pkg/agent/application/application.go b/internal/pkg/agent/application/application.go index 327138ac67a..75435cf8e45 100644 --- a/internal/pkg/agent/application/application.go +++ b/internal/pkg/agent/application/application.go @@ -104,7 +104,9 @@ func New( log.Info("Parsed configuration and determined agent is managed by Fleet") composableManaged = true - compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server)) + compModifiers = append(compModifiers, FleetServerComponentModifier(cfg.Fleet.Server), + EndpointComponentModifier(cfg.Fleet)) + managed, err = newManagedConfigManager(log, agentInfo, cfg, store, runtime) if err != nil { return nil, err @@ -130,6 +132,7 @@ func New( func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.Configuration, error) { path := paths.AgentConfigFile() store := storage.NewEncryptedDiskStore(path) + reader, err := store.Load() if err != nil { return store, nil, errors.New(err, "could not initialize config store", @@ -161,6 +164,11 @@ func mergeFleetConfig(rawConfig *config.Config) (storage.Store, *configuration.C errors.M(errors.MetaKeyPath, path)) } + // Fix up fleet.agent.id otherwise the fleet.agent.id is empty string + if cfg.Settings != nil && cfg.Fleet != nil && cfg.Fleet.Info != nil && cfg.Fleet.Info.ID == "" { + cfg.Fleet.Info.ID = cfg.Settings.ID + } + if err := cfg.Fleet.Valid(); err != nil { return store, nil, errors.New(err, "fleet configuration is invalid", diff 
--git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go
index 21ba1ccf359..d244f85b531 100644
--- a/internal/pkg/agent/application/coordinator/coordinator.go
+++ b/internal/pkg/agent/application/coordinator/coordinator.go
@@ -142,7 +142,7 @@ type VarsManager interface {
 
 // ComponentsModifier is a function that takes the computed components model and modifies it before
 // passing it into the components runtime manager.
-type ComponentsModifier func(comps []component.Component) ([]component.Component, error)
+type ComponentsModifier func(comps []component.Component, cfg map[string]interface{}) ([]component.Component, error)
 
 // State provides the current state of the coordinator along with all the current states of components and units.
 type State struct {
@@ -682,7 +682,7 @@ func (c *Coordinator) compute() (map[string]interface{}, []component.Component,
 	}
 
 	for _, modifier := range c.modifiers {
-		comps, err = modifier(comps)
+		comps, err = modifier(comps, cfg)
 		if err != nil {
 			return nil, nil, fmt.Errorf("failed to modify components: %w", err)
 		}
diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go
index cf2bebef69e..808feee0af0 100644
--- a/internal/pkg/agent/application/fleet_server_bootstrap.go
+++ b/internal/pkg/agent/application/fleet_server_bootstrap.go
@@ -21,6 +21,7 @@ import (
 const (
 	elasticsearch = "elasticsearch"
 	fleetServer   = "fleet-server"
+	endpoint      = "endpoint"
 )
 
 // injectFleetServerInput is the base configuration that is used plus the FleetServerComponentModifier that adjusts
@@ -43,7 +44,7 @@ var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{
 // FleetServerComponentModifier modifies the comps to inject extra information from the policy into
 // the Fleet Server component and units needed to run Fleet Server correctly.
 func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) coordinator.ComponentsModifier {
-	return func(comps []component.Component) ([]component.Component, error) {
+	return func(comps []component.Component, _ map[string]interface{}) ([]component.Component, error) {
 		for i, comp := range comps {
 			if comp.Spec.InputType == fleetServer {
 				for j, unit := range comp.Units {
@@ -82,6 +83,44 @@ func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) co
 	}
 }
 
+// EndpointComponentModifier is the modifier for the Endpoint configuration.
+// The Endpoint expects the fleet configuration passed to it by the Agent
+// because it needs to be able to connect to the fleet server directly.
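+// It also copies the top-level host information (host.id) into the injected
+// fleet configuration, since Endpoint expects it there.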
+func EndpointComponentModifier(fleetCfg *configuration.FleetAgentConfig) coordinator.ComponentsModifier { + return func(comps []component.Component, cfg map[string]interface{}) ([]component.Component, error) { + for i, comp := range comps { + if comp.Spec.InputType == endpoint { + for j, unit := range comp.Units { + if unit.Type == client.UnitTypeInput && unit.Config.Type == endpoint { + unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), map[string]interface{}{"fleet": fleetCfg}) + if err != nil { + return nil, err + } + // Set host.id for the host, assign the host from the top level config + // Endpoint expects this + // "host": { + // "id": "b62e91be682a4108bbb080152cc5eeac" + // }, + if v, ok := unitCfgMap["fleet"]; ok { + if m, ok := v.(map[string]interface{}); ok { + m["host"] = cfg["host"] + } + } + unitCfg, err := component.ExpectedConfig(unitCfgMap) + if err != nil { + return nil, err + } + unit.Config = unitCfg + } + comp.Units[j] = unit + } + } + comps[i] = comp + } + return comps, nil + } +} + type fleetServerBootstrapManager struct { log *logger.Logger diff --git a/internal/pkg/agent/application/info/inject_config.go b/internal/pkg/agent/application/info/inject_config.go index ced41d9f217..03f57a6ddcd 100644 --- a/internal/pkg/agent/application/info/inject_config.go +++ b/internal/pkg/agent/application/info/inject_config.go @@ -42,6 +42,9 @@ func agentGlobalConfig() (map[string]interface{}, error) { "home": paths.Home(), "logs": paths.Logs(), }, + "host": map[string]interface{}{ + "id": hostInfo.Info().UniqueID, + }, "runtime.os": runtime.GOOS, "runtime.arch": runtime.GOARCH, "runtime.osinfo.type": hostInfo.Info().OS.Type, diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index baa21918695..06fd9bdf962 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -9,7 +9,6 @@ import ( "encoding/json" "fmt" "io" - "io/ioutil" "net/url" "os" "os/exec" @@ -231,12 +230,12 @@ func containerCmd(streams *cli.IOStreams) error { wg.Done() // sending kill signal to current process (elastic-agent) logInfo(streams, "Initiate shutdown elastic-agent.") - mainProc.Signal(syscall.SIGTERM) // nolint:errcheck //not required + mainProc.Signal(syscall.SIGTERM) //nolint:errcheck //not required }() defer func() { if apmProc != nil { - apmProc.Stop() // nolint:errcheck //not required + apmProc.Stop() //nolint:errcheck //not required logInfo(streams, "Initiate shutdown legacy apm-server.") } }() @@ -722,7 +721,7 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err } // Get the apm-server directory - files, err := ioutil.ReadDir(path) + files, err := os.ReadDir(path) if err != nil { return nil, errors.New(err, fmt.Sprintf("reading directory %s", path)) } @@ -750,7 +749,7 @@ func runLegacyAPMServer(streams *cli.IOStreams, path string) (*process.Info, err addEnv("--httpprof", "HTTPPROF") addSettingEnv("gc_percent", "APMSERVER_GOGC") logInfo(streams, "Starting legacy apm-server daemon as a subprocess.") - return process.Start(spec.BinaryPath, os.Geteuid(), os.Getegid(), args, nil) + return process.Start(spec.BinaryPath, process.WithArgs(args)) } func logToStderr(cfg *configuration.Configuration) { diff --git a/internal/pkg/agent/cmd/enroll_cmd.go b/internal/pkg/agent/cmd/enroll_cmd.go index 77519772fe7..805b8a47757 100644 --- a/internal/pkg/agent/cmd/enroll_cmd.go +++ b/internal/pkg/agent/cmd/enroll_cmd.go @@ -9,7 +9,6 @@ import ( "context" "fmt" "io" - "io/ioutil" "math/rand" "os" "os/exec" @@ -287,7 
+286,7 @@ func (c *enrollCmd) writeDelayEnroll(streams *cli.IOStreams) error { errors.TypeConfig, errors.M("path", enrollPath)) } - err = ioutil.WriteFile(enrollPath, data, 0600) + err = os.WriteFile(enrollPath, data, 0600) if err != nil { return errors.New( err, @@ -598,12 +597,15 @@ func (c *enrollCmd) startAgent(ctx context.Context) (<-chan *os.ProcessState, er if !paths.IsVersionHome() { args = append(args, "--path.home.unversioned") } - proc, err := process.StartContext( - ctx, cmd, os.Geteuid(), os.Getegid(), args, nil, func(c *exec.Cmd) error { + proc, err := process.Start( + cmd, + process.WithContext(ctx), + process.WithArgs(args), + process.WithCmdOptions(func(c *exec.Cmd) error { c.Stdout = os.Stdout c.Stderr = os.Stderr return nil - }) + })) if err != nil { return nil, err } @@ -632,7 +634,7 @@ func yamlToReader(in interface{}) (io.Reader, error) { } func delay(ctx context.Context, d time.Duration) { - t := time.NewTimer(time.Duration(rand.Int63n(int64(d)))) //nolint:gosec // the RNG is allowed to be weak + t := time.NewTimer(time.Duration(rand.Int63n(int64(d)))) defer t.Stop() select { case <-ctx.Done(): diff --git a/internal/pkg/agent/configuration/settings.go b/internal/pkg/agent/configuration/settings.go index 3b509270344..878f4cbfe85 100644 --- a/internal/pkg/agent/configuration/settings.go +++ b/internal/pkg/agent/configuration/settings.go @@ -14,6 +14,7 @@ import ( // SettingsConfig is an collection of agent settings configuration. type SettingsConfig struct { + ID string `yaml:"id" config:"id" json:"id"` DownloadConfig *artifact.Config `yaml:"download" config:"download" json:"download"` ProcessConfig *process.Config `yaml:"process" config:"process" json:"process"` GRPC *GRPCConfig `yaml:"grpc" config:"grpc" json:"grpc"` diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index fd99c3bbb82..ef62524455f 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -24,6 +24,7 @@ import ( "github.com/elastic/elastic-agent/internal/pkg/config" "github.com/elastic/elastic-agent/internal/pkg/config/operations" "github.com/elastic/elastic-agent/pkg/component" + comprt "github.com/elastic/elastic-agent/pkg/component/runtime" "github.com/elastic/elastic-agent/pkg/core/logger" ) @@ -149,7 +150,7 @@ func uninstallComponents(ctx context.Context, cfgFile string) error { // remove each service component for _, comp := range comps { - if err := uninstallComponent(ctx, comp); err != nil { + if err := uninstallComponent(ctx, log, comp); err != nil { os.Stderr.WriteString(fmt.Sprintf("failed to uninstall component %q: %s\n", comp.ID, err)) } } @@ -157,9 +158,8 @@ func uninstallComponents(ctx context.Context, cfgFile string) error { return nil } -func uninstallComponent(_ context.Context, _ component.Component) error { - // TODO(blakerouse): Perform uninstall of service component; once the service runtime is written. 
- return errors.New("failed to uninstall component; not implemented") +func uninstallComponent(ctx context.Context, log *logp.Logger, comp component.Component) error { + return comprt.UninstallService(ctx, log, comp) } func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Config) ([]component.Component, error) { diff --git a/internal/pkg/agent/storage/store/state_store.go b/internal/pkg/agent/storage/store/state_store.go index 522e46fdade..3e90189c55a 100644 --- a/internal/pkg/agent/storage/store/state_store.go +++ b/internal/pkg/agent/storage/store/state_store.go @@ -148,7 +148,6 @@ func migrateStateStore(log *logger.Logger, actionStorePath, stateStorePath strin stateStoreExits, err := stateDiskStore.Exists() if err != nil { - log.With() log.Errorf("failed to check if state store %s exists: %v", stateStorePath, err) return err } diff --git a/pkg/component/component.go b/pkg/component/component.go index f77b27d6fcf..467fede9e40 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -108,6 +108,7 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}, monitoringInj } func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Component, map[string]string, error) { + const revision = "revision" outputsMap, err := toIntermediate(policy) if err != nil { return nil, nil, err @@ -180,6 +181,11 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp // skip; not enabled continue } + if v, ok := policy[revision]; ok { + input.input["policy"] = map[string]interface{}{ + revision: v, + } + } cfg, cfgErr := ExpectedConfig(input.input) if cfg != nil { cfg.Type = inputType // ensure alias is replaced in the ExpectedConfig to be non-alias type diff --git a/pkg/component/runtime/README.md b/pkg/component/runtime/README.md new file mode 100644 index 00000000000..0622c911f76 --- /dev/null +++ b/pkg/component/runtime/README.md @@ -0,0 +1,28 @@ +# Runtime documentation + + +## Service runtime + +This part of the documentation describes how the Agent ```service runtime``` works. The design is not new and was inherited from V1, just was not documented anywhere. + +The service runtime is currently used to support integration with the Endpoint service and is very much customized to the expected behavior of the service. The Endpoint service can not be stopped (protected on windows) and the Agent runtime component is not expected to manage the lifetime of the service. The Endpoint service is expected to be always running. + +In order for the Endpoint to connect to the Agent, the Agent starts up the gRPC "connection info" server on the local port specified in the endpoint specification file. The "connection info" service sends the connection parameters/credentials to the agent upon the connection, the Endpoint uses to establish primary connection to the Agent + +The following are the steps the Endpoint goes through to establish the connection to the Agent: +1. The Endpoint connects to the "connection info" local port +2. The Agent sends the connection parameters/credentials to the Endpoint and closes the connection +3. The Endpoint establishes the primary connection to the Agent + +The Agent can only call 3 commands on the endpoint binary that allows it to: +1. Check if the Endpoint service is installed +2. Install the Endpoint service. The Endpoint service is started automatically upon installation. +3. Uninstall the Endpoint service. 
+
+
+The Agent is expected to send the ```STOPPING``` state to the Endpoint if possible. For example, this helps to ```deactivate``` the Endpoint in a k8s environment.
+
+When the Endpoint is removed from the policy, the Endpoint is uninstalled by the Agent as follows:
+1. If the Endpoint has never checked in, the Agent waits (with a timeout) for the first check-in
+2. The Agent sends the ```STOPPING``` state to the Endpoint
+3. The Agent calls the uninstall command based on the service specification
\ No newline at end of file
diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go
index 2cabe906b1f..22c1898fcdc 100644
--- a/pkg/component/runtime/command.go
+++ b/pkg/component/runtime/command.go
@@ -33,6 +33,8 @@ const (
 
 	envAgentComponentID        = "AGENT_COMPONENT_ID"
 	envAgentComponentInputType = "AGENT_COMPONENT_INPUT_TYPE"
+
+	stateUnknownMessage = "Unknown"
 )
 
 type MonitoringManager interface {
@@ -83,8 +85,8 @@ func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (Com
 
 // Run starts the runtime for the component.
 //
-// Called by Manager inside a go-routine. Run should not return until the passed in context is done. Run is always
-// called before any of the other methods in the interface and once the context is done none of those methods will
+// Called by Manager inside a goroutine. Run does not return until the passed in context is done. Run is always
+// called before any of the other methods in the interface and once the context is done none of those methods should
 // ever be called again.
 func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error {
 	checkinPeriod := c.current.Spec.Spec.Command.Timeouts.Checkin
@@ -243,7 +245,7 @@ func (c *CommandRuntime) forceCompState(state client.UnitState, msg string) {
 
 // compState updates just the component state not all the units.
 func (c *CommandRuntime) compState(state client.UnitState) {
-	msg := "Unknown"
+	msg := stateUnknownMessage
 	if state == client.UnitStateHealthy {
 		msg = fmt.Sprintf("Healthy: communicating with pid '%d'", c.proc.PID)
 	} else if state == client.UnitStateDegraded {
@@ -298,7 +300,10 @@ func (c *CommandRuntime) start(comm Communicator) error {
 	_ = os.MkdirAll(dataPath, 0755)
 	args = append(args, "-E", "path.data="+dataPath)
 
-	proc, err := process.Start(path, uid, gid, args, env, attachOutErr, dirPath(workDir))
+	proc, err := process.Start(path,
+		process.WithArgs(args),
+		process.WithEnv(env),
+		process.WithCmdOptions(attachOutErr, dirPath(workDir)))
 	if err != nil {
 		return err
 	}
@@ -410,7 +415,7 @@ func attachOutErr(cmd *exec.Cmd) error {
 	return nil
 }
 
-func dirPath(path string) process.Option {
+func dirPath(path string) process.CmdOption {
 	return func(cmd *exec.Cmd) error {
 		cmd.Dir = path
 		return nil
diff --git a/pkg/component/runtime/conn_info_server.go b/pkg/component/runtime/conn_info_server.go
new file mode 100644
index 00000000000..f3e2a0a6fe0
--- /dev/null
+++ b/pkg/component/runtime/conn_info_server.go
@@ -0,0 +1,88 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package runtime + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "time" + + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +const ( + defaultStopTimeout = 15 * time.Second + windows = "windows" +) + +type connInfoServer struct { + log *logger.Logger + listener net.Listener + waitCtx context.Context + stopTimeout time.Duration +} + +func newConnInfoServer(log *logger.Logger, comm Communicator, port int) (*connInfoServer, error) { + listener, err := net.Listen("tcp", fmt.Sprintf("127.0.0.1:%d", port)) + if err != nil { + return nil, fmt.Errorf("failed to start connection credentials listener: %w", err) + } + + s := &connInfoServer{log: log, listener: listener, stopTimeout: defaultStopTimeout} + + var cn context.CancelFunc + s.waitCtx, cn = context.WithCancel(context.Background()) + go func() { + defer cn() + for { + conn, err := listener.Accept() + if err != nil { + log.Errorf("failed accept conn info connection: %v", err) + break + } + log.Debugf("client connected, sending connection info") + err = comm.WriteConnInfo(conn) + if err != nil { + if !errors.Is(err, io.EOF) { + log.Errorf("failed write conn info: %v", err) + } + } + err = conn.Close() + if err != nil { + log.Errorf("failed conn info connection close: %v", err) + } + } + }() + + return s, nil +} + +func (s *connInfoServer) stop() error { + // wait service stop with timeout + ctx, cn := context.WithTimeout(s.waitCtx, s.stopTimeout) + defer cn() + + err := s.listener.Close() + if err != nil { + s.log.Errorf("failed close conn info connection: %v", err) + } + + <-ctx.Done() + cerr := ctx.Err() + if errors.Is(cerr, context.Canceled) { + cerr = nil + } + + if errors.Is(cerr, context.DeadlineExceeded) { + s.log.Errorf("timeout while stopping conn info server: %v", err) + } + if err != nil { + return err + } + return cerr +} diff --git a/pkg/component/runtime/conn_info_server_test.go b/pkg/component/runtime/conn_info_server_test.go new file mode 100644 index 00000000000..4b221a64930 --- /dev/null +++ b/pkg/component/runtime/conn_info_server_test.go @@ -0,0 +1,260 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package runtime + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "runtime" + "syscall" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + protobuf "google.golang.org/protobuf/proto" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/elastic-agent/internal/pkg/testutils" +) + +type mockCommunicator struct { + ch chan *proto.CheckinObserved + connInfo *proto.ConnInfo +} + +func newMockCommunicator() *mockCommunicator { + return &mockCommunicator{ + ch: make(chan *proto.CheckinObserved, 1), + connInfo: &proto.ConnInfo{ + Addr: getAddress(), + ServerName: "endpoint", + Token: "some token", + CaCert: []byte("some CA cert"), + PeerCert: []byte("some cert"), + PeerKey: []byte("some key"), + Services: []proto.ConnInfoServices{proto.ConnInfoServices_CheckinV2}, + }, + } +} + +func (c *mockCommunicator) WriteConnInfo(w io.Writer, services ...client.Service) error { + infoBytes, err := protobuf.Marshal(c.connInfo) + if err != nil { + return fmt.Errorf("failed to marshal connection information: %w", err) + } + _, err = w.Write(infoBytes) + if err != nil { + return fmt.Errorf("failed to write connection information: %w", err) + } + + return nil +} + +func (c *mockCommunicator) CheckinExpected(expected *proto.CheckinExpected) { +} + +func (c *mockCommunicator) CheckinObserved() <-chan *proto.CheckinObserved { + return c.ch +} + +const testPort = 6788 + +func getAddress() string { + return fmt.Sprintf("127.0.0.1:%d", testPort) +} + +func TestConnInfoNormal(t *testing.T) { + log := testutils.NewErrorLogger(t) + + comm := newMockCommunicator() + + // Start server + srv, err := newConnInfoServer(log, comm, testPort) + if err != nil { + t.Fatal(err) + } + defer func() { + err := srv.stop() + if err != nil { + t.Fatal(err) + } + }() + + const count = 2 // read connection info a couple of times to make sure the server keeps working for multiple calls + + for i := 0; i < count; i++ { + // Connect to the server + conn, err := net.Dial("tcp", getAddress()) + if err != nil { + t.Fatal(err) + } + + b, err := io.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + + var connInfo proto.ConnInfo + err = protobuf.Unmarshal(b, &connInfo) + if err != nil { + t.Fatal(err) + } + + // Check the received result + diff := cmp.Diff(&connInfo, comm.connInfo, cmpopts.IgnoreUnexported(proto.ConnInfo{})) + if diff != "" { + t.Error(diff) + } + } +} + +func TestConnInfoConnCloseThenAnotherConn(t *testing.T) { + log := testutils.NewErrorLogger(t) + + comm := newMockCommunicator() + + // Start server + srv, err := newConnInfoServer(log, comm, testPort) + if err != nil { + t.Fatal(err) + } + defer func() { + err := srv.stop() + if err != nil { + t.Fatal(err) + } + }() + + // Connect to the server + conn, err := net.Dial("tcp", getAddress()) + if err != nil { + t.Fatal(err) + } + + // Close connection + err = conn.Close() + if err != nil { + t.Fatal(err) + } + + // Connect again after closed + conn, err = net.Dial("tcp", getAddress()) + if err != nil { + t.Fatal(err) + } + + b, err := io.ReadAll(conn) + if err != nil { + t.Fatal(err) + } + + var connInfo proto.ConnInfo + err = protobuf.Unmarshal(b, &connInfo) + if err != nil { + t.Fatal(err) + } + + // Check the received result + diff := cmp.Diff(&connInfo, comm.connInfo, cmpopts.IgnoreUnexported(proto.ConnInfo{})) + if diff != "" { + t.Error(diff) + } +} + +func TestConnInfoClosed(t *testing.T) { + log := testutils.NewErrorLogger(t) + + 
comm := newMockCommunicator()
+
+	// Start server
+	srv, err := newConnInfoServer(log, comm, testPort)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = srv.stop()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	_, err = net.Dial("tcp", getAddress())
+	if err == nil {
+		t.Fatal("want non-nil err")
+	}
+
+	// There is no good way to check for a connection refused error cross-platform.
+	// On Windows we get windows.WSAECONNREFUSED, on *nix we get syscall.ECONNREFUSED.
+	// Importing golang.org/x/sys/windows here in order to get access to windows.WSAECONNREFUSED
+	// causes an issue for *nix builds: "imports golang.org/x/sys/windows: build constraints exclude all Go files".
+	// In order to avoid creating extra platform-specific files, compare just the errno for this test.
+	wantErrNo := int(syscall.ECONNREFUSED)
+	if runtime.GOOS == windows {
+		wantErrNo = 10061 // windows.WSAECONNREFUSED
+	}
+	var (
+		syserr syscall.Errno
+		errno  int
+	)
+	if errors.As(err, &syserr) {
+		errno = int(syserr)
+		if wantErrNo != errno {
+			t.Fatal(err)
+		}
+	} else {
+		t.Fatalf("unexpected error: %v", err)
+	}
+}
+
+func TestConnInfoDoubleStop(t *testing.T) {
+	log := testutils.NewErrorLogger(t)
+
+	comm := newMockCommunicator()
+
+	// Start server
+	srv, err := newConnInfoServer(log, comm, testPort)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = srv.stop()
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	err = srv.stop()
+	if err == nil {
+		t.Fatal("want err, got nil")
+	}
+}
+
+func TestConnInfoStopTimeout(t *testing.T) {
+	log := testutils.NewErrorLogger(t)
+
+	comm := newMockCommunicator()
+
+	// Start server
+	srv, err := newConnInfoServer(log, comm, testPort)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// inject a wait context that we control in order to emulate the timeout
+	var cn context.CancelFunc
+	srv.waitCtx, cn = context.WithCancel(context.Background())
+	defer cn()
+
+	srv.stopTimeout = 100 * time.Millisecond
+
+	err = srv.stop()
+	// Expected timeout on stop
+	if !errors.Is(err, context.DeadlineExceeded) {
+		t.Fatal(err)
+	}
+}
diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go
index 58c7d1ed153..e06702b2141 100644
--- a/pkg/component/runtime/runtime.go
+++ b/pkg/component/runtime/runtime.go
@@ -21,8 +21,8 @@ import (
 type ComponentRuntime interface {
 	// Run starts the runtime for the component.
 	//
-	// Called by Manager inside a go-routine. Run should not return until the passed in context is done. Run is always
-	// called before any of the other methods in the interface and once the context is done none of those methods will
+	// Called by Manager inside a goroutine. Run does not return until the passed in context is done. Run is always
+	// called before any of the other methods in the interface and once the context is done none of those methods should
 	// ever be called again.
 	Run(ctx context.Context, comm Communicator) error
 	// Watch returns the channel that sends component state.
@@ -54,13 +54,13 @@ type ComponentRuntime interface {
 }
 
 // NewComponentRuntime creates the proper runtime based on the input specification for the component.
-func NewComponentRuntime(comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) {
+func NewComponentRuntime(comp component.Component, logger *logger.Logger, monitor MonitoringManager) (ComponentRuntime, error) {
 	if comp.Err != nil {
 		return NewFailedRuntime(comp)
 	} else if comp.Spec.Spec.Command != nil {
 		return NewCommandRuntime(comp, monitor)
 	} else if comp.Spec.Spec.Service != nil {
-		return nil, errors.New("service component runtime not implemented")
+		return NewServiceRuntime(comp, logger)
 	}
 	return nil, errors.New("unknown component runtime")
 }
@@ -87,7 +87,7 @@ func newComponentRuntimeState(m *Manager, logger *logger.Logger, monitor Monitor
 	if err != nil {
 		return nil, err
 	}
-	runtime, err := NewComponentRuntime(comp, monitor)
+	runtime, err := NewComponentRuntime(comp, logger, monitor)
 	if err != nil {
 		return nil, err
 	}
diff --git a/pkg/component/runtime/service.go b/pkg/component/runtime/service.go
new file mode 100644
index 00000000000..9c55b9fa4d7
--- /dev/null
+++ b/pkg/component/runtime/service.go
@@ -0,0 +1,438 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package runtime
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/kardianos/service"
+
+	"github.com/elastic/elastic-agent-client/v7/pkg/client"
+	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
+	"github.com/elastic/elastic-agent/pkg/component"
+	"github.com/elastic/elastic-agent/pkg/core/logger"
+)
+
+const (
+	defaultCheckServiceStatusInterval = 30 * time.Second // 30 seconds default for now, consistent with the command check-in interval
+)
+
+var (
+	ErrOperationSpecUndefined = errors.New("operation spec undefined")
+	ErrInvalidServiceSpec     = errors.New("invalid service spec")
+)
+
+type executeServiceCommandFunc func(ctx context.Context, log *logger.Logger, binaryPath string, spec *component.ServiceOperationsCommandSpec) error
+
+// ServiceRuntime provides the service runtime for running a component as a service.
+type ServiceRuntime struct {
+	comp component.Component
+	log  *logger.Logger
+
+	ch       chan ComponentState
+	actionCh chan actionMode
+	compCh   chan component.Component
+	statusCh chan service.Status
+
+	state ComponentState
+
+	executeServiceCommandImpl executeServiceCommandFunc
+}
+
+// NewServiceRuntime creates a new service runtime for the provided component.
+func NewServiceRuntime(comp component.Component, logger *logger.Logger) (ComponentRuntime, error) {
+	if comp.Spec.Spec.Service == nil {
+		return nil, errors.New("must have service defined in specification")
+	}
+
+	state := newComponentState(&comp)
+
+	s := &ServiceRuntime{
+		comp:                      comp,
+		log:                       logger.Named("service_runtime"),
+		ch:                        make(chan ComponentState),
+		actionCh:                  make(chan actionMode),
+		compCh:                    make(chan component.Component),
+		statusCh:                  make(chan service.Status),
+		state:                     state,
+		executeServiceCommandImpl: executeServiceCommand,
+	}
+
+	// Set initial state as STOPPED
+	s.state.compState(client.UnitStateStopped, fmt.Sprintf("Stopped: %s service", s.name()))
+	return s, nil
+}
+
+// Run starts the runtime for the component.
+//
+// Called by Manager inside a goroutine. Run does not return until the passed in context is done. Run is always
+// called before any of the other methods in the interface and once the context is done none of those methods should
+// ever be called again.
+func (s *ServiceRuntime) Run(ctx context.Context, comm Communicator) (err error) {
+	checkinTimer := time.NewTimer(s.checkinPeriod())
+	defer checkinTimer.Stop()
+
+	// Stop the check-ins timer initially
+	checkinTimer.Stop()
+
+	var (
+		cis            *connInfoServer
+		lastCheckin    time.Time
+		missedCheckins int
+	)
+
+	cisStop := func() {
+		if cis != nil {
+			_ = cis.stop()
+			cis = nil
+		}
+	}
+	defer cisStop()
+
+	for {
+		var err error
+		select {
+		case <-ctx.Done():
+			s.log.Debug("context is done. exiting.")
+			return ctx.Err()
+		case as := <-s.actionCh:
+			switch as {
+			case actionStart:
+				// Initial state on start
+				lastCheckin = time.Time{}
+				missedCheckins = 0
+				checkinTimer.Stop()
+				cisStop()
+
+				// Start connection info
+				if cis == nil {
+					cis, err = newConnInfoServer(s.log, comm, s.comp.Spec.Spec.Service.CPort)
+					if err != nil {
+						err = fmt.Errorf("failed to start connection info service %s: %w", s.name(), err)
+						break
+					}
+				}
+
+				// Start service
+				err = s.start(ctx)
+				if err != nil {
+					cisStop()
+					break
+				}
+
+				// Start check-in timer
+				checkinTimer.Reset(s.checkinPeriod())
+			case actionStop, actionTeardown:
+				// Stop check-in timer
+				s.log.Debugf("stop check-in timer for %s service", s.name())
+				checkinTimer.Stop()
+
+				// Stop connection info
+				s.log.Debugf("stop connection info for %s service", s.name())
+				cisStop()
+
+				// Stop service
+				s.stop(ctx, comm, lastCheckin, as == actionTeardown)
+			}
+			if err != nil {
+				s.forceCompState(client.UnitStateFailed, err.Error())
+			}
+		case newComp := <-s.compCh:
+			s.processNewComp(newComp, comm)
+		case checkin := <-comm.CheckinObserved():
+			s.processCheckin(checkin, comm, &lastCheckin)
+		case <-checkinTimer.C:
+			s.checkStatus(s.checkinPeriod(), &lastCheckin, &missedCheckins)
+			checkinTimer.Reset(s.checkinPeriod())
+		}
+	}
+}
+
+func (s *ServiceRuntime) start(ctx context.Context) (err error) {
+	name := s.name()
+
+	// Set state to starting
+	s.forceCompState(client.UnitStateStarting, fmt.Sprintf("Starting: %s service runtime", name))
+
+	// Call the check command of the service
+	s.log.Debugf("check if %s service is installed", name)
+	err = s.check(ctx)
+	s.log.Debugf("after check if %s service is installed, err: %v", name, err)
+	if err != nil {
+		// Check failed, call the install command of the service
+		s.log.Debugf("failed check %s service: %v, try install", name, err)
+		err = s.install(ctx)
+		if err != nil {
+			return fmt.Errorf("failed install %s service: %w", name, err)
+		}
+	}
+
+	// The service should start on its own, expecting check-ins
+	return nil
+}
+
+func (s *ServiceRuntime) stop(ctx context.Context, comm Communicator, lastCheckin time.Time, teardown bool) {
+	name := s.name()
+
+	s.log.Debugf("stopping %s service runtime", name)
+
+	checkedIn := !lastCheckin.IsZero()
+
+	if teardown {
+		// If checked in before, send STOPPING
+		if s.isRunning() {
+			// If it never checked in, await the check-in with a timeout
+			if !checkedIn {
+				timeout := s.checkinPeriod()
+				s.log.Debugf("%s service had never checked in, await for check-in for %v", name, timeout)
+				checkedIn = s.awaitCheckin(ctx, comm, timeout)
+			}
+
+			// Received a check-in, send STOPPING
+			if checkedIn {
+				s.log.Debugf("send stopping state to %s service", name)
+				s.state.forceExpectedState(client.UnitStateStopping)
+				comm.CheckinExpected(s.state.toCheckinExpected())
+			} else {
+				s.log.Debugf("%s service had never checked in, proceed to uninstall", name)
+			}
+		}
+
+		s.log.Debugf("uninstall %s service", name)
+		err := s.uninstall(ctx)
+		if err != nil {
+			s.log.Errorf("failed %s service uninstall, err: %v", name, err)
+		}
+	}
+
+	// Force component stopped state
+	s.log.Debugf("set %s service runtime to stopped state", name)
+	s.forceCompState(client.UnitStateStopped, fmt.Sprintf("Stopped: %s service runtime", name))
+}
+
+// awaitCheckin awaits a check-in with timeout.
+func (s *ServiceRuntime) awaitCheckin(ctx context.Context, comm Communicator, timeout time.Duration) bool {
+	name := s.name()
+	t := time.NewTimer(timeout)
+	defer t.Stop()
+
+	for {
+		select {
+		case <-ctx.Done():
+			// stop cancelled
+			s.log.Debugf("stopping %s service, cancelled", name)
+			return false
+		case <-t.C:
+			// stop timed out
+			s.log.Debugf("stopping %s service, timed out", name)
+			return false
+		case <-comm.CheckinObserved():
+			return true
+		}
+	}
+}
+
+func (s *ServiceRuntime) processNewComp(newComp component.Component, comm Communicator) {
+	s.log.Debugf("observed component update for %s service", s.name())
+	sendExpected := s.state.syncExpected(&newComp)
+	changed := s.state.syncUnits(&newComp)
+	if sendExpected || s.state.unsettled() {
+		comm.CheckinExpected(s.state.toCheckinExpected())
+	}
+	if changed {
+		s.sendObserved()
+	}
+}
+
+func (s *ServiceRuntime) processCheckin(checkin *proto.CheckinObserved, comm Communicator, lastCheckin *time.Time) {
+	name := s.name()
+
+	s.log.Debugf("observed check-in for %s service: %v", name, checkin)
+	sendExpected := false
+	changed := false
+
+	if s.state.State == client.UnitStateStarting {
+		// first observation after start, set component to healthy
+		s.state.State = client.UnitStateHealthy
+		s.state.Message = fmt.Sprintf("Healthy: communicating with %s service", name)
+		changed = true
+	}
+
+	if !s.isRunning() {
+		return
+	}
+
+	if lastCheckin.IsZero() {
+		// first check-in
+		sendExpected = true
+	}
+	*lastCheckin = time.Now().UTC()
+	if s.state.syncCheckin(checkin) {
+		changed = true
+	}
+	if s.state.unsettled() {
+		sendExpected = true
+	}
+	if sendExpected {
+		comm.CheckinExpected(s.state.toCheckinExpected())
+	}
+	if changed {
+		s.sendObserved()
+	}
+	if s.state.cleanupStopped() {
+		s.sendObserved()
+	}
+}
+
+// isRunning returns true if the service is running
+func (s *ServiceRuntime) isRunning() bool {
+	return s.state.State != client.UnitStateStopping &&
+		s.state.State != client.UnitStateStopped
+}
+
+// checkStatus checks the check-in state; called on timer
+func (s *ServiceRuntime) checkStatus(checkinPeriod time.Duration, lastCheckin *time.Time, missedCheckins *int) {
+	if s.isRunning() {
+		now := time.Now().UTC()
+		if lastCheckin.IsZero() {
+			// never checked-in
+			*missedCheckins++
+		} else if now.Sub(*lastCheckin) > checkinPeriod {
+			// missed check-in during required period
+			*missedCheckins++
+		} else if now.Sub(*lastCheckin) <= checkinPeriod {
+			*missedCheckins = 0
+		}
+		if *missedCheckins == 0 {
+			s.compState(client.UnitStateHealthy, *missedCheckins)
+		} else if *missedCheckins > 0 && *missedCheckins < maxCheckinMisses {
+			s.compState(client.UnitStateDegraded, *missedCheckins)
+		} else if *missedCheckins >= maxCheckinMisses {
+			// something is wrong; the service should be checking in
+			msg := fmt.Sprintf("Failed: %s service missed %d check-ins", s.name(), maxCheckinMisses)
+			s.forceCompState(client.UnitStateFailed, msg)
+		}
+	}
+}
+
+func (s *ServiceRuntime) checkinPeriod() time.Duration {
+	checkinPeriod := s.comp.Spec.Spec.Service.Timeouts.Checkin
+	if checkinPeriod == 0 {
+		checkinPeriod = defaultCheckServiceStatusInterval
+	}
+	return checkinPeriod
+}
+
+// Watch returns a channel to watch for component state changes.
+//
+// A new state is sent anytime the state for a unit or the whole component changes.
+func (s *ServiceRuntime) Watch() <-chan ComponentState {
+	return s.ch
+}
+
+// Start starts the service.
+//
+// Non-blocking and never returns an error.
+func (s *ServiceRuntime) Start() error {
+	s.actionCh <- actionStart
+	return nil
+}
+
+// Update updates the currComp runtime with a new revision of the component definition.
+//
+// Non-blocking and never returns an error.
+func (s *ServiceRuntime) Update(comp component.Component) error {
+	s.compCh <- comp
+	return nil
+}
+
+// Stop stops the service.
+//
+// Non-blocking and never returns an error.
+func (s *ServiceRuntime) Stop() error {
+	s.actionCh <- actionStop
+	return nil
+}
+
+// Teardown stops and uninstalls the service.
+//
+// Non-blocking and never returns an error.
+func (s *ServiceRuntime) Teardown() error {
+	s.actionCh <- actionTeardown
+	return nil
+}
+
+func (s *ServiceRuntime) forceCompState(state client.UnitState, msg string) {
+	if s.state.forceState(state, msg) {
+		s.sendObserved()
+	}
+}
+
+func (s *ServiceRuntime) sendObserved() {
+	s.ch <- s.state.Copy()
+}
+
+func (s *ServiceRuntime) compState(state client.UnitState, missedCheckins int) {
+	name := s.name()
+	msg := stateUnknownMessage
+	if state == client.UnitStateHealthy {
+		msg = fmt.Sprintf("Healthy: communicating with %s service", name)
+	} else if state == client.UnitStateDegraded {
+		if missedCheckins == 1 {
+			msg = fmt.Sprintf("Degraded: %s service missed 1 check-in", name)
+		} else {
+			msg = fmt.Sprintf("Degraded: %s service missed %d check-ins", name, missedCheckins)
+		}
+	}
+	if s.state.compState(state, msg) {
+		s.sendObserved()
+	}
+}
+
+func (s *ServiceRuntime) name() string {
+	return s.comp.Spec.Spec.Name
+}
+
+// check executes the service check command
+func (s *ServiceRuntime) check(ctx context.Context) error {
+	if s.comp.Spec.Spec.Service.Operations.Check == nil {
+		s.log.Errorf("missing check spec for %s service", s.comp.Spec.BinaryName)
+		return ErrOperationSpecUndefined
+	}
+	s.log.Debugf("check if the %s service is installed", s.comp.Spec.BinaryName)
+	return s.executeServiceCommandImpl(ctx, s.log, s.comp.Spec.BinaryPath, s.comp.Spec.Spec.Service.Operations.Check)
+}
+
+// install executes the service install command
+func (s *ServiceRuntime) install(ctx context.Context) error {
+	if s.comp.Spec.Spec.Service.Operations.Install == nil {
+		s.log.Errorf("missing install spec for %s service", s.comp.Spec.BinaryName)
+		return ErrOperationSpecUndefined
+	}
+	s.log.Debugf("install %s service", s.comp.Spec.BinaryName)
+	return s.executeServiceCommandImpl(ctx, s.log, s.comp.Spec.BinaryPath, s.comp.Spec.Spec.Service.Operations.Install)
+}
+
+// uninstall executes the service uninstall command
+func (s *ServiceRuntime) uninstall(ctx context.Context) error {
+	return uninstallService(ctx, s.log, s.comp, s.executeServiceCommandImpl)
+}
+
+// UninstallService uninstalls the service
+func UninstallService(ctx context.Context, log *logger.Logger, comp component.Component) error {
+	return uninstallService(ctx, log, comp, executeServiceCommand)
+}
+
+func uninstallService(ctx context.Context, log *logger.Logger, comp component.Component, executeServiceCommandImpl executeServiceCommandFunc) error {
+	if comp.Spec.Spec.Service.Operations.Uninstall == nil {
+		log.Errorf("missing uninstall spec for %s service", comp.Spec.BinaryName)
+		return ErrOperationSpecUndefined
+	}
+	log.Debugf("uninstall %s service", comp.Spec.BinaryName)
+	return executeServiceCommandImpl(ctx, log,
comp.Spec.BinaryPath, comp.Spec.Spec.Service.Operations.Uninstall)
+}
diff --git a/pkg/component/runtime/service_command.go b/pkg/component/runtime/service_command.go
new file mode 100644
index 00000000000..61ccda076be
--- /dev/null
+++ b/pkg/component/runtime/service_command.go
@@ -0,0 +1,114 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package runtime
+
+import (
+	"bufio"
+	"context"
+	"errors"
+	"fmt"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/dolmen-go/contextio"
+
+	"github.com/elastic/elastic-agent/pkg/component"
+	"github.com/elastic/elastic-agent/pkg/core/logger"
+	"github.com/elastic/elastic-agent/pkg/core/process"
+)
+
+func executeCommand(ctx context.Context, log *logger.Logger, binaryPath string, args []string, env []string, timeout time.Duration) error {
+	log = log.With("context", "command output")
+	// Create context with timeout if the timeout is greater than 0
+	if timeout > 0 {
+		var cn context.CancelFunc
+		ctx, cn = context.WithTimeout(ctx, timeout)
+		defer cn()
+	}
+
+	opts := []process.StartOption{
+		process.WithContext(ctx),
+		process.WithArgs(args),
+		process.WithEnv(env),
+	}
+
+	// Set the command working directory from the binary path.
+	// This is needed because the endpoint installer was looking for its resources in the current working directory.
+	wdir := filepath.Dir(binaryPath)
+	if wdir != "." {
+		opts = append(opts,
+			process.WithCmdOptions(func(c *exec.Cmd) error {
+				c.Dir = wdir
+				return nil
+			}))
+	}
+
+	proc, err := process.Start(binaryPath, opts...)
+	if err != nil {
+		return fmt.Errorf("failed starting the command: %w", err)
+	}
+
+	// channel for the last error message from the stderr output
+	errch := make(chan string, 1)
+	ctxStderr := contextio.NewReader(ctx, proc.Stderr)
+	if ctxStderr != nil {
+		go func() {
+			var errText string
+			scanner := bufio.NewScanner(ctxStderr)
+			for scanner.Scan() {
+				line := scanner.Text()
+				if len(line) > 0 {
+					txt := strings.TrimSpace(line)
+					if len(txt) > 0 {
+						errText = txt
+						// Log error output line
+						log.Error(errText)
+					}
+				}
+			}
+			errch <- errText
+		}()
+	}
+
+	procState := <-proc.Wait()
+	if errors.Is(ctx.Err(), context.DeadlineExceeded) {
+		err = ctx.Err() // Process was killed due to timeout
+	} else if !procState.Success() {
+		err = &exec.ExitError{ProcessState: procState}
+	}
+
+	if err != nil {
+		errmsg := <-errch
+		errmsg = strings.TrimSpace(errmsg)
+		if errmsg != "" {
+			err = fmt.Errorf("%s: %w", errmsg, err)
+		}
+	}
+
+	return err
+}
+
+func executeServiceCommand(ctx context.Context, log *logger.Logger, binaryPath string, spec *component.ServiceOperationsCommandSpec) error {
+	if spec == nil {
+		log.Warnf("spec is nil, nothing to execute, binaryPath: %s", binaryPath)
+		return nil
+	}
+	return executeCommand(ctx, log, binaryPath, spec.Args, envSpecToEnv(spec.Env), spec.Timeout)
+}
+
+func envSpecToEnv(envSpecs []component.CommandEnvSpec) []string {
+	if len(envSpecs) == 0 {
+		return nil
+	}
+
+	env := make([]string, len(envSpecs))
+
+	for i, spec := range envSpecs {
+		env[i] = spec.Name + "=" + spec.Value
+	}
+	return env
+}
diff --git a/pkg/component/runtime/service_command_test.go b/pkg/component/runtime/service_command_test.go
new file mode 100644
index 00000000000..07e419f2472
--- /dev/null
+++ b/pkg/component/runtime/service_command_test.go
@@ -0,0 +1,183 @@
+// Copyright Elasticsearch
B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "context" + "errors" + "os" + "os/exec" + "path/filepath" + "runtime" + "strings" + "testing" + "text/template" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + "github.com/elastic/elastic-agent-libs/logp" + "github.com/elastic/elastic-agent/pkg/core/logger" +) + +type progConfig struct { + ErrMessage string + ExitCode int + SleepMS int +} + +const testProgramTemplate = ` +package main + +import ( + "fmt" + "os" + "time" +) + +func main() { + if len("{{.ErrMessage}}") > 0 { + fmt.Fprintf(os.Stderr, "{{.ErrMessage}}") + } + if {{.SleepMS}} != 0 { + time.Sleep(time.Duration({{.SleepMS}})*time.Millisecond) + } + if {{.ExitCode}} != 0 { + os.Exit({{.ExitCode}}) + } +} +` +const testModFile = ` +module prog + +go 1.18 +` + +func renderTestProg(cfg progConfig) string { + t := template.Must(template.New("prog").Parse(testProgramTemplate)) + var b strings.Builder + err := t.Execute(&b, cfg) + if err != nil { + panic(err) + } + return b.String() +} + +func getExeName(name string) string { + if runtime.GOOS == "windows" { + return name + ".exe" + } + return name +} + +func prepareTestProg(ctx context.Context, log *logger.Logger, dir string, cfg progConfig) (string, error) { + const name = "prog" + + progPath := filepath.Join(dir, name+".go") + + prog := renderTestProg(cfg) + err := os.WriteFile(progPath, []byte(prog), 0600) + if err != nil { + return "", err + } + + err = os.WriteFile(filepath.Join(dir, "go.mod"), []byte(testModFile), 0600) + if err != nil { + return "", err + } + + err = executeCommand(ctx, log, "go", []string{"build", "-o", dir, progPath}, nil, 0) + if err != nil { + return "", err + } + + return filepath.Join(dir, getExeName(name)), nil +} + +func TestExecuteCommand(t *testing.T) { + log := logp.NewLogger("test_service") + + tests := []struct { + name string + cfg progConfig + timeout time.Duration + wantErr error + }{ + { + name: "success", + }, + { + name: "fail no error output", + cfg: progConfig{"", 1, 0}, + }, + { + name: "fail with error output", + cfg: progConfig{"something failed", 2, 0}, + }, + { + name: "fail with timeout", + cfg: progConfig{"", 3, 5000}, // executable runs for 5 seconds + timeout: 100 * time.Millisecond, + wantErr: context.DeadlineExceeded, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + dir := t.TempDir() + + // Prepare test program with expected param + exePath, err := prepareTestProg(ctx, log, dir, tc.cfg) + if err != nil { + t.Fatal(err) + } + + err = executeCommand(ctx, log, exePath, nil, nil, tc.timeout) + + if tc.wantErr != nil { + diff := cmp.Diff(tc.wantErr, err, cmpopts.EquateErrors()) + if diff != "" { + t.Fatal(diff) + } + } else { + // If exit code is not 0, expect error + if tc.cfg.ExitCode == 0 { + if err != nil { + t.Fatal(err) + } + } else { + if err != nil { + var exerr *exec.ExitError + if errors.As(err, &exerr) { + diff := cmp.Diff(tc.cfg.ExitCode, exerr.ExitCode()) + if diff != "" { + t.Fatal(diff) + } + } else { + t.Fatalf("want *exec.ExitError, got %T", err) + } + } else { + t.Fatalf("want error code %v, got nil", tc.cfg.ExitCode) + } + } + } + + // Test that we get the proper error message + // The stderr message is prepended to the err, separated with 
':', for example "something failed: exit status 2".
+			if err != nil && tc.cfg.ErrMessage != "" {
+				arr := strings.Split(err.Error(), ":")
+				diff := cmp.Diff(tc.cfg.ErrMessage, arr[0])
+				if diff != "" {
+					t.Fatal(diff)
+				}
+			}
+		})
+	}
+
+}
diff --git a/pkg/component/runtime/state.go b/pkg/component/runtime/state.go
index 4a39a21d82e..832b7548ba7 100644
--- a/pkg/component/runtime/state.go
+++ b/pkg/component/runtime/state.go
@@ -5,6 +5,7 @@
 package runtime
 
 import (
+	"errors"
 	"reflect"
 
 	"github.com/elastic/elastic-agent-client/v7/pkg/client"
@@ -131,7 +132,7 @@ func (s *ComponentState) syncExpected(comp *component.Component) bool {
 			existing.configStateIdx = 1
 			changed = true
 		}
-		if existing.err != unit.Err {
+		if !errors.Is(existing.err, unit.Err) {
 			existing.err = unit.Err
 			if existing.err != nil {
 				existing.state = client.UnitStateFailed
@@ -221,10 +222,9 @@ func (s *ComponentState) syncCheckin(checkin *proto.CheckinObserved) bool {
 		if unit.Payload != nil {
 			payload = unit.Payload.AsMap()
 		}
-
 		touched[key] = true
 		_, inExpected := s.expectedUnits[key]
-		existing, _ := s.Units[key]
+		existing := s.Units[key]
 		existing.unitState = client.UnitState(unit.State)
 		existing.unitMessage = unit.Message
 		existing.unitPayload = payload
@@ -400,6 +400,18 @@ func (s *ComponentState) forceState(state client.UnitState, msg string) bool {
 	return changed
 }
 
+// forceExpectedState force updates the expected state for the entire component, forcing that state on all expected units.
+func (s *ComponentState) forceExpectedState(state client.UnitState) {
+	for k, unit := range s.expectedUnits {
+		if unit.state != state {
+			unit.state = state
+		}
+
+		// unit is a copy and must be set back into the map
+		s.expectedUnits[k] = unit
+	}
+}
+
 // compState updates just the component state not all the units.
 func (s *ComponentState) compState(state client.UnitState, msg string) bool {
 	if s.State != state || s.Message != msg {
diff --git a/pkg/component/spec.go b/pkg/component/spec.go
index 3d8b5cfe504..be20b92208c 100644
--- a/pkg/component/spec.go
+++ b/pkg/component/spec.go
@@ -81,10 +81,22 @@ func (t *CommandTimeoutSpec) InitDefaults() {
 	t.Stop = 30 * time.Second
 }
 
+// ServiceTimeoutSpec is the timeout specification for the service.
+type ServiceTimeoutSpec struct {
+	Checkin time.Duration `config:"checkin" yaml:"checkin"`
+}
+
+// InitDefaults initializes the defaults for the timeouts.
+func (t *ServiceTimeoutSpec) InitDefaults() {
+	t.Checkin = 30 * time.Second
+}
+
 // ServiceSpec is the specification for an input that executes as a service.
 type ServiceSpec struct {
+	CPort      int                   `config:"cport" yaml:"cport" validate:"required"`
 	Log        *ServiceLogSpec       `config:"log,omitempty" yaml:"log,omitempty"`
 	Operations ServiceOperationsSpec `config:"operations" yaml:"operations" validate:"required"`
+	Timeouts   ServiceTimeoutSpec    `config:"timeouts" yaml:"timeouts"`
 }
 
 // ServiceLogSpec is the specification for the log path that the service logs to.
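As an aside on the `forceExpectedState` hunk above: the explicit write-back is required because indexing a Go map of struct values yields a copy. A standalone sketch of the pitfall (the `unitState` type here is illustrative, not the one from this patch):

```go
package main

import "fmt"

type unitState struct{ state string }

func main() {
	units := map[string]unitState{"u1": {state: "HEALTHY"}}

	// This mutates only a copy read out of the map; the map entry is unchanged.
	u := units["u1"]
	u.state = "STOPPING"
	fmt.Println(units["u1"].state) // still "HEALTHY"

	// The copy must be stored back, as forceExpectedState does.
	units["u1"] = u
	fmt.Println(units["u1"].state) // now "STOPPING"
}
```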
diff --git a/pkg/component/spec_test.go b/pkg/component/spec_test.go index 33866df5b0f..c51ef4b4517 100644 --- a/pkg/component/spec_test.go +++ b/pkg/component/spec_test.go @@ -130,6 +130,8 @@ inputs: outputs: - shipper service: + name: "co.elastic.endpoint" + cport: 6788 operations: install: args: ["install"] diff --git a/pkg/core/process/process.go b/pkg/core/process/process.go index 428469687b6..553dc4989dd 100644 --- a/pkg/core/process/process.go +++ b/pkg/core/process/process.go @@ -17,51 +17,78 @@ type Info struct { PID int Process *os.Process Stdin io.WriteCloser + Stderr io.ReadCloser } -// Option is an option func to change the underlying command -type Option func(c *exec.Cmd) error +// CmdOption is an option func to change the underlying command +type CmdOption func(c *exec.Cmd) error + +// StartConfig configuration for the process start set by the StartOption functions +type StartConfig struct { + ctx context.Context + uid, gid int + args, env []string + cmdOpts []CmdOption +} + +// StartOption start options function +type StartOption func(cfg *StartConfig) // Start starts a new process -func Start(path string, uid, gid int, args []string, env []string, opts ...Option) (proc *Info, err error) { - return StartContext(nil, path, uid, gid, args, env, opts...) //nolint:staticcheck // calls a different function if no ctx +func Start(path string, opts ...StartOption) (proc *Info, err error) { + // Apply options + c := StartConfig{ + uid: os.Geteuid(), + gid: os.Getegid(), + } + + for _, opt := range opts { + opt(&c) + } + + return startContext(c.ctx, path, c.uid, c.gid, c.args, c.env, c.cmdOpts...) } -// StartContext starts a new process with context. -func StartContext(ctx context.Context, path string, uid, gid int, args []string, env []string, opts ...Option) (*Info, error) { - cmd, err := getCmd(ctx, path, env, uid, gid, args...) - if err != nil { - return nil, fmt.Errorf("failed to create command for %q: %w", path, err) +// WithContext sets an optional context +func WithContext(ctx context.Context) StartOption { + return func(cfg *StartConfig) { + cfg.ctx = ctx } - for _, o := range opts { - if err := o(cmd); err != nil { - return nil, fmt.Errorf("failed to set option command for %q: %w", path, err) - } +} + +// WithArgs sets arguments +func WithArgs(args []string) StartOption { + return func(cfg *StartConfig) { + cfg.args = args } - stdin, err := cmd.StdinPipe() - if err != nil { - return nil, fmt.Errorf("failed to create stdin for %q: %w", path, err) +} + +// WithEnv sets the environment variables +func WithEnv(env []string) StartOption { + return func(cfg *StartConfig) { + cfg.env = env } +} - // start process - if err := cmd.Start(); err != nil { - return nil, fmt.Errorf("failed to start %q: %w", path, err) +// WithUID sets UID +func WithUID(uid int) StartOption { + return func(cfg *StartConfig) { + cfg.uid = uid } +} - // Hook to JobObject on windows, noop on other platforms. - // This ties the application processes lifespan to the agent's. - // Fixes the orphaned beats processes left behind situation - // after the agent process gets killed. 
- if err := JobObject.Assign(cmd.Process); err != nil { - _ = killCmd(cmd.Process) - return nil, fmt.Errorf("failed job assignment %q: %w", path, err) +// WithGID sets GID +func WithGID(gid int) StartOption { + return func(cfg *StartConfig) { + cfg.gid = gid } +} - return &Info{ - PID: cmd.Process.Pid, - Process: cmd.Process, - Stdin: stdin, - }, err +// WithCmdOptions sets the exec.Cmd options +func WithCmdOptions(cmdOpts ...CmdOption) StartOption { + return func(cfg *StartConfig) { + cfg.cmdOpts = cmdOpts + } } // Kill kills the process. @@ -99,3 +126,49 @@ func (i *Info) Wait() <-chan *os.ProcessState { return ch } + +// startContext starts a new process with context. The context is optional and can be nil. +func startContext(ctx context.Context, path string, uid, gid int, args []string, env []string, opts ...CmdOption) (*Info, error) { + cmd, err := getCmd(ctx, path, env, uid, gid, args...) + if err != nil { + return nil, fmt.Errorf("failed to create command for %q: %w", path, err) + } + for _, o := range opts { + if err := o(cmd); err != nil { + return nil, fmt.Errorf("failed to set option command for %q: %w", path, err) + } + } + stdin, err := cmd.StdinPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stdin for %q: %w", path, err) + } + + var stderr io.ReadCloser + if cmd.Stderr == nil { + stderr, err = cmd.StderrPipe() + if err != nil { + return nil, fmt.Errorf("failed to create stderr for %q: %w", path, err) + } + } + + // start process + if err := cmd.Start(); err != nil { + return nil, fmt.Errorf("failed to start %q: %w", path, err) + } + + // Hook to JobObject on windows, noop on other platforms. + // This ties the application processes lifespan to the agent's. + // Fixes the orphaned beats processes left behind situation + // after the agent process gets killed. 
+ if err := JobObject.Assign(cmd.Process); err != nil { + _ = killCmd(cmd.Process) + return nil, fmt.Errorf("failed job assignment %q: %w", path, err) + } + + return &Info{ + PID: cmd.Process.Pid, + Process: cmd.Process, + Stdin: stdin, + Stderr: stderr, + }, err +} diff --git a/specs/endpoint-security.spec.yml b/specs/endpoint-security.spec.yml index a34c66086de..69827c68e75 100644 --- a/specs/endpoint-security.spec.yml +++ b/specs/endpoint-security.spec.yml @@ -14,6 +14,7 @@ inputs: - condition: ${runtime.arch} == 'arm64' and ${runtime.family} == 'redhat' and ${runtime.major} == '7' message: "No support for RHEL7 on arm64" service: + cport: 6788 log: path: "/opt/Elastic/Endpoint/state/log/endpoint-*.log" operations: &operations @@ -46,6 +47,7 @@ inputs: outputs: - elasticsearch service: + cport: 6788 log: path: "/Library/Elastic/Endpoint/state/log/endpoint-*.log" operations: *operations @@ -60,6 +62,7 @@ inputs: - condition: ${runtime.user.root} == false message: "Elastic Agent must be running as Administrator or SYSTEM" service: + cport: 6788 log: path: "C:\\Program Files\\Elastic\\Endpoint\\state\\log\\endpoint-*.log" - operations: *operations + operations: *operations \ No newline at end of file From bd36958578571f350a1161052e4122cc9d94853f Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Thu, 3 Nov 2022 19:47:52 +0100 Subject: [PATCH 32/49] Sync components with state during container start (#1653) * Sync components with state during container start * path approach --- .../pkg/agent/application/paths/common.go | 17 ++++++++---- internal/pkg/agent/cmd/container.go | 27 +------------------ 2 files changed, 13 insertions(+), 31 deletions(-) diff --git a/internal/pkg/agent/application/paths/common.go b/internal/pkg/agent/application/paths/common.go index b89a197fdff..41284026091 100644 --- a/internal/pkg/agent/application/paths/common.go +++ b/internal/pkg/agent/application/paths/common.go @@ -35,6 +35,7 @@ var ( configFilePath string logsPath string downloadsPath string + componentsPath string installPath string unversionedHome bool tmpCreator sync.Once @@ -46,14 +47,23 @@ func init() { logsPath = topPath unversionedHome = false // only versioned by container subcommand + // these should never change + versionedHome := VersionedHome(topPath) + downloadsPath = filepath.Join(versionedHome, "downloads") + componentsPath = filepath.Join(versionedHome, "components") + fs := flag.CommandLine fs.StringVar(&topPath, "path.home", topPath, "Agent root path") fs.BoolVar(&unversionedHome, "path.home.unversioned", unversionedHome, "Agent root path is not versioned based on build") fs.StringVar(&configPath, "path.config", configPath, "Config path is the directory Agent looks for its config file") fs.StringVar(&configFilePath, "c", DefaultConfigName, "Configuration file, relative to path.config") fs.StringVar(&logsPath, "path.logs", logsPath, "Logs path contains Agent log output") - fs.StringVar(&downloadsPath, "path.downloads", downloadsPath, "Downloads path contains binaries Agent downloads") fs.StringVar(&installPath, "path.install", installPath, "Install path contains binaries Agent extracts") + + // enable user to download update artifacts to alternative place + // TODO: remove path.downloads support on next major (this can be configured using `agent.download.targetDirectory`) + // `path.download` serves just as init value for `agent.download.targetDirectory` + fs.StringVar(&downloadsPath, "path.downloads", downloadsPath, "Downloads path contains binaries Agent downloads") } // Top returns the 
top directory for Elastic Agent, all the versioned @@ -146,7 +156,7 @@ func Run() string { // Components returns the component directory for Agent func Components() string { - return filepath.Join(Home(), "components") + return componentsPath } // Logs returns the log directory for Agent @@ -166,9 +176,6 @@ func VersionedHome(base string) string { // Downloads returns the downloads directory for Agent func Downloads() string { - if downloadsPath == "" { - return filepath.Join(Home(), "downloads") - } return downloadsPath } diff --git a/internal/pkg/agent/cmd/container.go b/internal/pkg/agent/cmd/container.go index 06fd9bdf962..91c755bedfc 100644 --- a/internal/pkg/agent/cmd/container.go +++ b/internal/pkg/agent/cmd/container.go @@ -782,11 +782,7 @@ func setPaths(statePath, configPath, logsPath string, writePaths bool) error { return err } } - // sync the downloads to the data directory - destDownloads := filepath.Join(statePath, "data", "downloads") - if err := syncDir(paths.Downloads(), destDownloads); err != nil { - return fmt.Errorf("syncing download directory to STATE_PATH(%s) failed: %w", statePath, err) - } + originalInstall := paths.Install() originalTop := paths.Top() paths.SetTop(topPath) @@ -866,27 +862,6 @@ func tryContainerLoadPaths() error { return setPaths(paths.StatePath, paths.ConfigPath, paths.LogsPath, false) } -func syncDir(src string, dest string) error { - return filepath.Walk(src, func(path string, info os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) { - // source dir exists only if there's agent artifact - return nil - } - return err - } - relativePath := strings.TrimPrefix(path, src) - if info.IsDir() { - err = os.MkdirAll(filepath.Join(dest, relativePath), info.Mode()) - if err != nil { - return err - } - return nil - } - return copyFile(filepath.Join(dest, relativePath), path, info.Mode()) - }) -} - func copyFile(destPath string, srcPath string, mode os.FileMode) error { // if mode is unset; set to the same as the source file if mode == 0 { From 4b17703dc2d0dbb70074b462cec17b1c52d5a10a Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Fri, 4 Nov 2022 10:44:53 -0400 Subject: [PATCH 33/49] Subprocess reader start. --- pkg/component/runtime/command.go | 18 ++++++++++++------ pkg/component/runtime/command_logger.go | 11 +++++++++++ 2 files changed, 23 insertions(+), 6 deletions(-) create mode 100644 pkg/component/runtime/command_logger.go diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index 22c1898fcdc..80a53b7ac72 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -8,6 +8,7 @@ import ( "context" "errors" "fmt" + "github.com/elastic/elastic-agent/pkg/core/logger" "os" "os/exec" "path/filepath" @@ -50,6 +51,7 @@ type procState struct { // CommandRuntime provides the command runtime for running a component as a subprocess. type CommandRuntime struct { + logger *logger.Logger current component.Component monitor MonitoringManager @@ -67,11 +69,13 @@ type CommandRuntime struct { } // NewCommandRuntime creates a new command runtime for the provided component. 
-func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) { +func NewCommandRuntime(logger *logger.Logger, comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) { if comp.Spec.Spec.Command == nil { return nil, errors.New("must have command defined in specification") } + logger = logger.With("component", comp.ID).With("type", comp.Spec.InputType) return &CommandRuntime{ + logger: logger, current: comp, ch: make(chan ComponentState), actionCh: make(chan actionMode), @@ -303,7 +307,7 @@ func (c *CommandRuntime) start(comm Communicator) error { proc, err := process.Start(path, process.WithArgs(args), process.WithEnv(env), - process.WithCmdOptions(attachOutErr, dirPath(workDir))) + process.WithCmdOptions(attachOutErr(c.logger), dirPath(workDir))) if err != nil { return err } @@ -409,10 +413,12 @@ func (c *CommandRuntime) workDir(uid int, gid int) (string, error) { return path, nil } -func attachOutErr(cmd *exec.Cmd) error { - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - return nil +func attachOutErr(logger *logger.Logger) process.CmdOption { + return func(cmd *exec.Cmd) error { + cmd.Stdout = &commandLogger{logger} + cmd.Stderr = &commandLogger{logger} + return nil + } } func dirPath(path string) process.CmdOption { diff --git a/pkg/component/runtime/command_logger.go b/pkg/component/runtime/command_logger.go new file mode 100644 index 00000000000..2b2d6db8f83 --- /dev/null +++ b/pkg/component/runtime/command_logger.go @@ -0,0 +1,11 @@ +package runtime + +import "github.com/elastic/elastic-agent/pkg/core/logger" + +type commandLogger struct { + logger *logger.Logger +} + +func (r *commandLogger) Write(p []byte) (n int, err error) { + return +} From 2a84a70cfe56878db0de38d5d1749619a86ee664 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 7 Nov 2022 10:34:07 -0500 Subject: [PATCH 34/49] Implement io.Writer to handle reading stdout/stderr for spawned components. 
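
Stdout/stderr of each spawned component now feed an io.Writer that buffers
partial writes until a newline arrives, tries to parse each completed line as
ndjson, and falls back to logging the raw line at info level. A minimal sketch
of the intended behavior (illustrative only; `core` stands in for any
zapcore-backed writer such as the one returned by logger.Core()):

    w := newLogWriter(core)
    _, _ = w.Write([]byte(`{"log.level":"error","message":"boom"`)) // no newline yet: buffered
    _, _ = w.Write([]byte("}\n"))                                   // line complete: parsed as ndjson, error level
    _, _ = w.Write([]byte("plain text\n"))                          // not JSON: logged as-is at info level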
--- go.mod | 1 + go.sum | 13 +- pkg/component/runtime/command.go | 9 +- pkg/component/runtime/command_logger.go | 11 -- pkg/component/runtime/log_writer.go | 225 +++++++++++++++++++++++ pkg/component/runtime/log_writer_test.go | 214 +++++++++++++++++++++ pkg/component/runtime/manager_test.go | 5 +- pkg/component/runtime/runtime.go | 2 +- 8 files changed, 451 insertions(+), 29 deletions(-) delete mode 100644 pkg/component/runtime/command_logger.go create mode 100644 pkg/component/runtime/log_writer.go create mode 100644 pkg/component/runtime/log_writer_test.go diff --git a/go.mod b/go.mod index df1845dff01..4c82fd8ef3c 100644 --- a/go.mod +++ b/go.mod @@ -156,6 +156,7 @@ replace ( github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/dop251/goja_nodejs => github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 + github.com/elastic/elastic-agent-libs => github.com/blakerouse/elastic-agent-libs v0.0.0-20221105171455-89215adc2946 github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v1.4.8-0.20211018144411-a81f2b630e7c github.com/tonistiigi/fifo => github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c ) diff --git a/go.sum b/go.sum index 73ded2d2cf3..141019bba2b 100644 --- a/go.sum +++ b/go.sum @@ -159,6 +159,8 @@ github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngE github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= +github.com/blakerouse/elastic-agent-libs v0.0.0-20221105171455-89215adc2946 h1:OGhNWuiNJCdb9flvTpmQwrC3HeCTP2O2QA1HEnZAzqA= +github.com/blakerouse/elastic-agent-libs v0.0.0-20221105171455-89215adc2946/go.mod h1:0J9lzJh+BjttIiVjYDLncKYCEWUUHiiqnuI64y6C6ss= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -386,9 +388,6 @@ github.com/elastic/elastic-agent-autodiscover v0.2.1 h1:Nbeayh3vq2FNm6xaFo34mhUd github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+iG9eyyXaQXBbAMHa+Y6Z8hXfcGY= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= -github.com/elastic/elastic-agent-libs v0.2.5/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= -github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= -github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= github.com/elastic/elastic-agent-system-metrics v0.4.4 h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0= github.com/elastic/elastic-agent-system-metrics v0.4.4/go.mod h1:tF/f9Off38nfzTZHIVQ++FkXrDm9keFhFpJ+3pQ00iI= github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY= @@ -1133,7 +1132,6 @@ github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf 
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= @@ -1215,7 +1213,6 @@ go.elastic.co/apm/module/apmhttp/v2 v2.0.0 h1:GNfmK1LD4nE5fYqbLxROCpg1ucyjSFG5iw go.elastic.co/apm/module/apmhttp/v2 v2.0.0/go.mod h1:5KmxcNN7hkJh8sVW3Ggl/pYgnwiNenygE46bZoUb9RE= go.elastic.co/apm/v2 v2.0.0 h1:5BeBh+oIrVbMwPrW3uO9Uxm4w7HpKy92lYl5Rfj69Kg= go.elastic.co/apm/v2 v2.0.0/go.mod h1:KGQn56LtRmkQjt2qw4+c1Jz8gv9rCBUU/m21uxrqcps= -go.elastic.co/ecszap v1.0.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ= go.elastic.co/ecszap v1.0.1 h1:mBxqEJAEXBlpi5+scXdzL7LTFGogbuxipJC0KTZicyA= go.elastic.co/ecszap v1.0.1/go.mod h1:SVjazT+QgNeHSGOCUHvRgN+ZRj5FkB7IXQQsncdF57A= go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= @@ -1261,7 +1258,6 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= -go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1270,13 +1266,10 @@ go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= -go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= -go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8= go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw= @@ -1625,8 +1618,6 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod 
h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index 80a53b7ac72..b4735b530a6 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -8,13 +8,14 @@ import ( "context" "errors" "fmt" - "github.com/elastic/elastic-agent/pkg/core/logger" "os" "os/exec" "path/filepath" "runtime" "time" + "github.com/elastic/elastic-agent/pkg/core/logger" + "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" "github.com/elastic/elastic-agent/pkg/utils" @@ -69,7 +70,7 @@ type CommandRuntime struct { } // NewCommandRuntime creates a new command runtime for the provided component. -func NewCommandRuntime(logger *logger.Logger, comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) { +func NewCommandRuntime(comp component.Component, logger *logger.Logger, monitor MonitoringManager) (ComponentRuntime, error) { if comp.Spec.Spec.Command == nil { return nil, errors.New("must have command defined in specification") } @@ -415,8 +416,8 @@ func (c *CommandRuntime) workDir(uid int, gid int) (string, error) { func attachOutErr(logger *logger.Logger) process.CmdOption { return func(cmd *exec.Cmd) error { - cmd.Stdout = &commandLogger{logger} - cmd.Stderr = &commandLogger{logger} + cmd.Stdout = newLogWriter(logger.Core()) + cmd.Stderr = newLogWriter(logger.Core()) return nil } } diff --git a/pkg/component/runtime/command_logger.go b/pkg/component/runtime/command_logger.go deleted file mode 100644 index 2b2d6db8f83..00000000000 --- a/pkg/component/runtime/command_logger.go +++ /dev/null @@ -1,11 +0,0 @@ -package runtime - -import "github.com/elastic/elastic-agent/pkg/core/logger" - -type commandLogger struct { - logger *logger.Logger -} - -func (r *commandLogger) Write(p []byte) (n int, err error) { - return -} diff --git a/pkg/component/runtime/log_writer.go b/pkg/component/runtime/log_writer.go new file mode 100644 index 00000000000..8b9572bed17 --- /dev/null +++ b/pkg/component/runtime/log_writer.go @@ -0,0 +1,225 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "bytes" + "encoding/json" + "errors" + "strings" + "time" + + "go.uber.org/zap" + + "go.uber.org/zap/zapcore" +) + +type zapcoreWriter interface { + Write(zapcore.Entry, []zapcore.Field) error +} + +// logWriter is an `io.Writer` that takes lines and passes them through the logger. +// +// `Write` handles parsing lines as either ndjson or plain text. 
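+//
+// Partial writes that do not end in a newline are buffered in `remainder` and
+// prepended on the next call, so a line split across multiple writes is still
+// emitted as a single log entry.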
+type logWriter struct { + loggerCore zapcoreWriter + remainder []byte +} + +func newLogWriter(core zapcoreWriter) *logWriter { + return &logWriter{ + loggerCore: core, + } +} + +func (r *logWriter) Write(p []byte) (int, error) { + if len(p) == 0 { + // nothing to do + return 0, nil + } + offset := 0 + for { + idx := bytes.IndexByte(p[offset:], '\n') + if idx < 0 { + // not all used add to remainder to be used on next call + if r.remainder == nil || len(r.remainder) == 0 { + r.remainder = p[offset:] + } else { + r.remainder = append(r.remainder, p[offset:]...) + } + return len(p), nil + } + + var line []byte + if r.remainder != nil { + line = r.remainder + r.remainder = nil + line = append(line, p[offset:offset+idx]...) + } else { + line = append(line, p[offset:offset+idx]...) + } + offset += idx + 1 + // drop '\r' from line (needed for Windows) + if len(line) > 0 && line[len(line)-1] == '\r' { + line = line[0 : len(line)-1] + } + if len(line) == 0 { + // empty line + continue + } + str := strings.TrimSpace(string(line)) + // try to parse line as JSON + if str[0] == '{' && r.handleJSON(str) { + // handled as JSON + continue + } + // considered standard text being it's not JSON, log at basic info level + _ = r.loggerCore.Write(zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Now(), + Message: str, + }, nil) + } +} + +func (r *logWriter) handleJSON(line string) bool { + var evt map[string]interface{} + if err := json.Unmarshal([]byte(line), &evt); err != nil { + return false + } + lvl := getLevel(evt) + ts := getTimestamp(evt) + msg := getMessage(evt) + fields := getFields(evt) + _ = r.loggerCore.Write(zapcore.Entry{ + Level: lvl, + Time: ts, + Message: msg, + }, fields) + return true +} + +func getLevel(evt map[string]interface{}) zapcore.Level { + lvl := zapcore.InfoLevel + err := unmarshalLevel(&lvl, getStrVal(evt, "log.level")) + if err != nil { + err := unmarshalLevel(&lvl, getStrVal(evt, "log", "level")) + if err != nil { + err := unmarshalLevel(&lvl, getStrVal(evt, "level")) + if err == nil { + deleteVal(evt, "level") + } + } else { + deleteVal(evt, "log", "level") + } + } else { + deleteVal(evt, "log.level") + } + return lvl +} + +func unmarshalLevel(lvl *zapcore.Level, val string) error { + if val == "" { + return errors.New("empty val") + } else if val == "trace" { + // zap doesn't handle trace level we cast to debug + *lvl = zapcore.DebugLevel + return nil + } + return lvl.UnmarshalText([]byte(val)) +} + +func getMessage(evt map[string]interface{}) string { + msg := getStrVal(evt, "message") + if msg == "" { + msg = getStrVal(evt, "msg") + if msg != "" { + deleteVal(evt, "msg") + } + } else { + deleteVal(evt, "message") + } + return msg +} + +func getTimestamp(evt map[string]interface{}) time.Time { + t, err := time.Parse(time.RFC3339Nano, getStrVal(evt, "@timestamp")) + if err != nil { + t, err = time.Parse(time.RFC3339Nano, getStrVal(evt, "timestamp")) + if err != nil { + t, err = time.Parse(time.RFC3339Nano, getStrVal(evt, "time")) + if err != nil { + t = time.Now() + } else { + deleteVal(evt, "time") + } + } else { + deleteVal(evt, "timestamp") + } + } else { + deleteVal(evt, "@timestamp") + } + return t +} + +func getFields(evt map[string]interface{}) []zapcore.Field { + fields := make([]zapcore.Field, 0, len(evt)) + for k, v := range evt { + fields = append(fields, zap.Any(k, v)) + } + return fields +} + +func getStrVal(evt map[string]interface{}, fields ...string) string { + if len(fields) == 0 { + panic("must provide at least one field") + } + last := len(fields) - 
1 + for i, field := range fields { + if i == last { + raw, ok := evt[field] + if !ok { + return "" + } + str, ok := raw.(string) + if !ok { + return "" + } + return str + } + raw, ok := evt[field] + if !ok { + return "" + } + nested, ok := raw.(map[string]interface{}) + if !ok { + return "" + } + evt = nested + } + return "" +} + +func deleteVal(evt map[string]interface{}, fields ...string) { + if len(fields) == 0 { + panic("must provide at least one field") + } + last := len(fields) - 1 + for i, field := range fields { + if i == last { + delete(evt, field) + return + } + raw, ok := evt[field] + if !ok { + return + } + nested, ok := raw.(map[string]interface{}) + if !ok { + return + } + evt = nested + } +} diff --git a/pkg/component/runtime/log_writer_test.go b/pkg/component/runtime/log_writer_test.go new file mode 100644 index 00000000000..7c23bc242c8 --- /dev/null +++ b/pkg/component/runtime/log_writer_test.go @@ -0,0 +1,214 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package runtime + +import ( + "sort" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/zap" + + "go.uber.org/zap/zapcore" +) + +type wrote struct { + entry zapcore.Entry + fields []zapcore.Field +} + +func TestLogWriter(t *testing.T) { + scenarios := []struct { + Name string + Lines []string + Wrote []wrote + }{ + { + Name: "multi plain text line", + Lines: []string{ + "simple written line\r\n", + "another written line\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "simple written line", + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "another written line", + }, + }, + }, + }, + { + Name: "multi split text line", + Lines: []string{ + "simple written line\r\n", + " another line sp", + "lit on ", + "", + "multi writes\n", + "\r\n", + "\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "simple written line", + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: "another line split on multi writes", + }, + }, + }, + }, + { + Name: "json log lines", + Lines: []string{ + `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "debug", "message": "message field", "string": "extra", "int": 50}`, + "\n", + `{"timestamp": "2009-11-10T23:00:01Z", "log": {"level": "warn"}, "msg": "msg field", "string": "extra next", "int": 100}`, + "\n", + `{"time": "2009-11-10T23:00:02Z", "level": "trace", "message": "message field", "nested": {"key": "value"}}`, + "\n", + `{"level": "error", "message": "error string"}`, + "\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.DebugLevel, + Time: parseTime("2009-11-10T23:00:00Z"), + Message: "message field", + }, + fields: []zapcore.Field{ + zap.String("string", "extra"), + zap.Float64("int", 50), + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.WarnLevel, + Time: parseTime("2009-11-10T23:00:01Z"), + Message: "msg field", + }, + fields: []zapcore.Field{ + zap.String("string", "extra next"), + zap.Float64("int", 100), + zap.Any("log", map[string]interface{}{}), + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.DebugLevel, + Time: parseTime("2009-11-10T23:00:02Z"), + Message: "message 
field", + }, + fields: []zapcore.Field{ + zap.Any("nested", map[string]interface{}{ + "key": "value", + }), + }, + }, + { + entry: zapcore.Entry{ + Level: zapcore.ErrorLevel, + Time: time.Time{}, + Message: "error string", + }, + fields: []zapcore.Field{}, + }, + }, + }, + { + Name: "invalid JSON line", + Lines: []string{ + `{"broken": json`, + "\n", + }, + Wrote: []wrote{ + { + entry: zapcore.Entry{ + Level: zapcore.InfoLevel, + Time: time.Time{}, + Message: `{"broken": json`, + }, + }, + }, + }, + } + + for _, scenario := range scenarios { + t.Run(scenario.Name, func(t *testing.T) { + c := &captureCore{} + w := newLogWriter(c) + for _, line := range scenario.Lines { + l := len([]byte(line)) + c, err := w.Write([]byte(line)) + require.NoError(t, err) + require.Equal(t, l, c) + } + require.Len(t, c.wrote, len(scenario.Wrote)) + for i := 0; i < len(scenario.Wrote); i++ { + e := scenario.Wrote[i] + o := c.wrote[i] + if e.entry.Time.IsZero() { + // can't ensure times match; set it to observed before ensuring its equal + e.entry.Time = o.entry.Time + } + assert.Equal(t, e.entry, o.entry) + + // ensure the fields are in the same order (doesn't really matter for logging; but test cares) + if len(e.fields) > 0 { + sortFields(e.fields) + } + if len(o.fields) > 0 { + sortFields(o.fields) + } + assert.EqualValues(t, e.fields, o.fields) + } + }) + } +} + +type captureCore struct { + wrote []wrote +} + +func (c *captureCore) Write(entry zapcore.Entry, fields []zapcore.Field) error { + c.wrote = append(c.wrote, wrote{ + entry: entry, + fields: fields, + }) + return nil +} + +func parseTime(t string) time.Time { + v, err := time.Parse(time.RFC3339Nano, t) + if err != nil { + panic(err) + } + return v +} + +func sortFields(fields []zapcore.Field) { + sort.Slice(fields, func(i, j int) bool { + return fields[i].Key < fields[j].Key + }) +} diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go index b71a24c35e0..06d9ac7d876 100644 --- a/pkg/component/runtime/manager_test.go +++ b/pkg/component/runtime/manager_test.go @@ -180,8 +180,9 @@ func TestManager_FakeInput_StartStop(t *testing.T) { }, Units: []component.Unit{ { - ID: "fake-input", - Type: client.UnitTypeInput, + ID: "fake-input", + Type: client.UnitTypeInput, + LogLevel: client.UnitLogLevelTrace, Config: component.MustExpectedConfig(map[string]interface{}{ "type": "fake", "state": int(client.UnitStateHealthy), diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index e06702b2141..8f2323e3678 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -58,7 +58,7 @@ func NewComponentRuntime(comp component.Component, logger *logger.Logger, monito if comp.Err != nil { return NewFailedRuntime(comp) } else if comp.Spec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) + return NewCommandRuntime(comp, logger, monitor) } else if comp.Spec.Spec.Service != nil { return NewServiceRuntime(comp, logger) } From 3ecddd67763a19de394c88da86a296da7f9eb1df Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 7 Nov 2022 11:31:32 -0500 Subject: [PATCH 35/49] Don't inject logging args to beats components. Always have beats log to stderr. 
--- .../application/monitoring/v1_monitor.go | 17 +- specs/auditbeat.spec.yml | 88 ++--- specs/cloudbeat.spec.yml | 82 +++-- specs/filebeat.spec.yml | 342 +++++++++--------- specs/fleet-server.spec.yml | 2 + specs/heartbeat.spec.yml | 4 +- specs/metricbeat.spec.yml | 328 ++++++++--------- specs/osquerybeat.spec.yml | 54 +-- specs/packetbeat.spec.yml | 60 +-- 9 files changed, 492 insertions(+), 485 deletions(-) diff --git a/internal/pkg/agent/application/monitoring/v1_monitor.go b/internal/pkg/agent/application/monitoring/v1_monitor.go index 1d8f2750afd..7eb3f7ac107 100644 --- a/internal/pkg/agent/application/monitoring/v1_monitor.go +++ b/internal/pkg/agent/application/monitoring/v1_monitor.go @@ -58,7 +58,7 @@ var ( supportedBeatsComponents = []string{"filebeat", "metricbeat", "auditbeat", "cloudbeat", "heartbeat", "osquerybeat", "packetbeat"} ) -// Beats monitor is providing V1 monitoring support. +// BeatsMonitor is providing V1 monitoring support for metrics and logs for endpoint-security only. type BeatsMonitor struct { enabled bool // feature flag disabling whole v1 monitoring story config *monitoringConfig @@ -178,21 +178,10 @@ func (b *BeatsMonitor) EnrichArgs(unit, binary string, args []string) []string { } } - loggingPath := loggingPath(unit, b.operatingSystem) - if loggingPath != "" { + if !b.config.C.LogMetrics { appendix = append(appendix, - "-E", "logging.files.path="+filepath.Dir(loggingPath), - "-E", "logging.files.name="+filepath.Base(loggingPath), - "-E", "logging.files.keepfiles=7", - "-E", "logging.files.permission=0640", - "-E", "logging.files.interval=1h", + "-E", "logging.metrics.enabled=false", ) - - if !b.config.C.LogMetrics { - appendix = append(appendix, - "-E", "logging.metrics.enabled=false", - ) - } } return append(args, appendix...) 
diff --git a/specs/auditbeat.spec.yml b/specs/auditbeat.spec.yml index f8c46a96873..a54a47fbbe8 100644 --- a/specs/auditbeat.spec.yml +++ b/specs/auditbeat.spec.yml @@ -1,43 +1,45 @@ -version: 2 -inputs: - - name: audit/auditd - description: "Auditd" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${AUDITBEAT_GOGC:100}" - - "-E" - - "auditbeat.config.modules.enabled=false" - - name: audit/file_integrity - description: "Audit File Integrity" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: audit/system - description: "Audit System" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: audit/auditd + description: "Auditd" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${AUDITBEAT_GOGC:100}" + - "-E" + - "auditbeat.config.modules.enabled=false" + - name: audit/file_integrity + description: "Audit File Integrity" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: audit/system + description: "Audit System" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/cloudbeat.spec.yml b/specs/cloudbeat.spec.yml index 1ecbe47e330..b54ff87ec18 100644 --- a/specs/cloudbeat.spec.yml +++ b/specs/cloudbeat.spec.yml @@ -1,39 +1,43 @@ -version: 2 -inputs: - - name: cloudbeat - description: "Cloudbeat" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "management.enabled=true" - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "gc_percent=${CLOUDBEAT_GOGC:100}" - - name: cloudbeat/cis_k8s - description: "CIS Kubernetes monitoring" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudbeat/cis_eks - description: "CIS elastic Kubernetes monitoring" - platforms: *platforms - outputs: *outputs - command: - args: *args \ No newline at end of file +version: 2 +inputs: + - name: cloudbeat + description: "Cloudbeat" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "management.enabled=true" + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${CLOUDBEAT_GOGC:100}" + - name: cloudbeat/cis_k8s + description: "CIS Kubernetes monitoring" + platforms: *platforms + outputs: *outputs + command: + args: 
*args + - name: cloudbeat/cis_eks + description: "CIS elastic Kubernetes monitoring" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/filebeat.spec.yml b/specs/filebeat.spec.yml index 07f3cb7666e..d492cf7606e 100644 --- a/specs/filebeat.spec.yml +++ b/specs/filebeat.spec.yml @@ -1,170 +1,172 @@ -version: 2 -inputs: - - name: aws-cloudwatch - description: "AWS Cloudwatch" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${FILEBEAT_GOGC:100}" - - "-E" - - "filebeat.config.modules.enabled=false" - - name: aws-s3 - description: "AWS S3" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure-eventhub - description: "Azure Eventhub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry - description: "PCF Cloudfoundry" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: container - description: "Container logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: docker - aliases: - - log/docker - description: "Docker logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: gcp-pubsub - description: "GCP Pub-Sub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: http_endpoint - description: "HTTP Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: httpjson - description: "HTTP JSON Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: journald - description: "Journald" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kafka - description: "Kafka" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: log - aliases: - - logfile - - event/file - description: "Logfile" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mqtt - description: "MQTT" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: netflow - description: "Netflow" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: o365audit - description: "Office 365 Audit" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis - aliases: - - log/redis_slowlog - description: "Redis" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syslog - aliases: - - log/syslog - description: "Syslog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: tcp - aliases: - - event/tcp - description: "TCP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: udp - aliases: - - event/udp - description: "UDP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: unix - description: "Unix Socket" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: winlog - description: "Winlog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: filestream - description: "Filestream" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: aws-cloudwatch + description: 
"AWS Cloudwatch" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${FILEBEAT_GOGC:100}" + - "-E" + - "filebeat.config.modules.enabled=false" + - name: aws-s3 + description: "AWS S3" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: azure-eventhub + description: "Azure Eventhub" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: cloudfoundry + description: "PCF Cloudfoundry" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: container + description: "Container logs" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: docker + aliases: + - log/docker + description: "Docker logs" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: gcp-pubsub + description: "GCP Pub-Sub" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: http_endpoint + description: "HTTP Endpoint" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: httpjson + description: "HTTP JSON Endpoint" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: journald + description: "Journald" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kafka + description: "Kafka" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: log + aliases: + - logfile + - event/file + description: "Logfile" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mqtt + description: "MQTT" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: netflow + description: "Netflow" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: o365audit + description: "Office 365 Audit" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: redis + aliases: + - log/redis_slowlog + description: "Redis" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: syslog + aliases: + - log/syslog + description: "Syslog" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: tcp + aliases: + - event/tcp + description: "TCP" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: udp + aliases: + - event/udp + description: "UDP" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: unix + description: "Unix Socket" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: winlog + description: "Winlog" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: filestream + description: "Filestream" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/fleet-server.spec.yml b/specs/fleet-server.spec.yml index f1e760efe8b..75b39712aa4 100644 --- a/specs/fleet-server.spec.yml +++ b/specs/fleet-server.spec.yml @@ -16,4 +16,6 @@ inputs: args: - "--agent-mode" - "-E" + - "logging.level=info" + - "-E" - "logging.to_stderr=true" diff --git a/specs/heartbeat.spec.yml b/specs/heartbeat.spec.yml index ba6a08934b8..4036020396a 100644 --- 
a/specs/heartbeat.spec.yml +++ b/specs/heartbeat.spec.yml @@ -21,7 +21,9 @@ inputs: - "-E" - "management.enabled=true" - "-E" - - "logging.level=debug" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" - "-E" - "gc_percent=${HEARTBEAT_GOGC:100}" - name: synthetics/http diff --git a/specs/metricbeat.spec.yml b/specs/metricbeat.spec.yml index b160a4f29e7..94a45b86a86 100644 --- a/specs/metricbeat.spec.yml +++ b/specs/metricbeat.spec.yml @@ -1,163 +1,165 @@ -version: 2 -inputs: - - name: beat/metrics - description: "Beat metrics" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${METRICBEAT_GOGC:100}" - - "-E" - - "metricbeat.config.modules.enabled=false" - - name: docker/metrics - description: "Docker metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: elasticsearch/metrics - description: "Elasticsearch metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kibana/metrics - description: "Kibana metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kubernetes/metrics - description: "Kubernetes metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: linux/metrics - description: "Linux metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: logstash/metrics - description: "Logstash metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mongodb/metrics - description: "Mongodb metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mysql/metrics - description: "MySQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: postgresql/metrics - description: "PostgreSQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis/metrics - description: "Redis metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: system/metrics - description: "System metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: uwsgi/metrics - description: "UWSGI metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: windows/metrics - description: "Windows metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: aws/metrics - description: "AWS metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: awsfargate/metrics - description: "AWS Fargate metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure/metrics - description: "Azure metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry/metrics - description: "PCF Cloudfoundry metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: containerd/metrics - description: "Containerd metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mssql/metrics - description: "Microsoft SQL Server metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: oracle/metrics - description: 
"Oracle Database metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syncgateway/metrics - description: "Couchbase Sync Gateway metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: http/metrics - description: "HTTP metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: beat/metrics + description: "Beat metrics" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${METRICBEAT_GOGC:100}" + - "-E" + - "metricbeat.config.modules.enabled=false" + - name: docker/metrics + description: "Docker metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: elasticsearch/metrics + description: "Elasticsearch metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kibana/metrics + description: "Kibana metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: kubernetes/metrics + description: "Kubernetes metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: linux/metrics + description: "Linux metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: logstash/metrics + description: "Logstash metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mongodb/metrics + description: "Mongodb metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mysql/metrics + description: "MySQL metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: postgresql/metrics + description: "PostgreSQL metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: redis/metrics + description: "Redis metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: system/metrics + description: "System metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: uwsgi/metrics + description: "UWSGI metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: windows/metrics + description: "Windows metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: aws/metrics + description: "AWS metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: awsfargate/metrics + description: "AWS Fargate metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: azure/metrics + description: "Azure metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: cloudfoundry/metrics + description: "PCF Cloudfoundry metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: containerd/metrics + description: "Containerd metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: mssql/metrics + description: "Microsoft SQL Server metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: oracle/metrics + description: "Oracle Database metrics" + platforms: *platforms + outputs: *outputs + 
command: + args: *args + - name: syncgateway/metrics + description: "Couchbase Sync Gateway metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: http/metrics + description: "HTTP metrics" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/osquerybeat.spec.yml b/specs/osquerybeat.spec.yml index 31edb9a3edb..2bf4e53b8f8 100644 --- a/specs/osquerybeat.spec.yml +++ b/specs/osquerybeat.spec.yml @@ -1,26 +1,28 @@ -version: 2 -inputs: - - name: osquery - description: "Osquery" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${OSQUERYBEAT_GOGC:100}" +version: 2 +inputs: + - name: osquery + description: "Osquery" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${OSQUERYBEAT_GOGC:100}" diff --git a/specs/packetbeat.spec.yml b/specs/packetbeat.spec.yml index 0519078cac8..cd788b89add 100644 --- a/specs/packetbeat.spec.yml +++ b/specs/packetbeat.spec.yml @@ -1,29 +1,31 @@ -version: 2 -inputs: - - name: packet - description: "Packet Capture" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${PACKETBEAT_GOGC:100}" +version: 2 +inputs: + - name: packet + description: "Packet Capture" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${PACKETBEAT_GOGC:100}" From d7e7d16205fd071c253f3fdd3b89df0508782cc7 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 7 Nov 2022 17:02:17 -0500 Subject: [PATCH 36/49] Update to v0.2.15 of elastic-agent-libs. 
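
Each spec file defines its command args once and reuses them through YAML
anchors (&args / *args, &platforms / *platforms), so the logging.level and
logging.to_stderr changes above fan out to every input in the file. With
stderr captured by the subprocess log writer from PATCH 34/49, the beats no
longer manage log files of their own.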
--- NOTICE.txt | 4 ++-- go.mod | 3 +-- go.sum | 13 +++++++++++-- 3 files changed, 14 insertions(+), 6 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 7bc5103d040..cdd71e1a34f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1273,11 +1273,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.2.6 +Version: v0.2.15 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.6/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.15/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index 4c82fd8ef3c..4d44c91ae45 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 github.com/elastic/elastic-agent-autodiscover v0.2.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 - github.com/elastic/elastic-agent-libs v0.2.6 + github.com/elastic/elastic-agent-libs v0.2.15 github.com/elastic/elastic-agent-system-metrics v0.4.4 github.com/elastic/go-licenser v0.4.0 github.com/elastic/go-sysinfo v1.8.1 @@ -156,7 +156,6 @@ replace ( github.com/Shopify/sarama => github.com/elastic/sarama v1.19.1-0.20220310193331-ebc2b0d8eef3 github.com/dop251/goja => github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 github.com/dop251/goja_nodejs => github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 - github.com/elastic/elastic-agent-libs => github.com/blakerouse/elastic-agent-libs v0.0.0-20221105171455-89215adc2946 github.com/fsnotify/fsnotify => github.com/adriansr/fsnotify v1.4.8-0.20211018144411-a81f2b630e7c github.com/tonistiigi/fifo => github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c ) diff --git a/go.sum b/go.sum index 141019bba2b..ac08a20814c 100644 --- a/go.sum +++ b/go.sum @@ -159,8 +159,6 @@ github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngE github.com/bits-and-blooms/bitset v1.2.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA= github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blakerouse/elastic-agent-libs v0.0.0-20221105171455-89215adc2946 h1:OGhNWuiNJCdb9flvTpmQwrC3HeCTP2O2QA1HEnZAzqA= -github.com/blakerouse/elastic-agent-libs v0.0.0-20221105171455-89215adc2946/go.mod h1:0J9lzJh+BjttIiVjYDLncKYCEWUUHiiqnuI64y6C6ss= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/blang/semver v3.1.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= @@ -388,6 +386,9 @@ github.com/elastic/elastic-agent-autodiscover v0.2.1 h1:Nbeayh3vq2FNm6xaFo34mhUd github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+iG9eyyXaQXBbAMHa+Y6Z8hXfcGY= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= +github.com/elastic/elastic-agent-libs v0.2.5/go.mod 
h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= +github.com/elastic/elastic-agent-libs v0.2.15 h1:hdAbrZZ2mCPcQLRCE3E8xw3mHKl8HFMt36w7jan/XGo= +github.com/elastic/elastic-agent-libs v0.2.15/go.mod h1:0J9lzJh+BjttIiVjYDLncKYCEWUUHiiqnuI64y6C6ss= github.com/elastic/elastic-agent-system-metrics v0.4.4 h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0= github.com/elastic/elastic-agent-system-metrics v0.4.4/go.mod h1:tF/f9Off38nfzTZHIVQ++FkXrDm9keFhFpJ+3pQ00iI= github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY= @@ -1132,6 +1133,7 @@ github.com/stretchr/testify v1.2.1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= @@ -1213,6 +1215,7 @@ go.elastic.co/apm/module/apmhttp/v2 v2.0.0 h1:GNfmK1LD4nE5fYqbLxROCpg1ucyjSFG5iw go.elastic.co/apm/module/apmhttp/v2 v2.0.0/go.mod h1:5KmxcNN7hkJh8sVW3Ggl/pYgnwiNenygE46bZoUb9RE= go.elastic.co/apm/v2 v2.0.0 h1:5BeBh+oIrVbMwPrW3uO9Uxm4w7HpKy92lYl5Rfj69Kg= go.elastic.co/apm/v2 v2.0.0/go.mod h1:KGQn56LtRmkQjt2qw4+c1Jz8gv9rCBUU/m21uxrqcps= +go.elastic.co/ecszap v1.0.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ= go.elastic.co/ecszap v1.0.1 h1:mBxqEJAEXBlpi5+scXdzL7LTFGogbuxipJC0KTZicyA= go.elastic.co/ecszap v1.0.1/go.mod h1:SVjazT+QgNeHSGOCUHvRgN+ZRj5FkB7IXQQsncdF57A= go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= @@ -1258,6 +1261,7 @@ go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqe go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= @@ -1266,10 +1270,13 @@ go.uber.org/goleak v1.1.11/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ go.uber.org/goleak v1.1.12 h1:gZAh5/EyT/HQwlpkCy6wTpqfH9H8Lz8zbm3dZh+OyzA= go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.8.0 h1:dg6GjLku4EH+249NNmoIciG9N/jURbDG+pFlTkhzIC8= go.uber.org/multierr v1.8.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.14.0/go.mod 
h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.21.0 h1:WefMeulhovoZ2sYXz7st6K0sLj7bBhpiFaud4r4zST8=
go.uber.org/zap v1.21.0/go.mod h1:wjWOCqI0f2ZZrJF/UufIOkiC8ii6tm1iqIsLo76RfJw=
@@ -1618,6 +1625,8 @@ golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=

From 73b3d2ed683da704ca99ce22008764631fecf09d Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Mon, 7 Nov 2022 22:05:55 -0500
Subject: [PATCH 37/49] [V2] Enable support for shippers (#1527)

* Work on adding shipper support.
* Fix fmt.
* Fix reference to spec. Allow shipper to be null but still enabled if key exists.
* Move supported shippers into its own key in the input specification.
* Fix issue in merge.
* Implement fake shipper and add fake shipper output to the fake component.
* Add protoc to the test target.
* Don't generate fake shipper protocol in test.
* Commit fake GRPC into code.
* Add unit test for running with shipper, with sending an event between running component and running shipper.
* Add docstring for shipper test.
* Add changelog fragment.
* Adjust paths for shipper to work on windows and better on unix.
* Update changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml

Co-authored-by: Craig MacKenzie
* Fix fake/component to connect over npipe on windows.
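
Shippers run as regular components: an input lists the shippers it supports
under its own key in the input specification, and specs/shipper.spec.yml
describes the shipper binary itself. The fake shipper and the GRPC event
protocol under pkg/component/fake/common exist so the manager tests can send
an event from a running component to a running shipper, over unix sockets on
unix-like platforms and named pipes on Windows.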
Co-authored-by: Craig MacKenzie
---
 .gitignore | 9 +-
 ...for-running-the-elastic-agent-shipper.yaml | 31 +
 control.proto | 2 +
 .../handlers/handler_action_application.go | 10 +-
 .../application/coordinator/coordinator.go | 13 +-
 .../application/fleet_server_bootstrap.go | 4 +-
 .../gateway/fleet/fleet_gateway.go | 10 +-
 .../pkg/agent/application/managed_mode.go | 2 +-
 internal/pkg/agent/application/paths/paths.go | 7 +-
 .../agent/application/paths/paths_darwin.go | 7 +-
 .../agent/application/paths/paths_windows.go | 7 +-
 internal/pkg/agent/cmd/inspect.go | 6 +-
 internal/pkg/agent/control/addr.go | 2 +-
 internal/pkg/agent/control/addr_windows.go | 2 +-
 internal/pkg/agent/control/client/client.go | 11 +-
 .../pkg/agent/control/cproto/control.pb.go | 166 ++---
 internal/pkg/agent/control/server/server.go | 19 +-
 internal/pkg/agent/install/uninstall.go | 2 +-
 internal/pkg/fleetapi/checkin_cmd.go | 19 +-
 magefile.go | 35 +-
 pkg/component/component.go | 202 +++++-
 pkg/component/component_test.go | 621 +++++++++++++++++-
 pkg/component/fake/common/common.go | 34 +
 pkg/component/fake/common/event.pb.go | 235 +++++++
 pkg/component/fake/common/event.proto | 29 +
 pkg/component/fake/common/event_grpc.pb.go | 112 ++++
 pkg/component/fake/{ => component}/README.md | 0
 pkg/component/fake/component/dialer.go | 27 +
 .../fake/component/dialer_windows.go | 27 +
 pkg/component/fake/component/main.go | 580 ++++++++++++++++
 pkg/component/fake/main.go | 342 ----------
 pkg/component/fake/shipper/README.md | 3 +
 pkg/component/fake/shipper/listener.go | 29 +
 .../fake/shipper/listener_windows.go | 38 ++
 pkg/component/fake/shipper/main.go | 514 +++++++++++++++
 pkg/component/input_spec.go | 8 +
 pkg/component/load.go | 108 ++-
 pkg/component/output_spec.go | 31 -
 pkg/component/outputs.go | 21 -
 pkg/component/runtime/command.go | 73 +-
 pkg/component/runtime/manager.go | 53 +-
 pkg/component/runtime/manager_shipper.go | 127 ++++
 pkg/component/runtime/manager_shipper_unix.go | 33 +
 .../runtime/manager_shipper_windows.go | 29 +
 pkg/component/runtime/manager_test.go | 358 +++++++++-
 pkg/component/runtime/runtime.go | 21 +-
 pkg/component/runtime/service.go | 42 +-
 pkg/component/runtime/shipper.go | 13 +
 pkg/component/shipper_spec.go | 33 +
 pkg/component/spec.go | 25 +-
 specs/filebeat.spec.yml | 363 +++++-----
 specs/metricbeat.spec.yml | 350 +++++----
 specs/shipper.spec.yml | 18 +
 53 files changed, 3847 insertions(+), 1016 deletions(-)
 create mode 100644 changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml
 create mode 100644 pkg/component/fake/common/common.go
 create mode 100644 pkg/component/fake/common/event.pb.go
 create mode 100644 pkg/component/fake/common/event.proto
 create mode 100644 pkg/component/fake/common/event_grpc.pb.go
 rename pkg/component/fake/{ => component}/README.md (100%)
 create mode 100644 pkg/component/fake/component/dialer.go
 create mode 100644 pkg/component/fake/component/dialer_windows.go
 create mode 100644 pkg/component/fake/component/main.go
 delete mode 100644 pkg/component/fake/main.go
 create mode 100644 pkg/component/fake/shipper/README.md
 create mode 100644 pkg/component/fake/shipper/listener.go
 create mode 100644 pkg/component/fake/shipper/listener_windows.go
 create mode 100644 pkg/component/fake/shipper/main.go
 delete mode 100644 pkg/component/output_spec.go
 delete mode 100644 pkg/component/outputs.go
 create mode 100644 pkg/component/runtime/manager_shipper.go
 create mode 100644 pkg/component/runtime/manager_shipper_unix.go
 create mode 100644 pkg/component/runtime/manager_shipper_windows.go
 create mode 100644 pkg/component/runtime/shipper.go
 create mode 100644 pkg/component/shipper_spec.go
 create mode 100644 specs/shipper.spec.yml

diff --git a/.gitignore b/.gitignore
index 9940bf5068e..476cfd50764 100644
--- a/.gitignore
+++ b/.gitignore
@@ -45,7 +45,6 @@ fleet.enc.lock
 # Files generated with the bump version automations
 *.bck
-
 # agent
 build/
 elastic-agent
@@ -54,9 +53,5 @@ elastic-agent.yml.*
 fleet.yml
 fleet.yml.lock
 fleet.yml.old
-internal/pkg/agent/application/fleet.yml
-internal/pkg/agent/transpiler/tests/exec-1.0-darwin-x86_64/exec
-pkg/component/fake/fake
-
-# VSCode
-/.vscode
+pkg/component/fake/component/component
+pkg/component/fake/shipper/shipper
diff --git a/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml b/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml
new file mode 100644
index 00000000000..26430b05741
--- /dev/null
+++ b/changelog/fragments/1667571017-Add-support-for-running-the-elastic-agent-shipper.yaml
@@ -0,0 +1,31 @@
+# Kind can be one of:
+# - breaking-change: a change to previously-documented behavior
+# - deprecation: functionality that is being removed in a later release
+# - bug-fix: fixes a problem in a previous version
+# - enhancement: extends functionality but does not break or fix existing behavior
+# - feature: new functionality
+# - known-issue: problems that we are aware of in a given version
+# - security: impacts on the security of a product or a user’s deployment.
+# - upgrade: important information for someone upgrading from a prior version
+# - other: does not fit into any of the other categories
+kind: feature
+
+# Change summary; a roughly 80-character description of the change.
+summary: Add experimental support for running the elastic-agent-shipper
+
+# Long description; in case the summary is not enough to describe the change
+# this field accommodates a description without length limits.
+#description:
+
+# PR number; optional; the PR number that added the changeset.
+# If not present, it is automatically filled by the tooling finding the PR where this changelog fragment has been added.
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number.
+# Please provide it if you are adding a fragment for a different PR.
+pr: 1527
+
+# Affected component; a word indicating the component this changeset affects.
+component:
+
+# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of).
+# If not present, it is automatically filled by the tooling with the issue linked to the PR number.
+issue: 219
diff --git a/control.proto b/control.proto
index 4bcef0ea3ed..25eef00de4c 100644
--- a/control.proto
+++ b/control.proto
@@ -193,6 +193,8 @@ message DiagnosticAgentResponse {
 // DiagnosticUnitRequest specifies a specific unit to gather diagnostics from.
 message DiagnosticUnitRequest {
+    // ID of the component.
+    string component_id = 1;
     // Type of unit.
     UnitType unit_type = 2;
     // ID of the unit.
     string unit_id = 3;
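With the new component_id field, a unit's diagnostics can be scoped to the component that owns it. A minimal sketch against the extended control client shown later in this patch; c (a control client) and ctx are assumed to already exist, and the concrete IDs are illustrative only:

    // Request diagnostics for a single unit, identified by its owning component.
    reqs := []client.DiagnosticUnitRequest{
        {
            ComponentID: "filestream-default",              // illustrative component ID
            UnitID:      "filestream-default-filestream-0", // illustrative unit ID
            UnitType:    client.UnitTypeInput,              // assumed input-unit constant
        },
    }
    results, err := c.DiagnosticUnits(ctx, reqs...)
    if err != nil {
        return err // transport failure; per-unit failures are reported inside results
    }
    _ = results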
diff --git a/internal/pkg/agent/application/actions/handlers/handler_action_application.go b/internal/pkg/agent/application/actions/handlers/handler_action_application.go index 552427a16b0..462340476eb 100644 --- a/internal/pkg/agent/application/actions/handlers/handler_action_application.go +++ b/internal/pkg/agent/application/actions/handlers/handler_action_application.go @@ -49,7 +49,7 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.A } state := h.coord.State(false) - unit, ok := findUnitFromInputType(state, action.InputType) + comp, unit, ok := findUnitFromInputType(state, action.InputType) if !ok { // If the matching action is not found ack the action with the error for action result document action.StartedAt = time.Now().UTC().Format(time.RFC3339Nano) @@ -78,7 +78,7 @@ func (h *AppAction) Handle(ctx context.Context, a fleetapi.Action, acker acker.A h.log.Debugf("handlerAppAction: action '%v' started with timeout: %v", action.ActionType, timeout) ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - res, err = h.coord.PerformAction(ctx, unit, action.InputType, params) + res, err = h.coord.PerformAction(ctx, comp, unit, action.InputType, params) } end := time.Now().UTC() @@ -151,13 +151,13 @@ func readMapString(m map[string]interface{}, key string, def string) string { return def } -func findUnitFromInputType(state coordinator.State, inputType string) (component.Unit, bool) { +func findUnitFromInputType(state coordinator.State, inputType string) (component.Component, component.Unit, bool) { for _, comp := range state.Components { for _, unit := range comp.Component.Units { if unit.Type == client.UnitTypeInput && unit.Config != nil && unit.Config.Type == inputType { - return unit, true + return comp.Component, unit, true } } } - return component.Unit{}, false + return component.Component{}, component.Unit{}, false } diff --git a/internal/pkg/agent/application/coordinator/coordinator.go b/internal/pkg/agent/application/coordinator/coordinator.go index d244f85b531..48a476a5164 100644 --- a/internal/pkg/agent/application/coordinator/coordinator.go +++ b/internal/pkg/agent/application/coordinator/coordinator.go @@ -91,14 +91,14 @@ type RuntimeManager interface { State() []runtime.ComponentComponentState // PerformAction executes an action on a unit. - PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) + PerformAction(ctx context.Context, comp component.Component, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) // SubscribeAll provides an interface to watch for changes in all components. SubscribeAll(context.Context) *runtime.SubscriptionAll // PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then // it performs diagnostics for all current units. - PerformDiagnostics(context.Context, ...component.Unit) []runtime.ComponentUnitDiagnostic + PerformDiagnostics(context.Context, ...runtime.ComponentUnitDiagnosticRequest) []runtime.ComponentUnitDiagnostic } // ConfigChange provides an interface for receiving a new configuration. @@ -285,19 +285,20 @@ func (c *Coordinator) Upgrade(ctx context.Context, version string, sourceURI str return nil } +// AckUpgrade performs acknowledgement for upgrade. func (c *Coordinator) AckUpgrade(ctx context.Context, acker acker.Acker) error { return c.upgradeMgr.Ack(ctx, acker) } // PerformAction executes an action on a unit. 
-func (c *Coordinator) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { - return c.runtimeMgr.PerformAction(ctx, unit, name, params) +func (c *Coordinator) PerformAction(ctx context.Context, comp component.Component, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) { + return c.runtimeMgr.PerformAction(ctx, comp, unit, name, params) } // PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then // it performs diagnostics for all current units. -func (c *Coordinator) PerformDiagnostics(ctx context.Context, units ...component.Unit) []runtime.ComponentUnitDiagnostic { - return c.runtimeMgr.PerformDiagnostics(ctx, units...) +func (c *Coordinator) PerformDiagnostics(ctx context.Context, req ...runtime.ComponentUnitDiagnosticRequest) []runtime.ComponentUnitDiagnostic { + return c.runtimeMgr.PerformDiagnostics(ctx, req...) } // Run runs the coordinator. diff --git a/internal/pkg/agent/application/fleet_server_bootstrap.go b/internal/pkg/agent/application/fleet_server_bootstrap.go index 808feee0af0..62106c30aea 100644 --- a/internal/pkg/agent/application/fleet_server_bootstrap.go +++ b/internal/pkg/agent/application/fleet_server_bootstrap.go @@ -46,7 +46,7 @@ var injectFleetServerInput = config.MustNewConfigFrom(map[string]interface{}{ func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) coordinator.ComponentsModifier { return func(comps []component.Component, _ map[string]interface{}) ([]component.Component, error) { for i, comp := range comps { - if comp.Spec.InputType == fleetServer { + if comp.InputSpec != nil && comp.InputSpec.InputType == fleetServer { for j, unit := range comp.Units { if unit.Type == client.UnitTypeOutput && unit.Config.Type == elasticsearch { unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), &serverCfg.Output.Elasticsearch) @@ -89,7 +89,7 @@ func FleetServerComponentModifier(serverCfg *configuration.FleetServerConfig) co func EndpointComponentModifier(fleetCfg *configuration.FleetAgentConfig) coordinator.ComponentsModifier { return func(comps []component.Component, cfg map[string]interface{}) ([]component.Component, error) { for i, comp := range comps { - if comp.Spec.InputType == endpoint { + if comp.InputSpec != nil && comp.InputSpec.InputType == endpoint { for j, unit := range comp.Units { if unit.Type == client.UnitTypeInput && unit.Config.Type == endpoint { unitCfgMap, err := toMapStr(unit.Config.Source.AsMap(), map[string]interface{}{"fleet": fleetCfg}) diff --git a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go index e7b994acfc6..9b31e7dcf12 100644 --- a/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go +++ b/internal/pkg/agent/application/gateway/fleet/fleet_gateway.go @@ -267,11 +267,19 @@ func (f *fleetGateway) convertToCheckinComponents(components []runtime.Component component := item.Component state := item.State + var shipperReference *fleetapi.CheckinShipperReference + if component.Shipper != nil { + shipperReference = &fleetapi.CheckinShipperReference{ + ComponentID: component.Shipper.ComponentID, + UnitID: component.Shipper.UnitID, + } + } checkinComponent := fleetapi.CheckinComponent{ ID: component.ID, - Type: component.Spec.InputType, + Type: component.Type(), Status: stateString(state.State), Message: state.Message, + Shipper: shipperReference, } if 
state.Units != nil { diff --git a/internal/pkg/agent/application/managed_mode.go b/internal/pkg/agent/application/managed_mode.go index 32cff92f8e4..af53e150888 100644 --- a/internal/pkg/agent/application/managed_mode.go +++ b/internal/pkg/agent/application/managed_mode.go @@ -271,7 +271,7 @@ func (m *managedConfigManager) waitForFleetServer(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case compState := <-sub.Ch(): - if compState.Component.Spec.InputType == "fleet-server" { + if compState.Component.InputSpec != nil && compState.Component.InputSpec.InputType == "fleet-server" { if fleetServerRunning(compState.State) { m.log.With("state", compState.State).Debugf("Fleet Server is running") return nil diff --git a/internal/pkg/agent/application/paths/paths.go b/internal/pkg/agent/application/paths/paths.go index cc0487bb2a8..2001f0be616 100644 --- a/internal/pkg/agent/application/paths/paths.go +++ b/internal/pkg/agent/application/paths/paths.go @@ -14,8 +14,11 @@ const ( // InstallPath is the installation path using for install command. InstallPath = "/opt/Elastic/Agent" - // SocketPath is the socket path used when installed. - SocketPath = "unix:///run/elastic-agent.sock" + // ControlSocketPath is the control socket path used when installed. + ControlSocketPath = "unix:///run/elastic-agent.sock" + + // ShipperSocketPipePattern is the socket path used when installed for a shipper pipe. + ShipperSocketPipePattern = "unix:///run/elastic-agent-%s-pipe.sock" // ServiceName is the service name when installed. ServiceName = "elastic-agent" diff --git a/internal/pkg/agent/application/paths/paths_darwin.go b/internal/pkg/agent/application/paths/paths_darwin.go index 1a60c53ff8d..64aeaa7d127 100644 --- a/internal/pkg/agent/application/paths/paths_darwin.go +++ b/internal/pkg/agent/application/paths/paths_darwin.go @@ -14,8 +14,11 @@ const ( // InstallPath is the installation path using for install command. InstallPath = "/Library/Elastic/Agent" - // SocketPath is the socket path used when installed. - SocketPath = "unix:///var/run/elastic-agent.sock" + // ControlSocketPath is the control socket path used when installed. + ControlSocketPath = "unix:///var/run/elastic-agent.sock" + + // ShipperSocketPipePattern is the socket path used when installed for a shipper pipe. + ShipperSocketPipePattern = "unix:///var/run/elastic-agent-%s-pipe.sock" // ServiceName is the service name when installed. ServiceName = "co.elastic.elastic-agent" diff --git a/internal/pkg/agent/application/paths/paths_windows.go b/internal/pkg/agent/application/paths/paths_windows.go index 0b81aa2061b..700634a134c 100644 --- a/internal/pkg/agent/application/paths/paths_windows.go +++ b/internal/pkg/agent/application/paths/paths_windows.go @@ -19,8 +19,11 @@ const ( // InstallPath is the installation path using for install command. InstallPath = `C:\Program Files\Elastic\Agent` - // SocketPath is the socket path used when installed. - SocketPath = `\\.\pipe\elastic-agent-system` + // ControlSocketPath is the control socket path used when installed. + ControlSocketPath = `\\.\pipe\elastic-agent-system` + + // ShipperSocketPipePattern is the socket path used when installed for a shipper pipe. + ShipperSocketPipePattern = `\\.\pipe\elastic-agent-%s-pipe.sock` // ServiceName is the service name when installed. 
ServiceName = "Elastic Agent" diff --git a/internal/pkg/agent/cmd/inspect.go b/internal/pkg/agent/cmd/inspect.go index 6d9b884aea4..d933a8fe1bf 100644 --- a/internal/pkg/agent/cmd/inspect.go +++ b/internal/pkg/agent/cmd/inspect.go @@ -287,7 +287,8 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen return fmt.Errorf("unable to find unit with ID: %s/%s", compID, unitID) } if !opts.showSpec { - comp.Spec = component.InputRuntimeSpec{} + comp.InputSpec = nil + comp.ShipperSpec = nil } if !opts.showConfig { for key, unit := range comp.Units { @@ -314,7 +315,8 @@ func inspectComponents(ctx context.Context, cfgPath string, opts inspectComponen // Hide runtime specification unless toggled on. if !opts.showSpec { for i, comp := range comps { - comp.Spec = component.InputRuntimeSpec{} + comp.InputSpec = nil + comp.ShipperSpec = nil comps[i] = comp } } diff --git a/internal/pkg/agent/control/addr.go b/internal/pkg/agent/control/addr.go index 7008fc754e7..31344ed69c1 100644 --- a/internal/pkg/agent/control/addr.go +++ b/internal/pkg/agent/control/addr.go @@ -20,7 +20,7 @@ import ( func Address() string { // when installed the control address is fixed if info.RunningInstalled() { - return paths.SocketPath + return paths.ControlSocketPath } // unix socket path must be less than 104 characters diff --git a/internal/pkg/agent/control/addr_windows.go b/internal/pkg/agent/control/addr_windows.go index 0b5dc711a7d..4eed0f26596 100644 --- a/internal/pkg/agent/control/addr_windows.go +++ b/internal/pkg/agent/control/addr_windows.go @@ -19,7 +19,7 @@ import ( func Address() string { // when installed the control address is fixed if info.RunningInstalled() { - return paths.SocketPath + return paths.ControlSocketPath } // not install, adjust the path based on data path diff --git a/internal/pkg/agent/control/client/client.go b/internal/pkg/agent/control/client/client.go index d876ba4aca1..87440c54141 100644 --- a/internal/pkg/agent/control/client/client.go +++ b/internal/pkg/agent/control/client/client.go @@ -58,6 +58,7 @@ type Version struct { Snapshot bool `json:"snapshot" yaml:"snapshot"` } +// ComponentVersionInfo is the version information for the component. type ComponentVersionInfo struct { // Name of the component. Name string `json:"name" yaml:"name"` @@ -115,8 +116,9 @@ type DiagnosticFileResult struct { // DiagnosticUnitRequest allows a specific unit to be targeted for diagnostics. type DiagnosticUnitRequest struct { - UnitID string - UnitType UnitType + ComponentID string + UnitID string + UnitType UnitType } // DiagnosticUnitResult is a set of results for a unit. @@ -308,8 +310,9 @@ func (c *client) DiagnosticUnits(ctx context.Context, units ...DiagnosticUnitReq reqs := make([]*cproto.DiagnosticUnitRequest, 0, len(units)) for _, u := range units { reqs = append(reqs, &cproto.DiagnosticUnitRequest{ - UnitType: u.UnitType, - UnitId: u.UnitID, + ComponentId: u.ComponentID, + UnitType: u.UnitType, + UnitId: u.UnitID, }) } diff --git a/internal/pkg/agent/control/cproto/control.pb.go b/internal/pkg/agent/control/cproto/control.pb.go index ed681fd38ef..01588cfa4c1 100644 --- a/internal/pkg/agent/control/cproto/control.pb.go +++ b/internal/pkg/agent/control/cproto/control.pb.go @@ -1155,6 +1155,8 @@ type DiagnosticUnitRequest struct { sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields + // ID of the component. + ComponentId string `protobuf:"bytes,1,opt,name=component_id,json=componentId,proto3" json:"component_id,omitempty"` // Type of unit. 
UnitType UnitType `protobuf:"varint,2,opt,name=unit_type,json=unitType,proto3,enum=cproto.UnitType" json:"unit_type,omitempty"` // ID of the unit. @@ -1193,6 +1195,13 @@ func (*DiagnosticUnitRequest) Descriptor() ([]byte, []int) { return file_control_proto_rawDescGZIP(), []int{13} } +func (x *DiagnosticUnitRequest) GetComponentId() string { + if x != nil { + return x.ComponentId + } + return "" +} + func (x *DiagnosticUnitRequest) GetUnitType() UnitType { if x != nil { return x.UnitType @@ -1501,85 +1510,88 @@ var file_control_proto_rawDesc = []byte{ 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, - 0x74, 0x73, 0x22, 0x5f, 0x0a, 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x09, 0x75, - 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, - 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, - 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, - 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, - 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x33, 0x0a, - 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x63, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, - 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, 0x05, 0x75, 0x6e, 0x69, - 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, - 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, - 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, - 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, - 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, - 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x36, - 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, - 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, - 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, - 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, - 0x65, 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, - 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 
0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x2a, 0x85, 0x01, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, - 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, 0x4e, 0x47, 0x10, 0x01, - 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, 0x02, 0x12, 0x0c, 0x0a, - 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0a, 0x0a, 0x06, 0x46, - 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x4f, 0x50, 0x50, - 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, 0x50, 0x50, 0x45, 0x44, - 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, 0x49, 0x4e, 0x47, 0x10, - 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, 0x4b, 0x10, 0x08, 0x2a, - 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x09, 0x0a, 0x05, 0x49, - 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, 0x54, 0x50, 0x55, 0x54, - 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, 0x53, 0x10, 0x00, 0x12, - 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, 0x2a, 0x7f, 0x0a, 0x0b, - 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x0a, 0x0a, 0x06, 0x41, - 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, 0x4c, 0x4f, 0x43, 0x4b, - 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, 0x45, 0x10, 0x02, 0x12, - 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, 0x10, 0x03, 0x12, 0x08, - 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, 0x4d, 0x55, 0x54, 0x45, - 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, 0x4c, 0x45, 0x10, 0x06, - 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, - 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, 0x08, 0x32, 0x8e, 0x03, - 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x43, 0x6f, - 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, - 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, - 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, 0x53, 0x74, 0x61, 0x74, - 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, - 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, 0x70, 0x74, - 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, 0x65, 0x73, 0x74, 0x61, - 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, 0x0a, 0x07, 0x55, 0x70, - 0x67, 0x72, 0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, - 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 
0x17, 0x2e, - 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, - 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, - 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, - 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, - 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x12, 0x1e, 0x2e, + 0x74, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x15, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, + 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, + 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x49, 0x64, 0x12, + 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x6e, 0x69, 0x74, + 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, + 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x22, 0x4d, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x12, 0x33, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x1d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, + 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x52, + 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x22, 0xd1, 0x01, 0x0a, 0x16, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x5f, 0x69, + 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, + 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2d, 0x0a, 0x09, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x74, 0x79, 0x70, + 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x10, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x52, 0x08, 0x75, 0x6e, 0x69, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x6e, 0x69, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x6e, 0x69, 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, + 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, + 0x6f, 0x72, 0x12, 0x36, 0x0a, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x18, 0x05, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, + 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x46, 0x69, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x75, 0x6c, + 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x22, 0x4f, 0x0a, 0x17, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 
0x6e, 0x73, 0x65, 0x12, 0x34, 0x0a, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x18, 0x02, + 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x52, 0x05, 0x75, 0x6e, 0x69, 0x74, 0x73, 0x2a, 0x85, 0x01, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0c, 0x0a, 0x08, 0x53, 0x54, 0x41, 0x52, 0x54, 0x49, 0x4e, + 0x47, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x43, 0x4f, 0x4e, 0x46, 0x49, 0x47, 0x55, 0x52, 0x49, + 0x4e, 0x47, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x48, 0x45, 0x41, 0x4c, 0x54, 0x48, 0x59, 0x10, + 0x02, 0x12, 0x0c, 0x0a, 0x08, 0x44, 0x45, 0x47, 0x52, 0x41, 0x44, 0x45, 0x44, 0x10, 0x03, 0x12, + 0x0a, 0x0a, 0x06, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x0c, 0x0a, 0x08, 0x53, + 0x54, 0x4f, 0x50, 0x50, 0x49, 0x4e, 0x47, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x54, 0x4f, + 0x50, 0x50, 0x45, 0x44, 0x10, 0x06, 0x12, 0x0d, 0x0a, 0x09, 0x55, 0x50, 0x47, 0x52, 0x41, 0x44, + 0x49, 0x4e, 0x47, 0x10, 0x07, 0x12, 0x0c, 0x0a, 0x08, 0x52, 0x4f, 0x4c, 0x4c, 0x42, 0x41, 0x43, + 0x4b, 0x10, 0x08, 0x2a, 0x21, 0x0a, 0x08, 0x55, 0x6e, 0x69, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, + 0x09, 0x0a, 0x05, 0x49, 0x4e, 0x50, 0x55, 0x54, 0x10, 0x00, 0x12, 0x0a, 0x0a, 0x06, 0x4f, 0x55, + 0x54, 0x50, 0x55, 0x54, 0x10, 0x01, 0x2a, 0x28, 0x0a, 0x0c, 0x41, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x43, 0x43, 0x45, 0x53, + 0x53, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x46, 0x41, 0x49, 0x4c, 0x55, 0x52, 0x45, 0x10, 0x01, + 0x2a, 0x7f, 0x0a, 0x0b, 0x50, 0x70, 0x72, 0x6f, 0x66, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, + 0x0a, 0x0a, 0x06, 0x41, 0x4c, 0x4c, 0x4f, 0x43, 0x53, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x42, + 0x4c, 0x4f, 0x43, 0x4b, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4d, 0x44, 0x4c, 0x49, 0x4e, + 0x45, 0x10, 0x02, 0x12, 0x0d, 0x0a, 0x09, 0x47, 0x4f, 0x52, 0x4f, 0x55, 0x54, 0x49, 0x4e, 0x45, + 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, 0x48, 0x45, 0x41, 0x50, 0x10, 0x04, 0x12, 0x09, 0x0a, 0x05, + 0x4d, 0x55, 0x54, 0x45, 0x58, 0x10, 0x05, 0x12, 0x0b, 0x0a, 0x07, 0x50, 0x52, 0x4f, 0x46, 0x49, + 0x4c, 0x45, 0x10, 0x06, 0x12, 0x10, 0x0a, 0x0c, 0x54, 0x48, 0x52, 0x45, 0x41, 0x44, 0x43, 0x52, + 0x45, 0x41, 0x54, 0x45, 0x10, 0x07, 0x12, 0x09, 0x0a, 0x05, 0x54, 0x52, 0x41, 0x43, 0x45, 0x10, + 0x08, 0x32, 0x8e, 0x03, 0x0a, 0x13, 0x45, 0x6c, 0x61, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, + 0x6e, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x07, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x56, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2d, 0x0a, 0x05, + 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x15, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x74, + 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x31, 0x0a, 0x07, 0x52, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x52, + 0x65, 0x73, 0x74, 0x61, 0x72, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x3a, + 0x0a, 0x07, 0x55, 0x70, 0x67, 0x72, 
0x61, 0x64, 0x65, 0x12, 0x16, 0x2e, 0x63, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, 0x64, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x17, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x55, 0x70, 0x67, 0x72, 0x61, + 0x64, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, 0x0a, 0x0f, 0x44, 0x69, + 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, + 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, - 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x26, - 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, 0x6b, 0x67, 0x2f, 0x61, - 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x2f, 0x63, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x63, 0x41, 0x67, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x52, + 0x0a, 0x0f, 0x44, 0x69, 0x61, 0x67, 0x6e, 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, + 0x73, 0x12, 0x1e, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x1f, 0x2e, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x44, 0x69, 0x61, 0x67, 0x6e, + 0x6f, 0x73, 0x74, 0x69, 0x63, 0x55, 0x6e, 0x69, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x42, 0x26, 0x5a, 0x21, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x70, + 0x6b, 0x67, 0x2f, 0x61, 0x67, 0x65, 0x6e, 0x74, 0x2f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, + 0x2f, 0x63, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, } var ( diff --git a/internal/pkg/agent/control/server/server.go b/internal/pkg/agent/control/server/server.go index 03e19618aeb..67fe85fab2b 100644 --- a/internal/pkg/agent/control/server/server.go +++ b/internal/pkg/agent/control/server/server.go @@ -11,6 +11,8 @@ import ( "net" "time" + "github.com/elastic/elastic-agent/pkg/component/runtime" + "go.elastic.co/apm" "go.elastic.co/apm/module/apmgrpc" "google.golang.org/grpc" @@ -129,7 +131,7 @@ func (s *Server) State(_ context.Context, _ *cproto.Empty) (*cproto.StateRespons } components = append(components, &cproto.ComponentState{ Id: comp.Component.ID, - Name: comp.Component.Spec.BinaryName, + Name: comp.Component.Type(), State: cproto.State(comp.State.State), Message: comp.State.Message, Units: units, @@ -202,15 +204,20 @@ func (s *Server) DiagnosticAgent(ctx context.Context, _ *cproto.DiagnosticAgentR // DiagnosticUnits returns diagnostic information for the specific units (or all units if non-provided). 
func (s *Server) DiagnosticUnits(ctx context.Context, req *cproto.DiagnosticUnitsRequest) (*cproto.DiagnosticUnitsResponse, error) { - units := make([]component.Unit, 0, len(req.Units)) + reqs := make([]runtime.ComponentUnitDiagnosticRequest, 0, len(req.Units)) for _, u := range req.Units { - units = append(units, component.Unit{ - ID: u.UnitId, - Type: client.UnitType(u.UnitType), + reqs = append(reqs, runtime.ComponentUnitDiagnosticRequest{ + Component: component.Component{ + ID: u.ComponentId, + }, + Unit: component.Unit{ + ID: u.UnitId, + Type: client.UnitType(u.UnitType), + }, }) } - diag := s.coord.PerformDiagnostics(ctx, units...) + diag := s.coord.PerformDiagnostics(ctx, reqs...) res := make([]*cproto.DiagnosticUnitResponse, 0, len(diag)) for _, d := range diag { r := &cproto.DiagnosticUnitResponse{ diff --git a/internal/pkg/agent/install/uninstall.go b/internal/pkg/agent/install/uninstall.go index ef62524455f..9e3eb56e5cd 100644 --- a/internal/pkg/agent/install/uninstall.go +++ b/internal/pkg/agent/install/uninstall.go @@ -173,7 +173,7 @@ func serviceComponentsFromConfig(specs component.RuntimeSpecs, cfg *config.Confi } var serviceComps []component.Component for _, comp := range allComps { - if comp.Err == nil && comp.Spec.Spec.Service != nil { + if comp.Err == nil && comp.InputSpec != nil && comp.InputSpec.Spec.Service != nil { // non-error and service based component serviceComps = append(serviceComps, comp) } diff --git a/internal/pkg/fleetapi/checkin_cmd.go b/internal/pkg/fleetapi/checkin_cmd.go index 33bcd3dab55..f1b048188c3 100644 --- a/internal/pkg/fleetapi/checkin_cmd.go +++ b/internal/pkg/fleetapi/checkin_cmd.go @@ -20,6 +20,7 @@ import ( const checkingPath = "/api/fleet/agents/%s/checkin" +// CheckinUnit provides information about a unit during checkin. type CheckinUnit struct { ID string `json:"id"` Type string `json:"type"` @@ -28,12 +29,20 @@ type CheckinUnit struct { Payload map[string]interface{} `json:"payload,omitempty"` } +// CheckinShipperReference provides information about a component shipper connection during checkin. +type CheckinShipperReference struct { + ComponentID string `json:"component_id"` + UnitID string `json:"unit_id"` +} + +// CheckinComponent provides information about a component during checkin. type CheckinComponent struct { - ID string `json:"id"` - Type string `json:"type"` - Status string `json:"status"` - Message string `json:"message"` - Units []CheckinUnit `json:"units,omitempty"` + ID string `json:"id"` + Type string `json:"type"` + Status string `json:"status"` + Message string `json:"message"` + Units []CheckinUnit `json:"units,omitempty"` + Shipper *CheckinShipperReference `json:"shipper,omitempty"` } // CheckinRequest consists of multiple events reported to fleet ui. diff --git a/magefile.go b/magefile.go index ed633505e49..4c4a082a432 100644 --- a/magefile.go +++ b/magefile.go @@ -226,17 +226,23 @@ func (Build) Clean() { // TestBinaries build the required binaries for the test suite. 
func (Build) TestBinaries() error { - p := filepath.Join("pkg", "component") - fakeBinary := "fake" - if runtime.GOOS == "windows" { - fakeBinary += ".exe" - } - outputName := filepath.Join(p, "fake", fakeBinary) - err := RunGo("build", "-o", outputName, filepath.Join(p, "fake", "main.go")) - if err != nil { - return err + p := filepath.Join("pkg", "component", "fake") + for _, name := range []string{"component", "shipper"} { + binary := name + if runtime.GOOS == "windows" { + binary += ".exe" + } + outputName := filepath.Join(p, name, binary) + err := RunGo("build", "-o", outputName, filepath.Join("github.com/elastic/elastic-agent", p, name, "...")) + if err != nil { + return err + } + err = os.Chmod(outputName, 0755) + if err != nil { + return err + } } - return os.Chmod(outputName, 0755) + return nil } // All run all the code checks. @@ -462,6 +468,15 @@ func ControlProto() error { "control.proto") } +// FakeShipperProto generates pkg/component/fake/common event protocol. +func FakeShipperProto() error { + return sh.RunV( + "protoc", + "--go_out=.", "--go_opt=paths=source_relative", + "--go-grpc_out=.", "--go-grpc_opt=paths=source_relative", + "pkg/component/fake/common/event.proto") +} + func BuildPGP() error { // go run elastic-agent/dev-tools/cmd/buildpgp/build_pgp.go --in agent/spec/GPG-KEY-elasticsearch --out elastic-agent/pkg/release/pgp.go goF := filepath.Join("dev-tools", "cmd", "buildpgp", "build_pgp.go") diff --git a/pkg/component/component.go b/pkg/component/component.go index 467fede9e40..d65fcfec9a4 100644 --- a/pkg/component/component.go +++ b/pkg/component/component.go @@ -17,6 +17,7 @@ import ( "github.com/elastic/elastic-agent/pkg/utils" ) +// GenerateMonitoringCfgFn is a function that can inject information into the model generation process. type GenerateMonitoringCfgFn func(map[string]interface{}, map[string]string) (map[string]interface{}, error) const ( @@ -24,11 +25,6 @@ const ( defaultUnitLogLevel = client.UnitLogLevelInfo ) -var ( - // ErrOutputNotSupported is returned when an input does not support an output type - ErrOutputNotSupported = newError("input doesn't support output type") -) - // ErrInputRuntimeCheckFail error is used when an input specification runtime prevention check occurs. type ErrInputRuntimeCheckFail struct { // message is the reason defined in the check @@ -45,6 +41,15 @@ func (e *ErrInputRuntimeCheckFail) Error() string { return e.message } +// ShipperReference provides a reference to the shipper component/unit that a component is connected to. +type ShipperReference struct { + // ComponentID is the ID of the component that this component is connected to. + ComponentID string `yaml:"component_id"` + + // UnitID is the ID of the unit inside of the component that this component is connected to. + UnitID string `yaml:"unit_id"` +} + // Unit is a single input or output that a component must run. type Unit struct { // ID is the unique ID of the unit. @@ -73,11 +78,27 @@ type Component struct { // the reason that all of these units are failed. Err error `yaml:"error,omitempty"` - // Spec on how the input should run. - Spec InputRuntimeSpec `yaml:"spec,omitempty"` + // InputSpec on how the input should run. (not set when ShipperSpec set) + InputSpec *InputRuntimeSpec `yaml:"input_spec,omitempty"` + + // ShipperSpec on how the shipper should run. (not set when InputSpec set) + ShipperSpec *ShipperRuntimeSpec `yaml:"shipper_spec,omitempty"` // Units that should be running inside this component. 
Units []Unit `yaml:"units"` + + // Shipper references the component/unit that this component used as its output. (not set when ShipperSpec) + Shipper *ShipperReference `yaml:"shipper,omitempty"` +} + +// Type returns the type of the component. +func (c *Component) Type() string { + if c.InputSpec != nil { + return c.InputSpec.InputType + } else if c.ShipperSpec != nil { + return c.ShipperSpec.ShipperType + } + return "" } // ToComponents returns the components that should be running based on the policy and the current runtime specification. @@ -107,6 +128,8 @@ func (r *RuntimeSpecs) ToComponents(policy map[string]interface{}, monitoringInj return components, nil } +// PolicyToComponents takes the policy and generated a component model along with providing a mapping between component +// and the running binary. func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Component, map[string]string, error) { const revision = "revision" outputsMap, err := toIntermediate(policy) @@ -160,18 +183,27 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp } } + shipperMap := make(map[string][]string) for inputType, inputs := range inputsMap { + var supportedShipper ShipperRuntimeSpec + var usingShipper bool + inputSpec, err := r.GetInput(inputType) if err == nil { // update the inputType to match the spec; as it could have been alias inputType = inputSpec.InputType - if !containsStr(inputSpec.Spec.Outputs, output.outputType) { - inputSpec = InputRuntimeSpec{} // empty the spec - err = ErrOutputNotSupported - } else { - err = validateRuntimeChecks(&inputSpec.Spec, vars) - if err != nil { + + // determine if we are operating with shipper support + supportedShipper, usingShipper = getSupportedShipper(r, output, inputSpec, vars) + if !usingShipper { + if !containsStr(inputSpec.Spec.Outputs, output.outputType) { inputSpec = InputRuntimeSpec{} // empty the spec + err = ErrOutputNotSupported + } else { + err = validateRuntimeChecks(&inputSpec.Spec.Runtime, vars) + if err != nil { + inputSpec = InputRuntimeSpec{} // empty the spec + } } } } @@ -200,21 +232,84 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp } if len(units) > 0 { componentID := fmt.Sprintf("%s-%s", inputType, outputName) + if usingShipper { + // using shipper for this component + connected, _ := shipperMap[supportedShipper.ShipperType] + connected = append(connected, componentID) + shipperMap[supportedShipper.ShipperType] = connected + } else { + // using output inside the component + cfg, cfgErr := ExpectedConfig(output.output) + units = append(units, Unit{ + ID: componentID, + Type: client.UnitTypeOutput, + LogLevel: output.logLevel, + Config: cfg, + Err: cfgErr, + }) + } + components = append(components, Component{ + ID: componentID, + Err: err, + InputSpec: &inputSpec, + Units: units, + }) + componentIdsInputMap[componentID] = inputSpec.BinaryName + } + } + + // create the shipper components and units + for shipperType, connected := range shipperMap { + shipperSpec, _ := r.GetShipper(shipperType) // type always exists at this point + shipperCompID := fmt.Sprintf("%s-%s", shipperType, outputName) + + var shipperUnits []Unit + for _, componentID := range connected { + for i, component := range components { + if component.ID == componentID && component.Err == nil { + cfg, cfgErr := componentToShipperConfig(component) + shipperUnit := Unit{ + ID: componentID, + Type: client.UnitTypeInput, + LogLevel: output.logLevel, + Config: cfg, + Err: cfgErr, + } + 
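+					// Each component that routes through this shipper contributes one input
+					// unit to the shipper component: the unit's ID is the connected
+					// component's ID, and its config (built by componentToShipperConfig
+					// above) embeds that component's input units. Below, the connected
+					// component is pointed back at the shipper: it records a Shipper
+					// reference and gains an output unit whose config is just
+					// {"type": <shipperType>}, since the real output now lives in the
+					// shipper component.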
shipperUnits = append(shipperUnits, shipperUnit) + component.Shipper = &ShipperReference{ + ComponentID: shipperCompID, + UnitID: shipperUnit.ID, + } + cfg, cfgErr = ExpectedConfig(map[string]interface{}{ + "type": shipperType, + }) + component.Units = append(component.Units, Unit{ + ID: componentID, + Type: client.UnitTypeOutput, + LogLevel: output.logLevel, + Config: cfg, + Err: cfgErr, + }) + components[i] = component + break + } + } + } + + if len(shipperUnits) > 0 { cfg, cfgErr := ExpectedConfig(output.output) - units = append(units, Unit{ - ID: componentID, + shipperUnits = append(shipperUnits, Unit{ + ID: shipperCompID, Type: client.UnitTypeOutput, LogLevel: output.logLevel, Config: cfg, Err: cfgErr, }) components = append(components, Component{ - ID: componentID, - Err: err, - Spec: inputSpec, - Units: units, + ID: shipperCompID, + ShipperSpec: &shipperSpec, + Units: shipperUnits, }) - componentIdsInputMap[componentID] = inputSpec.BinaryName } } } @@ -222,6 +317,69 @@ func (r *RuntimeSpecs) PolicyToComponents(policy map[string]interface{}) ([]Comp return components, componentIdsInputMap, nil } +func componentToShipperConfig(comp Component) (*proto.UnitExpectedConfig, error) { + cfgUnits := make([]interface{}, 0, len(comp.Units)) + for _, unit := range comp.Units { + if unit.Err == nil && unit.Type == client.UnitTypeInput { + cfgUnits = append(cfgUnits, map[string]interface{}{ + "id": unit.ID, + "config": unit.Config.Source.AsMap(), + }) + } + } + cfg := map[string]interface{}{ + "id": comp.ID, + "units": cfgUnits, + } + return ExpectedConfig(cfg) +} + +func getSupportedShipper(r *RuntimeSpecs, output outputI, inputSpec InputRuntimeSpec, vars *transpiler.Vars) (ShipperRuntimeSpec, bool) { + const ( + enabledKey = "enabled" + ) + + shippers, err := r.GetShippers(output.outputType) + if err != nil { + return ShipperRuntimeSpec{}, false + } + supportedShippers := make([]ShipperRuntimeSpec, 0, len(shippers)) + for _, shipper := range shippers { + if containsStr(inputSpec.Spec.Shippers, shipper.ShipperType) { + // validate the runtime specification to determine if it can even run + err = validateRuntimeChecks(&shipper.Spec.Runtime, vars) + if err != nil { + // shipper cannot run + continue + } + // beta-mode the shipper is not on by default, so we need to ensure that this shipper type + // is enabled in the output configuration + shipperConfigRaw, ok := output.output[shipper.ShipperType] + if ok { + // key exists enabled by default unless explicitly disabled + enabled := true + if shipperConfig, ok := shipperConfigRaw.(map[string]interface{}); ok { + if enabledRaw, ok := shipperConfig[enabledKey]; ok { + if enabledVal, ok := enabledRaw.(bool); ok { + enabled = enabledVal + } + } + } + if enabled { + // inputs supports this shipper (and it's enabled) + supportedShippers = append(supportedShippers, shipper) + } + } + } + } + if len(supportedShippers) == 0 { + return ShipperRuntimeSpec{}, false + } + // in the case of multiple shippers the first is taken from the input specification (this allows an input to + // prefer another shipper over a different shipper) + return supportedShippers[0], true +} + // toIntermediate takes the policy and returns it into an intermediate representation that is easier to map into a set // of components. 
func toIntermediate(policy map[string]interface{}) (map[string]outputI, error) { @@ -377,8 +535,8 @@ type outputI struct { inputs map[string][]inputI } -func validateRuntimeChecks(spec *InputSpec, store eql.VarStore) error { - for _, prevention := range spec.Runtime.Preventions { +func validateRuntimeChecks(runtime *RuntimeSpec, store eql.VarStore) error { + for _, prevention := range runtime.Preventions { expression, err := eql.New(prevention.Condition) if err != nil { // this should not happen because the specification already validates that this diff --git a/pkg/component/component_test.go b/pkg/component/component_test.go index 0d9e97d4c94..c4b83ca6eca 100644 --- a/pkg/component/component_test.go +++ b/pkg/component/component_test.go @@ -291,9 +291,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "unknown-default", - Spec: InputRuntimeSpec{}, - Err: ErrInputNotSupported, + ID: "unknown-default", + InputSpec: &InputRuntimeSpec{}, + Err: ErrInputNotSupported, Units: []Unit{ { ID: "unknown-default", @@ -343,9 +343,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "endpoint-default", - Spec: InputRuntimeSpec{}, - Err: ErrInputNotSupportedOnPlatform, + ID: "endpoint-default", + InputSpec: &InputRuntimeSpec{}, + Err: ErrInputNotSupportedOnPlatform, Units: []Unit{ { ID: "endpoint-default", @@ -386,9 +386,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "endpoint-default", - Spec: InputRuntimeSpec{}, - Err: ErrOutputNotSupported, + ID: "endpoint-default", + InputSpec: &InputRuntimeSpec{}, + Err: ErrOutputNotSupported, Units: []Unit{ { ID: "endpoint-default", @@ -441,9 +441,9 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - ID: "endpoint-default", - Spec: InputRuntimeSpec{}, - Err: NewErrInputRuntimeCheckFail("No support for RHEL7 on arm64"), + ID: "endpoint-default", + InputSpec: &InputRuntimeSpec{}, + Err: NewErrInputRuntimeCheckFail("No support for RHEL7 on arm64"), Units: []Unit{ { ID: "endpoint-default", @@ -499,7 +499,7 @@ func TestToComponents(t *testing.T) { Result: []Component{ { ID: "filestream-default", - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -595,7 +595,7 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -697,7 +697,7 @@ func TestToComponents(t *testing.T) { }, Result: []Component{ { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -732,7 +732,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "filestream", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -767,7 +767,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -802,7 +802,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -828,7 +828,7 @@ func TestToComponents(t *testing.T) { 
}, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -854,7 +854,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "log", BinaryName: "filebeat", BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), @@ -880,7 +880,7 @@ func TestToComponents(t *testing.T) { }, }, { - Spec: InputRuntimeSpec{ + InputSpec: &InputRuntimeSpec{ InputType: "apm", BinaryName: "apm-server", BinaryPath: filepath.Join("..", "..", "specs", "apm-server"), @@ -907,6 +907,564 @@ func TestToComponents(t *testing.T) { }, }, }, + { + Name: "Simple w/ shipper", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "enabled": true, + "shipper": map[string]interface{}{ + "enabled": true, + }, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + "enabled": true, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + "enabled": false, + }, + }, + }, + Result: []Component{ + { + ID: "filestream-default", + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-default", + UnitID: "filestream-default", + }, + }, + { + ID: "shipper-default", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "shipper-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": true, + }, + }), + }, + { + ID: "filestream-default", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "filestream-default", + "units": []interface{}{ + map[string]interface{}{ + "id": "filestream-default-filestream-0", + "config": map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + }, + }, + }), + }, + }, + }, + }, + }, + { + Name: "Complex w/ shipper", + Platform: linuxAMD64Platform, + Policy: map[string]interface{}{ + "outputs": map[string]interface{}{ + "default": map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{}, + }, + "other": map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": false, + }, + }, + "stashit": map[string]interface{}{ + "type": "logstash", + "shipper": map[string]interface{}{}, + }, + "redis": map[string]interface{}{ + "type": "redis", + "shipper": map[string]interface{}{}, + }, + }, + "inputs": []interface{}{ + map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + map[string]interface{}{ + "type": "filestream", + "id": 
"filestream-1", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-2", + "enabled": false, + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-3", + "use_output": "other", + }, + map[string]interface{}{ + "type": "filestream", + "id": "filestream-4", + "use_output": "other", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + "use_output": "default", + }, + map[string]interface{}{ + "type": "log", + "id": "logfile-1", + "use_output": "default", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-2", + "use_output": "other", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + "use_output": "stashit", + }, + map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + "use_output": "redis", + }, + map[string]interface{}{ + "type": "apm", + "id": "apm-server-0", + }, + }, + }, + Result: []Component{ + { + ID: "filestream-default", + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "filestream-default-filestream-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }), + }, + { + ID: "filestream-default-filestream-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + }), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-default", + UnitID: "filestream-default", + }, + }, + { + ID: "filestream-other", + InputSpec: &InputRuntimeSpec{ + InputType: "filestream", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "filestream-other", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": false, + }, + }), + }, + { + ID: "filestream-other-filestream-3", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-3", + }), + }, + { + ID: "filestream-other-filestream-4", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "filestream", + "id": "filestream-4", + }), + }, + }, + }, + { + ID: "log-default", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "log-default-logfile-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + }, "log"), + }, + { + ID: "log-default-logfile-1", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "log", + "id": "logfile-1", + }), + }, + }, + Shipper: 
&ShipperReference{ + ComponentID: "shipper-default", + UnitID: "log-default", + }, + }, + { + ID: "shipper-default", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "filestream-default", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "filestream-default", + "units": []interface{}{ + map[string]interface{}{ + "id": "filestream-default-filestream-0", + "config": map[string]interface{}{ + "type": "filestream", + "id": "filestream-0", + }, + }, + map[string]interface{}{ + "id": "filestream-default-filestream-1", + "config": map[string]interface{}{ + "type": "filestream", + "id": "filestream-1", + }, + }, + }, + }), + }, + { + ID: "log-default", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "log-default", + "units": []interface{}{ + map[string]interface{}{ + "id": "log-default-logfile-0", + "config": map[string]interface{}{ + "type": "logfile", + "id": "logfile-0", + }, + }, + map[string]interface{}{ + "id": "log-default-logfile-1", + "config": map[string]interface{}{ + "type": "log", + "id": "logfile-1", + }, + }, + }, + }), + }, + { + ID: "shipper-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{}, + }), + }, + }, + }, + { + ID: "log-other", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-other", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{ + "enabled": false, + }, + }), + }, + { + ID: "log-other-logfile-2", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-2", + }, "log"), + }, + }, + }, + { + ID: "log-stashit", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-stashit", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "log-stashit-logfile-3", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + }, "log"), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-stashit", + UnitID: "log-stashit", + }, + }, + { + ID: "shipper-stashit", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "log-stashit", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "log-stashit", + "units": []interface{}{ + map[string]interface{}{ + "id": "log-stashit-logfile-3", + "config": map[string]interface{}{ + "type": "logfile", + "id": "logfile-3", + }, + }, + }, + }), + }, + { + ID: "shipper-stashit", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: 
MustExpectedConfig(map[string]interface{}{ + "type": "logstash", + "shipper": map[string]interface{}{}, + }), + }, + }, + }, + { + ID: "log-redis", + InputSpec: &InputRuntimeSpec{ + InputType: "log", + BinaryName: "filebeat", + BinaryPath: filepath.Join("..", "..", "specs", "filebeat"), + }, + Units: []Unit{ + { + ID: "log-redis", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "shipper", + }), + }, + { + ID: "log-redis-logfile-4", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: mustExpectedConfigForceType(map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + }, "log"), + }, + }, + Shipper: &ShipperReference{ + ComponentID: "shipper-redis", + UnitID: "log-redis", + }, + }, + { + ID: "shipper-redis", + ShipperSpec: &ShipperRuntimeSpec{ + ShipperType: "shipper", + BinaryName: "shipper", + BinaryPath: filepath.Join("..", "..", "specs", "shipper"), + }, + Units: []Unit{ + { + ID: "log-redis", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "id": "log-redis", + "units": []interface{}{ + map[string]interface{}{ + "id": "log-redis-logfile-4", + "config": map[string]interface{}{ + "type": "logfile", + "id": "logfile-4", + }, + }, + }, + }), + }, + { + ID: "shipper-redis", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "redis", + "shipper": map[string]interface{}{}, + }), + }, + }, + }, + { + ID: "apm-default", + InputSpec: &InputRuntimeSpec{ + InputType: "apm", + BinaryName: "apm-server", + BinaryPath: filepath.Join("..", "..", "specs", "apm-server"), + }, + Units: []Unit{ + { + ID: "apm-default", + Type: client.UnitTypeOutput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "elasticsearch", + "shipper": map[string]interface{}{}, + }), + }, + { + ID: "apm-default-apm-server-0", + Type: client.UnitTypeInput, + LogLevel: defaultUnitLogLevel, + Config: MustExpectedConfig(map[string]interface{}{ + "type": "apm", + "id": "apm-server-0", + }), + }, + }, + }, + }, + }, } for _, scenario := range scenarios { @@ -927,11 +1485,24 @@ func TestToComponents(t *testing.T) { if expected.Err != nil { assert.Equal(t, expected.Err, actual.Err) assert.EqualValues(t, expected.Units, actual.Units) - } else { - assert.Equal(t, expected.Spec.InputType, actual.Spec.InputType) - assert.Equal(t, expected.Spec.BinaryName, actual.Spec.BinaryName) - assert.Equal(t, expected.Spec.BinaryPath, actual.Spec.BinaryPath) + } else if expected.InputSpec != nil { + assert.Nil(t, actual.ShipperSpec) + assert.Equal(t, expected.InputSpec.InputType, actual.InputSpec.InputType) + assert.Equal(t, expected.InputSpec.BinaryName, actual.InputSpec.BinaryName) + assert.Equal(t, expected.InputSpec.BinaryPath, actual.InputSpec.BinaryPath) + assert.EqualValues(t, expected.Units, actual.Units) + if expected.Shipper != nil { + assert.Equal(t, *expected.Shipper, *actual.Shipper) + } else { + assert.Nil(t, actual.Shipper) + } + } else if expected.ShipperSpec != nil { + assert.Nil(t, actual.InputSpec) + assert.Equal(t, expected.ShipperSpec.ShipperType, actual.ShipperSpec.ShipperType) + assert.Equal(t, expected.ShipperSpec.BinaryName, actual.ShipperSpec.BinaryName) + assert.Equal(t, expected.ShipperSpec.BinaryPath, actual.ShipperSpec.BinaryPath) assert.EqualValues(t, expected.Units, actual.Units) + assert.Nil(t, actual.Shipper) } } } diff --git 
a/pkg/component/fake/common/common.go b/pkg/component/fake/common/common.go new file mode 100644 index 00000000000..e3fc075547a --- /dev/null +++ b/pkg/component/fake/common/common.go @@ -0,0 +1,34 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package common + +import ( + "github.com/mitchellh/mapstructure" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" +) + +// FakeShipperConfigTLS is basic TLS configuration for a shipper. +type FakeShipperConfigTLS struct { + CAs []string `mapstructure:"certificate_authorities"` + Cert string `mapstructure:"certificate"` + Key string `mapstructure:"key"` +} + +// FakeShipperConfig is basic configuration for a shipper. +type FakeShipperConfig struct { + Server string `mapstructure:"server"` + TLS *FakeShipperConfigTLS `mapstructure:"ssl"` +} + +// ParseFakeShipperConfig parses the shipper GRPC server and ssl configuration information. +func ParseFakeShipperConfig(cfg *proto.UnitExpectedConfig) (FakeShipperConfig, error) { + var r FakeShipperConfig + err := mapstructure.Decode(cfg.Source.AsMap(), &r) + if err != nil { + return FakeShipperConfig{}, err + } + return r, nil +} diff --git a/pkg/component/fake/common/event.pb.go b/pkg/component/fake/common/event.pb.go new file mode 100644 index 00000000000..855ed0822f3 --- /dev/null +++ b/pkg/component/fake/common/event.pb.go @@ -0,0 +1,235 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.28.1 +// protoc v3.21.5 +// source: pkg/component/fake/common/event.proto + +package common + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + structpb "google.golang.org/protobuf/types/known/structpb" + timestamppb "google.golang.org/protobuf/types/known/timestamppb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Event message. +type Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Timestamp the event was generated. + Generated *timestamppb.Timestamp `protobuf:"bytes,1,opt,name=generated,proto3" json:"generated,omitempty"` + // Content of the event. 
+ Content *structpb.Struct `protobuf:"bytes,2,opt,name=content,proto3" json:"content,omitempty"` +} + +func (x *Event) Reset() { + *x = Event{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Event) ProtoMessage() {} + +func (x *Event) ProtoReflect() protoreflect.Message { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Event.ProtoReflect.Descriptor instead. +func (*Event) Descriptor() ([]byte, []int) { + return file_pkg_component_fake_common_event_proto_rawDescGZIP(), []int{0} +} + +func (x *Event) GetGenerated() *timestamppb.Timestamp { + if x != nil { + return x.Generated + } + return nil +} + +func (x *Event) GetContent() *structpb.Struct { + if x != nil { + return x.Content + } + return nil +} + +// Event response message. +type EventResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *EventResponse) Reset() { + *x = EventResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *EventResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*EventResponse) ProtoMessage() {} + +func (x *EventResponse) ProtoReflect() protoreflect.Message { + mi := &file_pkg_component_fake_common_event_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use EventResponse.ProtoReflect.Descriptor instead. 
+func (*EventResponse) Descriptor() ([]byte, []int) { + return file_pkg_component_fake_common_event_proto_rawDescGZIP(), []int{1} +} + +var File_pkg_component_fake_common_event_proto protoreflect.FileDescriptor + +var file_pkg_component_fake_common_event_proto_rawDesc = []byte{ + 0x0a, 0x25, 0x70, 0x6b, 0x67, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x2f, + 0x66, 0x61, 0x6b, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x1a, + 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1f, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, + 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x74, + 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x38, 0x0a, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, + 0x61, 0x74, 0x65, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, + 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x67, 0x65, 0x6e, 0x65, 0x72, 0x61, 0x74, 0x65, + 0x64, 0x12, 0x31, 0x0a, 0x07, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x07, 0x63, 0x6f, 0x6e, + 0x74, 0x65, 0x6e, 0x74, 0x22, 0x0f, 0x0a, 0x0d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0x46, 0x0a, 0x11, 0x46, 0x61, 0x6b, 0x65, 0x45, 0x76, 0x65, + 0x6e, 0x74, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x31, 0x0a, 0x09, 0x53, 0x65, + 0x6e, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x0d, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, + 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x15, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, + 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x1e, 0x5a, + 0x19, 0x70, 0x6b, 0x67, 0x2f, 0x63, 0x6f, 0x6d, 0x70, 0x6f, 0x6e, 0x65, 0x6e, 0x74, 0x2f, 0x66, + 0x61, 0x6b, 0x65, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_pkg_component_fake_common_event_proto_rawDescOnce sync.Once + file_pkg_component_fake_common_event_proto_rawDescData = file_pkg_component_fake_common_event_proto_rawDesc +) + +func file_pkg_component_fake_common_event_proto_rawDescGZIP() []byte { + file_pkg_component_fake_common_event_proto_rawDescOnce.Do(func() { + file_pkg_component_fake_common_event_proto_rawDescData = protoimpl.X.CompressGZIP(file_pkg_component_fake_common_event_proto_rawDescData) + }) + return file_pkg_component_fake_common_event_proto_rawDescData +} + +var file_pkg_component_fake_common_event_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_pkg_component_fake_common_event_proto_goTypes = []interface{}{ + (*Event)(nil), // 0: common.Event + (*EventResponse)(nil), // 1: common.EventResponse + (*timestamppb.Timestamp)(nil), // 2: google.protobuf.Timestamp + (*structpb.Struct)(nil), // 3: google.protobuf.Struct +} +var file_pkg_component_fake_common_event_proto_depIdxs = []int32{ + 2, // 0: common.Event.generated:type_name -> google.protobuf.Timestamp + 3, // 1: 
common.Event.content:type_name -> google.protobuf.Struct + 0, // 2: common.FakeEventProtocol.SendEvent:input_type -> common.Event + 1, // 3: common.FakeEventProtocol.SendEvent:output_type -> common.EventResponse + 3, // [3:4] is the sub-list for method output_type + 2, // [2:3] is the sub-list for method input_type + 2, // [2:2] is the sub-list for extension type_name + 2, // [2:2] is the sub-list for extension extendee + 0, // [0:2] is the sub-list for field type_name +} + +func init() { file_pkg_component_fake_common_event_proto_init() } +func file_pkg_component_fake_common_event_proto_init() { + if File_pkg_component_fake_common_event_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_pkg_component_fake_common_event_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_pkg_component_fake_common_event_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*EventResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_pkg_component_fake_common_event_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_pkg_component_fake_common_event_proto_goTypes, + DependencyIndexes: file_pkg_component_fake_common_event_proto_depIdxs, + MessageInfos: file_pkg_component_fake_common_event_proto_msgTypes, + }.Build() + File_pkg_component_fake_common_event_proto = out.File + file_pkg_component_fake_common_event_proto_rawDesc = nil + file_pkg_component_fake_common_event_proto_goTypes = nil + file_pkg_component_fake_common_event_proto_depIdxs = nil +} diff --git a/pkg/component/fake/common/event.proto b/pkg/component/fake/common/event.proto new file mode 100644 index 00000000000..35510edc364 --- /dev/null +++ b/pkg/component/fake/common/event.proto @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +syntax = "proto3"; + +package common; + +option cc_enable_arenas = true; +option go_package = "pkg/component/fake/common"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; + +// Event message. +message Event { + // Timestamp the event was generated. + google.protobuf.Timestamp generated = 1; + // Content of the event. + google.protobuf.Struct content = 2; +} + +// Event response message. +message EventResponse { +} + +service FakeEventProtocol { + // SendEvent sends an event over the protocol. + rpc SendEvent(Event) returns (EventResponse); +} diff --git a/pkg/component/fake/common/event_grpc.pb.go b/pkg/component/fake/common/event_grpc.pb.go new file mode 100644 index 00000000000..532841874a1 --- /dev/null +++ b/pkg/component/fake/common/event_grpc.pb.go @@ -0,0 +1,112 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. 
+// versions: +// - protoc-gen-go-grpc v1.2.0 +// - protoc v3.21.5 +// source: pkg/component/fake/common/event.proto + +package common + +import ( + context "context" + + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +// Requires gRPC-Go v1.32.0 or later. +const _ = grpc.SupportPackageIsVersion7 + +// FakeEventProtocolClient is the client API for FakeEventProtocol service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type FakeEventProtocolClient interface { + // SendEvent sends an event over the protocol. + SendEvent(ctx context.Context, in *Event, opts ...grpc.CallOption) (*EventResponse, error) +} + +type fakeEventProtocolClient struct { + cc grpc.ClientConnInterface +} + +func NewFakeEventProtocolClient(cc grpc.ClientConnInterface) FakeEventProtocolClient { + return &fakeEventProtocolClient{cc} +} + +func (c *fakeEventProtocolClient) SendEvent(ctx context.Context, in *Event, opts ...grpc.CallOption) (*EventResponse, error) { + out := new(EventResponse) + err := c.cc.Invoke(ctx, "/common.FakeEventProtocol/SendEvent", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// FakeEventProtocolServer is the server API for FakeEventProtocol service. +// All implementations must embed UnimplementedFakeEventProtocolServer +// for forward compatibility +type FakeEventProtocolServer interface { + // SendEvent sends an event over the protocol. + SendEvent(context.Context, *Event) (*EventResponse, error) + mustEmbedUnimplementedFakeEventProtocolServer() +} + +// UnimplementedFakeEventProtocolServer must be embedded to have forward compatible implementations. +type UnimplementedFakeEventProtocolServer struct { +} + +func (UnimplementedFakeEventProtocolServer) SendEvent(context.Context, *Event) (*EventResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SendEvent not implemented") +} +func (UnimplementedFakeEventProtocolServer) mustEmbedUnimplementedFakeEventProtocolServer() {} + +// UnsafeFakeEventProtocolServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to FakeEventProtocolServer will +// result in compilation errors. +type UnsafeFakeEventProtocolServer interface { + mustEmbedUnimplementedFakeEventProtocolServer() +} + +func RegisterFakeEventProtocolServer(s grpc.ServiceRegistrar, srv FakeEventProtocolServer) { + s.RegisterService(&FakeEventProtocol_ServiceDesc, srv) +} + +func _FakeEventProtocol_SendEvent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Event) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(FakeEventProtocolServer).SendEvent(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/common.FakeEventProtocol/SendEvent", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(FakeEventProtocolServer).SendEvent(ctx, req.(*Event)) + } + return interceptor(ctx, in, info, handler) +} + +// FakeEventProtocol_ServiceDesc is the grpc.ServiceDesc for FakeEventProtocol service. 
+// It's only intended for direct use with grpc.RegisterService, +// and not to be introspected or modified (even as a copy) +var FakeEventProtocol_ServiceDesc = grpc.ServiceDesc{ + ServiceName: "common.FakeEventProtocol", + HandlerType: (*FakeEventProtocolServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SendEvent", + Handler: _FakeEventProtocol_SendEvent_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "pkg/component/fake/common/event.proto", +} diff --git a/pkg/component/fake/README.md b/pkg/component/fake/component/README.md similarity index 100% rename from pkg/component/fake/README.md rename to pkg/component/fake/component/README.md diff --git a/pkg/component/fake/component/dialer.go b/pkg/component/fake/component/dialer.go new file mode 100644 index 00000000000..8f55407123a --- /dev/null +++ b/pkg/component/fake/component/dialer.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !windows +// +build !windows + +package main + +import ( + "context" + "crypto/x509" + "net" + "strings" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" +) + +func dialContext(ctx context.Context, addr string, cp *x509.CertPool, serverName string) (*grpc.ClientConn, error) { + return grpc.DialContext(ctx, strings.TrimPrefix(addr, "unix://"), grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(cp, serverName))) +} + +func dialer(ctx context.Context, addr string) (net.Conn, error) { + var d net.Dialer + return d.DialContext(ctx, "unix", addr) +} diff --git a/pkg/component/fake/component/dialer_windows.go b/pkg/component/fake/component/dialer_windows.go new file mode 100644 index 00000000000..98793bc09e4 --- /dev/null +++ b/pkg/component/fake/component/dialer_windows.go @@ -0,0 +1,27 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows +// +build windows + +package main + +import ( + "context" + "crypto/x509" + "net" + + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/elastic/elastic-agent-libs/api/npipe" +) + +func dialContext(ctx context.Context, addr string, cp *x509.CertPool, serverName string) (*grpc.ClientConn, error) { + return grpc.DialContext(ctx, addr, grpc.WithContextDialer(dialer), grpc.WithTransportCredentials(credentials.NewClientTLSFromCert(cp, serverName))) +} + +func dialer(ctx context.Context, addr string) (net.Conn, error) { + return npipe.DialContext(addr)(ctx, "", "") +} diff --git a/pkg/component/fake/component/main.go b/pkg/component/fake/component/main.go new file mode 100644 index 00000000000..2f10148357f --- /dev/null +++ b/pkg/component/fake/component/main.go @@ -0,0 +1,580 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package main + +import ( + "context" + "crypto/x509" + "errors" + "fmt" + "io" + "os" + "os/signal" + "syscall" + "time" + + "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/structpb" + "google.golang.org/protobuf/types/known/timestamppb" + + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + + "github.com/elastic/elastic-agent/pkg/component/fake/common" +) + +const ( + fake = "fake" + fakeShipper = "fake-shipper" + + configuringMsg = "Configuring" + stoppingMsg = "Stopping" + stoppedMsg = "Stopped" +) + +func main() { + err := run() + if err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} + +func run() error { + logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + ver := client.VersionInfo{ + Name: fake, + Version: "1.0", + Meta: map[string]string{ + "input": fake, + }, + } + c, _, err := client.NewV2FromReader(os.Stdin, ver) + if err != nil { + return fmt.Errorf("failed to create GRPC client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + n := make(chan os.Signal, 1) + signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + defer func() { + signal.Stop(n) + cancel() + }() + go func() { + select { + case <-n: + cancel() + case <-ctx.Done(): + } + }() + + err = c.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start GRPC client: %w", err) + } + + s := newStateManager(logger) + for { + select { + case <-ctx.Done(): + return nil + case change := <-c.UnitChanges(): + switch change.Type { + case client.UnitChangedAdded: + s.added(change.Unit) + case client.UnitChangedModified: + s.modified(change.Unit) + case client.UnitChangedRemoved: + s.removed(change.Unit) + } + case err := <-c.Errors(): + if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { + fmt.Fprintf(os.Stderr, "GRPC client error: %+v\n", err) + } + } + } +} + +type stateManager struct { + logger zerolog.Logger + inputs map[string]runningUnit + output runningUnit +} + +func newStateManager(logger zerolog.Logger) *stateManager { + return &stateManager{logger: logger, inputs: make(map[string]runningUnit)} +} + +func (s *stateManager) added(unit *client.Unit) { + if unit.Type() == client.UnitTypeOutput { + if s.output != nil { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate output unit; only supports one", nil) + return + } + r, err := newRunningUnit(s.logger, s, unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.output = r + return + } + + _, ok := s.inputs[unit.ID()] + if ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate input unit", nil) + return + } + r, err := newRunningUnit(s.logger, s, unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.inputs[unit.ID()] = r +} + +func (s *stateManager) modified(unit *client.Unit) { + if unit.Type() == client.UnitTypeOutput { + if s.output == nil { + _ = unit.UpdateState(client.UnitStateFailed, "Error: modified a non-existing output unit", nil) + return + } + err := s.output.Update(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } + return + } + + existing, ok := s.inputs[unit.ID()] + if !ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) + return + } + err := existing.Update(unit) 
+ if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } +} + +func (s *stateManager) removed(unit *client.Unit) { + if unit.Type() == client.UnitTypeOutput { + if s.output != nil { + s.output = nil + } + return + } + + _, ok := s.inputs[unit.ID()] + if !ok { + return + } + delete(s.inputs, unit.ID()) +} + +type runningUnit interface { + Unit() *client.Unit + Update(u *client.Unit) error +} + +type sendEvent struct { + evt *common.Event + timeout time.Duration + doneCh chan error +} + +type fakeShipperOutput struct { + logger zerolog.Logger + unit *client.Unit + cfg *proto.UnitExpectedConfig + + evtCh chan sendEvent + + runner errgroup.Group + canceller context.CancelFunc +} + +func newFakeShipperOutput(logger zerolog.Logger, logLevel client.UnitLogLevel, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeShipperOutput, error) { + logger = logger.Level(toZerologLevel(logLevel)) + + f := &fakeShipperOutput{ + logger: logger, + unit: unit, + cfg: cfg, + evtCh: make(chan sendEvent), + } + + logger.Trace().Msg("registering kill action for unit") + unit.RegisterAction(&killAction{f.logger}) + + f.start(unit, cfg) + + return f, nil +} + +func (f *fakeShipperOutput) Unit() *client.Unit { + return f.unit +} + +func (f *fakeShipperOutput) Update(u *client.Unit) error { + expected, _, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this output to stop + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + go func() { + f.stop() + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) + }() + return nil + } + + if config.Type == "" { + return fmt.Errorf("unit missing config type") + } + if config.Type != fakeShipper { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) + } + + f.stop() + f.cfg = config + f.start(u, config) + + return nil +} + +func (f *fakeShipperOutput) sendEvent(event map[string]interface{}, timeout time.Duration) error { + content, err := structpb.NewStruct(event) + if err != nil { + return err + } + evt := &common.Event{ + Generated: timestamppb.Now(), + Content: content, + } + doneCh := make(chan error) + f.evtCh <- sendEvent{ + evt: evt, + timeout: timeout, + doneCh: doneCh, + } + return <-doneCh +} + +func (f *fakeShipperOutput) start(unit *client.Unit, cfg *proto.UnitExpectedConfig) { + ctx, cancel := context.WithCancel(context.Background()) + f.canceller = cancel + f.runner.Go(func() error { + for { + err := f.run(ctx, unit, cfg) + if err != nil { + if errors.Is(err, context.Canceled) { + // don't restart + return err + } + // restartable error; the event must be finalized with Msg or zerolog emits nothing + f.logger.Error().Err(err).Msg("restartable error") + _ = unit.UpdateState(client.UnitStateFailed, err.Error(), nil) + // delay restart + <-time.After(time.Second) + } + } + }) +} + +func (f *fakeShipperOutput) stop() { + if f.canceller != nil { + f.canceller() + f.canceller = nil + f.runner.Wait() + } +} + +func (f *fakeShipperOutput) run(ctx context.Context, unit *client.Unit, cfg *proto.UnitExpectedConfig) error { + f.logger.Debug().Str("state", client.UnitStateConfiguring.String()).Str("message", configuringMsg).Msg("restarting shipper output") + _ = unit.UpdateState(client.UnitStateConfiguring, configuringMsg, nil) + + shipperCfg, err := common.ParseFakeShipperConfig(cfg) + if 
err != nil { + return fmt.Errorf("failed to parse fake shipper config: %w", err) + } + if shipperCfg.TLS == nil || len(shipperCfg.TLS.CAs) == 0 { + return fmt.Errorf("fake shipper ssl configuration missing") + } + certPool := x509.NewCertPool() + for _, certPEM := range shipperCfg.TLS.CAs { + if ok := certPool.AppendCertsFromPEM([]byte(certPEM)); !ok { + return errors.New("failed to append CA for shipper connection") + } + } + conn, err := dialContext(ctx, shipperCfg.Server, certPool, unit.ID()) + if err != nil { + return fmt.Errorf("grpc client failed to connect: %w", err) + } + defer conn.Close() + + connectedMsg := fmt.Sprintf("GRPC fake event pipe connected %q", shipperCfg.Server) + f.logger.Debug().Str("state", client.UnitStateHealthy.String()).Str("message", connectedMsg).Msg("connected to output") + _ = unit.UpdateState(client.UnitStateHealthy, connectedMsg, nil) + + client := common.NewFakeEventProtocolClient(conn) + for { + select { + case <-ctx.Done(): + return ctx.Err() + case evt := <-f.evtCh: + evtCtx, evtCanceller := context.WithTimeout(ctx, evt.timeout) + _, err := client.SendEvent(evtCtx, evt.evt, grpc.WaitForReady(true)) + evtCanceller() + evt.doneCh <- err + } + } +} + +type fakeInput struct { + logger zerolog.Logger + manager *stateManager + unit *client.Unit + cfg *proto.UnitExpectedConfig + + state client.UnitState + stateMsg string + + canceller context.CancelFunc +} + +func newFakeInput(logger zerolog.Logger, logLevel client.UnitLogLevel, manager *stateManager, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeInput, error) { + logger = logger.Level(toZerologLevel(logLevel)) + state, msg, err := getStateFromConfig(cfg) + if err != nil { + return nil, err + } + + i := &fakeInput{ + logger: logger, + manager: manager, + unit: unit, + cfg: cfg, + state: state, + stateMsg: msg, + } + + logger.Trace().Msg("registering set_state action for unit") + unit.RegisterAction(&stateSetterAction{i}) + logger.Trace().Msg("registering send_event action for unit") + unit.RegisterAction(&sendEventAction{i}) + logger.Trace().Msg("registering kill action for unit") + unit.RegisterAction(&killAction{i.logger}) + logger.Debug().Str("state", i.state.String()).Str("message", i.stateMsg).Msg("updating unit state") + _ = unit.UpdateState(i.state, i.stateMsg, nil) + + logTimer := 10 * time.Second + if logTimerValue, ok := cfg.Source.Fields["log_timer"]; ok { + logTimeStr := logTimerValue.GetStringValue() + if logTimeStr != "" { + logTimer, err = time.ParseDuration(logTimeStr) + if err != nil { + return nil, err + } + } + } + ctx, cancel := context.WithCancel(context.Background()) + go func() { + t := time.NewTicker(logTimer) + defer t.Stop() + for { + select { + case <-ctx.Done(): + return + case <-t.C: + logger.Trace().Dur("log_timer", logTimer).Msg("trace log ticker") + } + } + }() + i.canceller = cancel + + return i, nil +} + +func (f *fakeInput) Unit() *client.Unit { + return f.unit +} + +func (f *fakeInput) Update(u *client.Unit) error { + expected, _, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this input to stop + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + f.canceller() + go func() { + <-time.After(1 * time.Second) + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) + 
}() + return nil + } + + if config.Type == "" { + return fmt.Errorf("unit missing config type") + } + if config.Type != fake { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) + } + + state, stateMsg, err := getStateFromConfig(config) + if err != nil { + return fmt.Errorf("unit config parsing error: %w", err) + } + f.state = state + f.stateMsg = stateMsg + f.logger.Debug().Str("state", f.state.String()).Str("message", f.stateMsg).Msg("updating unit state") + _ = u.UpdateState(f.state, f.stateMsg, nil) + return nil +} + +type stateSetterAction struct { + input *fakeInput +} + +func (s *stateSetterAction) Name() string { + return "set_state" +} + +func (s *stateSetterAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { + s.input.logger.Trace().Msg("executing set_state action") + state, stateMsg, err := getStateFromMap(params) + if err != nil { + return nil, err + } + s.input.state = state + s.input.stateMsg = stateMsg + s.input.logger.Debug().Str("state", s.input.state.String()).Str("message", s.input.stateMsg).Msg("updating unit state") + _ = s.input.unit.UpdateState(s.input.state, s.input.stateMsg, nil) + return nil, nil +} + +type sendEventAction struct { + input *fakeInput +} + +func (s *sendEventAction) Name() string { + return "send_event" +} + +func (s *sendEventAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { + const ( + timeoutField = "timeout" + timeoutDefault = 3 * time.Second + ) + + s.input.logger.Trace().Msg("executing send_event action") + + // timeout is taken from the action to define the timeout + timeout := timeoutDefault + if timeoutRaw, ok := params[timeoutField]; ok { + if timeoutStr, ok := timeoutRaw.(string); ok { + dur, err := time.ParseDuration(timeoutStr) + if err != nil { + return nil, fmt.Errorf("failed to parse timeout duration: %w", err) + } + timeout = dur + } + } + + if s.input.manager.output != nil { + output, ok := s.input.manager.output.(*fakeShipperOutput) + if !ok { + return nil, fmt.Errorf("output is not fake-shipper output, cannot send event, got type %T", s.input.manager.output) + } + err := output.sendEvent(params, timeout) + if err != nil { + return nil, err + } + return nil, nil + } + return nil, errors.New("no output configured to send event") +} + +type killAction struct { + logger zerolog.Logger +} + +func (s *killAction) Name() string { + return "kill" +} + +func (s *killAction) Execute(_ context.Context, _ map[string]interface{}) (map[string]interface{}, error) { + s.logger.Trace().Msg("executing kill action") + os.Exit(1) + return nil, nil +} + +func newRunningUnit(logger zerolog.Logger, manager *stateManager, unit *client.Unit) (runningUnit, error) { + _, logLevel, config := unit.Expected() + if config.Type == "" { + return nil, fmt.Errorf("unit config type empty") + } + if unit.Type() == client.UnitTypeOutput { + switch config.Type { + case fakeShipper: + return newFakeShipperOutput(logger, logLevel, unit, config) + } + return nil, fmt.Errorf("unknown output unit config type: %s", config.Type) + } + switch config.Type { + case fake: + return newFakeInput(logger, logLevel, manager, unit, config) + } + return nil, fmt.Errorf("unknown input unit config type: %s", config.Type) +} + +func getStateFromConfig(cfg *proto.UnitExpectedConfig) (client.UnitState, string, error) { + return getStateFromMap(cfg.Source.AsMap()) +} + +func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, error) { + state, ok 
:= cfg["state"] + if !ok { + return client.UnitStateStarting, "", errors.New("missing required state parameter") + } + stateTypeI, ok := state.(int) + if !ok { + // try float64 (JSON) does it differently than YAML + stateTypeF, ok := state.(float64) + if !ok { + return client.UnitStateStarting, "", fmt.Errorf("state parameter is not a valid unit state: %T", state) + } + stateTypeI = int(stateTypeF) + } + stateType := client.UnitState(stateTypeI) + stateMsgStr := "" + stateMsg, ok := cfg["message"] + if ok { + stateMsgStr, _ = stateMsg.(string) + } + return stateType, stateMsgStr, nil +} + +func toZerologLevel(level client.UnitLogLevel) zerolog.Level { + switch level { + case client.UnitLogLevelError: + return zerolog.ErrorLevel + case client.UnitLogLevelWarn: + return zerolog.WarnLevel + case client.UnitLogLevelInfo: + return zerolog.InfoLevel + case client.UnitLogLevelDebug: + return zerolog.DebugLevel + case client.UnitLogLevelTrace: + return zerolog.TraceLevel + } + return zerolog.InfoLevel +} diff --git a/pkg/component/fake/main.go b/pkg/component/fake/main.go deleted file mode 100644 index 0464d55b8ef..00000000000 --- a/pkg/component/fake/main.go +++ /dev/null @@ -1,342 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "context" - "errors" - "fmt" - "io" - "os" - "os/signal" - "syscall" - "time" - - "github.com/rs/zerolog" - - "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent-client/v7/pkg/proto" -) - -const ( - fake = "fake" - - stoppingMsg = "Stopping" - stoppedMsg = "Stopped" -) - -func main() { - err := run() - if err != nil { - fmt.Fprintf(os.Stderr, "%s\n", err) - os.Exit(1) - } -} - -func run() error { - logger := zerolog.New(os.Stderr).With().Timestamp().Logger() - ver := client.VersionInfo{ - Name: fake, - Version: "1.0", - Meta: map[string]string{ - "input": fake, - }, - } - c, _, err := client.NewV2FromReader(os.Stdin, ver) - if err != nil { - return fmt.Errorf("failed to create GRPC client: %w", err) - } - - ctx, cancel := context.WithCancel(context.Background()) - n := make(chan os.Signal, 1) - signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) - defer func() { - signal.Stop(n) - cancel() - }() - go func() { - select { - case <-n: - cancel() - case <-ctx.Done(): - } - }() - - err = c.Start(ctx) - if err != nil { - return fmt.Errorf("failed to start GRPC client: %w", err) - } - - s := newStateManager(logger) - for { - select { - case <-ctx.Done(): - return nil - case change := <-c.UnitChanges(): - switch change.Type { - case client.UnitChangedAdded: - s.added(change.Unit) - case client.UnitChangedModified: - s.modified(change.Unit) - case client.UnitChangedRemoved: - s.removed(change.Unit) - } - case err := <-c.Errors(): - if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { - fmt.Fprintf(os.Stderr, "GRPC client error: %+v\n", err) - } - } - } -} - -type unitKey struct { - unitType client.UnitType - unitID string -} - -type stateManager struct { - logger zerolog.Logger - units map[unitKey]runningUnit -} - -func newStateManager(logger zerolog.Logger) *stateManager { - return &stateManager{logger: logger, units: make(map[unitKey]runningUnit)} -} - -func (s *stateManager) added(unit *client.Unit) { - k := newUnitKey(unit) - _, ok := s.units[k] - if ok { - _ = 
unit.UpdateState(client.UnitStateFailed, "Error: duplicate unit", nil) - return - } - r, err := newRunningUnit(s.logger, unit) - if err != nil { - _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) - return - } - s.units[k] = r -} - -func (s *stateManager) modified(unit *client.Unit) { - existing, ok := s.units[newUnitKey(unit)] - if !ok { - _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) - return - } - err := existing.Update(unit) - if err != nil { - _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) - } -} - -func (s *stateManager) removed(unit *client.Unit) { - k := newUnitKey(unit) - _, ok := s.units[k] - if !ok { - return - } - delete(s.units, k) -} - -type runningUnit interface { - Unit() *client.Unit - Update(u *client.Unit) error -} - -type fakeInput struct { - logger zerolog.Logger - unit *client.Unit - cfg *proto.UnitExpectedConfig - - state client.UnitState - stateMsg string - - canceller context.CancelFunc -} - -func newFakeInput(logger zerolog.Logger, logLevel client.UnitLogLevel, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeInput, error) { - logger = logger.Level(toZerologLevel(logLevel)) - state, msg, err := getStateFromConfig(cfg) - if err != nil { - return nil, err - } - - i := &fakeInput{ - logger: logger, - unit: unit, - cfg: cfg, - state: state, - stateMsg: msg, - } - - logger.Trace().Msg("registering set_state action for unit") - unit.RegisterAction(&stateSetterAction{i}) - logger.Trace().Msg("registering kill action for unit") - unit.RegisterAction(&killAction{i}) - logger.Debug().Str("state", i.state.String()).Str("message", i.stateMsg).Msg("updating unit state") - _ = unit.UpdateState(i.state, i.stateMsg, nil) - - logTimer := 10 * time.Second - if logTimerValue, ok := cfg.Source.Fields["log_timer"]; ok { - logTimeStr := logTimerValue.GetStringValue() - if logTimeStr != "" { - logTimer, err = time.ParseDuration(logTimeStr) - if err != nil { - return nil, err - } - } - } - ctx, cancel := context.WithCancel(context.Background()) - go func() { - t := time.NewTicker(logTimer) - defer t.Stop() - for { - select { - case <-ctx.Done(): - return - case <-t.C: - logger.Trace().Dur("log_timer", logTimer).Msg("trace log ticker") - } - } - }() - i.canceller = cancel - - return i, nil -} - -func (f *fakeInput) Unit() *client.Unit { - return f.unit -} - -func (f *fakeInput) Update(u *client.Unit) error { - expected, _, config := u.Expected() - if expected == client.UnitStateStopped { - // agent is requesting this input to stop - f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") - _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) - f.canceller() - go func() { - <-time.After(1 * time.Second) - f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") - _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) - }() - return nil - } - - if config.Type == "" { - return fmt.Errorf("unit missing config type") - } - if config.Type != fake { - return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) - } - - state, stateMsg, err := getStateFromConfig(config) - if err != nil { - return fmt.Errorf("unit config parsing error: %w", err) - } - f.state = state - f.stateMsg = stateMsg - f.logger.Debug().Str("state", f.state.String()).Str("message", f.stateMsg).Msg("updating unit state") - _ = u.UpdateState(f.state, f.stateMsg, nil) - return 
nil -} - -type stateSetterAction struct { - input *fakeInput -} - -func (s *stateSetterAction) Name() string { - return "set_state" -} - -func (s *stateSetterAction) Execute(_ context.Context, params map[string]interface{}) (map[string]interface{}, error) { - s.input.logger.Trace().Msg("executing set_state action") - state, stateMsg, err := getStateFromMap(params) - if err != nil { - return nil, err - } - s.input.state = state - s.input.stateMsg = stateMsg - s.input.logger.Debug().Str("state", s.input.state.String()).Str("message", s.input.stateMsg).Msg("updating unit state") - _ = s.input.unit.UpdateState(s.input.state, s.input.stateMsg, nil) - return nil, nil -} - -type killAction struct { - input *fakeInput -} - -func (s *killAction) Name() string { - return "kill" -} - -func (s *killAction) Execute(_ context.Context, _ map[string]interface{}) (map[string]interface{}, error) { - s.input.logger.Trace().Msg("executing kill action") - os.Exit(1) - return nil, nil -} - -func newRunningUnit(logger zerolog.Logger, unit *client.Unit) (runningUnit, error) { - _, logLevel, config := unit.Expected() - if config.Type == "" { - return nil, fmt.Errorf("unit config type empty") - } - switch config.Type { - case fake: - return newFakeInput(logger, logLevel, unit, config) - } - return nil, fmt.Errorf("unknown unit config type: %s", config.Type) -} - -func newUnitKey(unit *client.Unit) unitKey { - return unitKey{ - unitType: unit.Type(), - unitID: unit.ID(), - } -} - -func getStateFromConfig(cfg *proto.UnitExpectedConfig) (client.UnitState, string, error) { - return getStateFromMap(cfg.Source.AsMap()) -} - -func getStateFromMap(cfg map[string]interface{}) (client.UnitState, string, error) { - state, ok := cfg["state"] - if !ok { - return client.UnitStateStarting, "", errors.New("missing required state parameter") - } - stateTypeI, ok := state.(int) - if !ok { - // try float64 (JSON) does it differently than YAML - stateTypeF, ok := state.(float64) - if !ok { - return client.UnitStateStarting, "", fmt.Errorf("state parameter is not a valid unit state: %T", state) - } - stateTypeI = int(stateTypeF) - } - stateType := client.UnitState(stateTypeI) - stateMsgStr := "" - stateMsg, ok := cfg["message"] - if ok { - stateMsgStr, _ = stateMsg.(string) - } - return stateType, stateMsgStr, nil -} - -func toZerologLevel(level client.UnitLogLevel) zerolog.Level { - switch level { - case client.UnitLogLevelError: - return zerolog.ErrorLevel - case client.UnitLogLevelWarn: - return zerolog.WarnLevel - case client.UnitLogLevelInfo: - return zerolog.InfoLevel - case client.UnitLogLevelDebug: - return zerolog.DebugLevel - case client.UnitLogLevelTrace: - return zerolog.TraceLevel - } - return zerolog.InfoLevel -} diff --git a/pkg/component/fake/shipper/README.md b/pkg/component/fake/shipper/README.md new file mode 100644 index 00000000000..a1e9add5d7d --- /dev/null +++ b/pkg/component/fake/shipper/README.md @@ -0,0 +1,3 @@ +# Fake Shipper + +Fake shipper that can be controlled with actions through the GRPC control protocol. Allows unit tests to simulate control and communication with a shipper. diff --git a/pkg/component/fake/shipper/listener.go b/pkg/component/fake/shipper/listener.go new file mode 100644 index 00000000000..ce4d6a99a41 --- /dev/null +++ b/pkg/component/fake/shipper/listener.go @@ -0,0 +1,29 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !windows + +package main + +import ( + "fmt" + "net" + "os" + "strings" +) + +func createListener(path string) (net.Listener, error) { + if !strings.HasPrefix(path, "unix://") { + return nil, fmt.Errorf("listener path must start with unix://; got %s", path) + } + path = strings.TrimPrefix(path, "unix://") + if _, err := os.Stat(path); !os.IsNotExist(err) { + os.Remove(path) + } + lis, err := net.Listen("unix", path) + if err != nil { + return nil, err + } + return lis, err +} diff --git a/pkg/component/fake/shipper/listener_windows.go b/pkg/component/fake/shipper/listener_windows.go new file mode 100644 index 00000000000..018fc3850ca --- /dev/null +++ b/pkg/component/fake/shipper/listener_windows.go @@ -0,0 +1,38 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build windows + +package main + +import ( + "fmt" + "net" + "os/user" + + "github.com/elastic/elastic-agent-libs/api/npipe" +) + +// createListener creates a named pipe listener on Windows +func createListener(path string) (net.Listener, error) { + sd, err := securityDescriptor() + if err != nil { + return nil, err + } + return npipe.NewListener(path, sd) +} + +func securityDescriptor() (string, error) { + u, err := user.Current() + if err != nil { + return "", fmt.Errorf("failed to get current user: %w", err) + } + // Named pipe security and access rights. + // We create the pipe and the specific users should only be able to write to it. + // See docs: https://docs.microsoft.com/en-us/windows/win32/ipc/named-pipe-security-and-access-rights + // String definition: https://docs.microsoft.com/en-us/windows/win32/secauthz/ace-strings + // Give generic read/write access to the specified user. + descriptor := "D:P(A;;GA;;;" + u.Uid + ")" + return descriptor, nil +} diff --git a/pkg/component/fake/shipper/main.go b/pkg/component/fake/shipper/main.go new file mode 100644 index 00000000000..347b9365090 --- /dev/null +++ b/pkg/component/fake/shipper/main.go @@ -0,0 +1,514 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package main + +import ( + "context" + "crypto/tls" + "errors" + "fmt" + "io" + "os" + "os/signal" + "sync" + "syscall" + + "golang.org/x/sync/errgroup" + "google.golang.org/grpc" + "google.golang.org/grpc/credentials" + + "github.com/rs/zerolog" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + + "github.com/elastic/elastic-agent/pkg/component/fake/common" +) + +const ( + fakeActionOutput = "fake-action-output" + fakeShipper = "fake-shipper" + + healthyMsg = "Healthy" + stoppingMsg = "Stopping" + stoppedMsg = "Stopped" + + recordActionEventID = "id" +) + +func main() { + err := run() + if err != nil { + fmt.Fprintf(os.Stderr, "%s\n", err) + os.Exit(1) + } +} + +func run() error { + logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + ver := client.VersionInfo{ + Name: fakeShipper, + Version: "1.0", + Meta: map[string]string{ + "shipper": fakeShipper, + }, + } + c, _, err := client.NewV2FromReader(os.Stdin, ver) + if err != nil { + return fmt.Errorf("failed to create GRPC client: %w", err) + } + + ctx, cancel := context.WithCancel(context.Background()) + n := make(chan os.Signal, 1) + signal.Notify(n, syscall.SIGINT, syscall.SIGTERM, syscall.SIGQUIT) + defer func() { + signal.Stop(n) + cancel() + }() + go func() { + select { + case <-n: + cancel() + case <-ctx.Done(): + } + }() + + err = c.Start(ctx) + if err != nil { + return fmt.Errorf("failed to start GRPC client: %w", err) + } + + s := newStateManager(logger) + for { + select { + case <-ctx.Done(): + return nil + case change := <-c.UnitChanges(): + switch change.Type { + case client.UnitChangedAdded: + s.added(change.Unit) + case client.UnitChangedModified: + s.modified(change.Unit) + case client.UnitChangedRemoved: + s.removed(change.Unit) + } + case err := <-c.Errors(): + if err != nil && !errors.Is(err, context.Canceled) && !errors.Is(err, io.EOF) { + fmt.Fprintf(os.Stderr, "GRPC client error: %+v\n", err) + } + } + } +} + +type unitKey struct { + unitType client.UnitType + unitID string +} + +type stateManager struct { + logger zerolog.Logger + unitsMx sync.RWMutex + units map[unitKey]runningUnit +} + +func newStateManager(logger zerolog.Logger) *stateManager { + return &stateManager{logger: logger, units: make(map[unitKey]runningUnit)} +} + +func (s *stateManager) added(unit *client.Unit) { + s.unitsMx.Lock() + defer s.unitsMx.Unlock() + k := newUnitKey(unit) + _, ok := s.units[k] + if ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: duplicate unit", nil) + return + } + r, err := newRunningUnit(s.logger, s, unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + return + } + s.units[k] = r +} + +func (s *stateManager) modified(unit *client.Unit) { + s.unitsMx.Lock() + defer s.unitsMx.Unlock() + existing, ok := s.units[newUnitKey(unit)] + if !ok { + _ = unit.UpdateState(client.UnitStateFailed, "Error: unknown unit", nil) + return + } + err := existing.Update(unit) + if err != nil { + _ = unit.UpdateState(client.UnitStateFailed, fmt.Sprintf("Error: %s", err), nil) + } +} + +func (s *stateManager) removed(unit *client.Unit) { + s.unitsMx.Lock() + defer s.unitsMx.Unlock() + k := newUnitKey(unit) + _, ok := s.units[k] + if !ok { + return + } + delete(s.units, k) +} + +func (s *stateManager) received(ctx context.Context, event *common.Event) error { + var cnt map[string]interface{} + if event.Content != nil { + cnt = event.Content.AsMap() + } + s.logger.Trace().Fields(map[string]interface{}{ + 
"timestamp": event.Generated.AsTime(), + "content": cnt, + }).Msg("received event") + idRaw, ok := cnt[recordActionEventID] + if !ok { + return nil + } + id, ok := idRaw.(string) + if !ok { + return nil + } + s.unitsMx.RLock() + defer s.unitsMx.RUnlock() + for k, u := range s.units { + if ctx.Err() != nil { + return ctx.Err() + } + if k.unitType == client.UnitTypeOutput { + actionOutput, ok := u.(*fakeActionOutputRuntime) + if ok { + if actionOutput.received(ctx, id, event) { + // caught by output + break + } + } + } + } + return nil +} + +type runningUnit interface { + Unit() *client.Unit + Update(u *client.Unit) error +} + +type fakeActionOutputRuntime struct { + logger zerolog.Logger + unit *client.Unit + cfg *proto.UnitExpectedConfig + + subsMx sync.RWMutex + subs map[string]chan *common.Event + + previousMx sync.RWMutex + previous map[string]*common.Event +} + +func newFakeActionOutputRuntime(logger zerolog.Logger, logLevel client.UnitLogLevel, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeActionOutputRuntime, error) { + logger = logger.Level(toZerologLevel(logLevel)) + + i := &fakeActionOutputRuntime{ + logger: logger, + unit: unit, + cfg: cfg, + subs: make(map[string]chan *common.Event), + previous: make(map[string]*common.Event), + } + + logger.Trace().Msg("registering record event action for unit") + unit.RegisterAction(&recordEventAction{i}) + logger.Trace().Msg("registering kill action for unit") + unit.RegisterAction(&killAction{logger}) + logger.Debug().Str("state", client.UnitStateHealthy.String()).Str("message", healthyMsg).Msg("updating unit state") + _ = unit.UpdateState(client.UnitStateHealthy, healthyMsg, nil) + + return i, nil +} + +func (f *fakeActionOutputRuntime) Unit() *client.Unit { + return f.unit +} + +func (f *fakeActionOutputRuntime) Update(u *client.Unit) error { + expected, _, config := u.Expected() + if expected == client.UnitStateStopped { + // agent is requesting this to stop + f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil) + go func() { + f.cleanup() + f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state") + _ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil) + }() + return nil + } + + if config.Type == "" { + return fmt.Errorf("unit missing config type") + } + if config.Type != fakeActionOutput { + return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type) + } + // nothing to really do + return nil +} + +func (f *fakeActionOutputRuntime) subscribe(id string) <-chan *common.Event { + f.previousMx.RLock() + e, ok := f.previous[id] + if ok { + f.previousMx.RUnlock() + f.logger.Trace().Str(recordActionEventID, id).Msg("event already received; directly sending to subscription") + c := make(chan *common.Event, 1) + c <- e + return c + } + f.previousMx.RUnlock() + + f.subsMx.Lock() + defer f.subsMx.Unlock() + c, ok := f.subs[id] + if ok { + return c + } + c = make(chan *common.Event, 1) + f.subs[id] = c + f.logger.Trace().Str(recordActionEventID, id).Msg("subscribing for an event") + return c +} + +func (f *fakeActionOutputRuntime) unsubscribe(id string) { + f.subsMx.Lock() + defer f.subsMx.Unlock() + f.logger.Trace().Str(recordActionEventID, id).Msg("unsubscribing for an event") + delete(f.subs, id) +} + +func (f *fakeActionOutputRuntime) cleanup() { + f.subsMx.Lock() + defer f.subsMx.Unlock() + for k, c := range f.subs { + 
close(c)
+		delete(f.subs, k)
+	}
+}
+
+func (f *fakeActionOutputRuntime) received(ctx context.Context, id string, event *common.Event) bool {
+	f.subsMx.RLock()
+	defer f.subsMx.RUnlock()
+	c, ok := f.subs[id]
+	if ok {
+		f.logger.Trace().Str("id", id).Msg("subscription exists for event id")
+		f.previousMx.Lock()
+		f.previous[id] = event
+		f.previousMx.Unlock()
+		select {
+		case <-ctx.Done():
+			return false
+		case c <- event:
+			return true
+		}
+	}
+	f.logger.Trace().Str("id", id).Msg("no subscription exists for event id")
+	return false
+}
+
+// recordEventAction is an action that returns a result only once an event comes over the fake shipper protocol
+type recordEventAction struct {
+	f *fakeActionOutputRuntime
+}
+
+func (r *recordEventAction) Name() string {
+	return "record_event"
+}
+
+func (r *recordEventAction) Execute(ctx context.Context, params map[string]interface{}) (map[string]interface{}, error) {
+	eventIDRaw, ok := params[recordActionEventID]
+	if !ok {
+		return nil, fmt.Errorf("missing required 'id' parameter")
+	}
+	eventID, ok := eventIDRaw.(string)
+	if !ok {
+		return nil, fmt.Errorf("'id' parameter not string type, got %T", eventIDRaw)
+	}
+	r.f.logger.Trace().Str(recordActionEventID, eventID).Msg("registering record event action")
+	c := r.f.subscribe(eventID)
+	defer r.f.unsubscribe(eventID)
+	select {
+	case <-ctx.Done():
+		return nil, ctx.Err()
+	case e, ok := <-c:
+		if !ok {
+			return nil, fmt.Errorf("never received event")
+		}
+		r.f.logger.Trace().Fields(map[string]interface{}{
+			"timestamp": e.Generated.AsTime(),
+			"content":   e.Content.AsMap(),
+		}).Msg("record_event action got subscribed event")
+		return map[string]interface{}{
+			"timestamp": e.Generated.String(),
+			"event":     e.Content.AsMap(),
+		}, nil
+	}
+}
+
+type fakeShipperInput struct {
+	common.UnimplementedFakeEventProtocolServer
+
+	logger  zerolog.Logger
+	manager *stateManager
+	unit    *client.Unit
+	cfg     *proto.UnitExpectedConfig
+
+	srv *grpc.Server
+	wg  errgroup.Group
+}
+
+func newFakeShipperInput(logger zerolog.Logger, logLevel client.UnitLogLevel, manager *stateManager, unit *client.Unit, cfg *proto.UnitExpectedConfig) (*fakeShipperInput, error) {
+	logger = logger.Level(toZerologLevel(logLevel))
+
+	i := &fakeShipperInput{
+		logger:  logger,
+		manager: manager,
+		unit:    unit,
+		cfg:     cfg,
+	}
+
+	srvCfg, err := common.ParseFakeShipperConfig(cfg)
+	if err != nil {
+		return nil, err
+	}
+
+	logger.Info().Str("server", srvCfg.Server).Msg("starting GRPC fake shipper server")
+	lis, err := createListener(srvCfg.Server)
+	if err != nil {
+		return nil, err
+	}
+	if srvCfg.TLS == nil || srvCfg.TLS.Cert == "" || srvCfg.TLS.Key == "" {
+		return nil, fmt.Errorf("ssl configuration missing")
+	}
+	cert, err := tls.X509KeyPair([]byte(srvCfg.TLS.Cert), []byte(srvCfg.TLS.Key))
+	if err != nil {
+		return nil, err
+	}
+	srv := grpc.NewServer(grpc.Creds(credentials.NewServerTLSFromCert(&cert)))
+	i.srv = srv
+	common.RegisterFakeEventProtocolServer(srv, i)
+	i.wg.Go(func() error {
+		return srv.Serve(lis)
+	})
+
+	logger.Trace().Msg("registering kill action for unit")
+	unit.RegisterAction(&killAction{logger})
+	logger.Debug().Str("state", client.UnitStateHealthy.String()).Str("message", healthyMsg).Msg("updating unit state")
+	_ = unit.UpdateState(client.UnitStateHealthy, healthyMsg, nil)
+
+	return i, nil
+}
+
+func (f *fakeShipperInput) Unit() *client.Unit {
+	return f.unit
+}
+
+func (f *fakeShipperInput) Update(u *client.Unit) error {
+	expected, _, config := u.Expected()
+	if expected == client.UnitStateStopped {
+		// agent is requesting this to stop
+		f.logger.Debug().Str("state", client.UnitStateStopping.String()).Str("message", stoppingMsg).Msg("updating unit state")
+		_ = u.UpdateState(client.UnitStateStopping, stoppingMsg, nil)
+		go func() {
+			if f.srv != nil {
+				f.srv.Stop()
+				f.wg.Wait()
+				f.srv = nil
+			}
+			f.logger.Debug().Str("state", client.UnitStateStopped.String()).Str("message", stoppedMsg).Msg("updating unit state")
+			_ = u.UpdateState(client.UnitStateStopped, stoppedMsg, nil)
+		}()
+		return nil
+	}
+
+	if config.Type == "" {
+		return fmt.Errorf("unit missing config type")
+	}
+	if config.Type != fakeShipper {
+		return fmt.Errorf("unit type changed with the same unit ID: %s", config.Type)
+	}
+	// nothing to really do
+	return nil
+}
+
+func (f *fakeShipperInput) SendEvent(ctx context.Context, event *common.Event) (*common.EventResponse, error) {
+	err := f.manager.received(ctx, event)
+	if err != nil {
+		return nil, err
+	}
+	return &common.EventResponse{}, nil
+}
+
+// killAction is an action that causes the whole component to exit (used in testing to simulate crashes)
+type killAction struct {
+	logger zerolog.Logger
+}
+
+func (s *killAction) Name() string {
+	return "kill"
+}
+
+func (s *killAction) Execute(_ context.Context, _ map[string]interface{}) (map[string]interface{}, error) {
+	s.logger.Trace().Msg("executing kill action")
+	os.Exit(1)
+	return nil, nil
+}
+
+func newRunningUnit(logger zerolog.Logger, manager *stateManager, unit *client.Unit) (runningUnit, error) {
+	_, logLevel, config := unit.Expected()
+	if config.Type == "" {
+		return nil, fmt.Errorf("unit config type empty")
+	}
+	if unit.Type() == client.UnitTypeOutput {
+		switch config.Type {
+		case fakeActionOutput:
+			return newFakeActionOutputRuntime(logger, logLevel, unit, config)
+		}
+		return nil, fmt.Errorf("unknown output unit config type: %s", config.Type)
+	} else if unit.Type() == client.UnitTypeInput {
+		switch config.Type {
+		case fakeShipper:
+			return newFakeShipperInput(logger, logLevel, manager, unit, config)
+		}
+		return nil, fmt.Errorf("unknown input unit config type: %s", config.Type)
+	}
+	return nil, fmt.Errorf("unknown unit type: %+v", unit.Type())
+}
+
+func newUnitKey(unit *client.Unit) unitKey {
+	return unitKey{
+		unitType: unit.Type(),
+		unitID:   unit.ID(),
+	}
+}
+
+func toZerologLevel(level client.UnitLogLevel) zerolog.Level {
+	switch level {
+	case client.UnitLogLevelError:
+		return zerolog.ErrorLevel
+	case client.UnitLogLevelWarn:
+		return zerolog.WarnLevel
+	case client.UnitLogLevelInfo:
+		return zerolog.InfoLevel
+	case client.UnitLogLevelDebug:
+		return zerolog.DebugLevel
+	case client.UnitLogLevelTrace:
+		return zerolog.TraceLevel
+	}
+	return zerolog.InfoLevel
+}
diff --git a/pkg/component/input_spec.go b/pkg/component/input_spec.go
index 0a45a830cd7..335d3d60201 100644
--- a/pkg/component/input_spec.go
+++ b/pkg/component/input_spec.go
@@ -17,6 +17,7 @@ type InputSpec struct {
 	Description string   `config:"description" yaml:"description" validate:"required"`
 	Platforms   []string `config:"platforms" yaml:"platforms" validate:"required,min=1"`
 	Outputs     []string `config:"outputs" yaml:"outputs" validate:"required,min=1"`
+	Shippers    []string `config:"shippers" yaml:"shippers"`
 	Runtime     RuntimeSpec `config:"runtime" yaml:"runtime"`
 
 	Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"`
@@ -45,6 +46,13 @@ func (s *InputSpec) Validate() error {
 			}
 		}
 	}
+	for i, a := range s.Shippers {
+		for j, b := range s.Shippers {
+			if i != j && a == b {
+				return fmt.Errorf("input '%s' defines the shipper '%s' more than once", s.Name, a)
+			}
+		}
+	}
 	for idx, prevention := range s.Runtime.Preventions {
 		_, err := eql.New(prevention.Condition)
 		if err != nil {
diff --git a/pkg/component/load.go b/pkg/component/load.go
index 4144a353172..be197bec288 100644
--- a/pkg/component/load.go
+++ b/pkg/component/load.go
@@ -24,6 +24,10 @@ var (
 	ErrInputNotSupported = newError("input not supported")
 	// ErrInputNotSupportedOnPlatform is returned when the input is supported but not on this platform
 	ErrInputNotSupportedOnPlatform = newError("input not supported on this platform")
+	// ErrOutputNotSupported is returned when the output is not supported on any platform
+	ErrOutputNotSupported = newError("output not supported")
+	// ErrOutputNotSupportedOnPlatform is returned when the output is supported but not on this platform
+	ErrOutputNotSupportedOnPlatform = newError("output not supported on this platform")
 )
 
 // InputRuntimeSpec returns the specification for running this input on the current platform.
@@ -34,6 +38,14 @@ type InputRuntimeSpec struct {
 	Spec       InputSpec `yaml:"spec"`
 }
 
+// ShipperRuntimeSpec returns the specification for running this shipper on the current platform.
+type ShipperRuntimeSpec struct {
+	ShipperType string      `yaml:"shipper_type"`
+	BinaryName  string      `yaml:"binary_name"`
+	BinaryPath  string      `yaml:"binary_path"`
+	Spec        ShipperSpec `yaml:"spec"`
+}
+
 // RuntimeSpecs return all the specifications for inputs that are supported on the current platform.
 type RuntimeSpecs struct {
 	// platform that was loaded
@@ -47,6 +59,12 @@ type RuntimeSpecs struct {
 
 	// aliasMapping maps aliases to real input name
 	aliasMapping map[string]string
+
+	// shipperSpecs only the shipper specs for the current platform
+	shipperSpecs map[string]ShipperRuntimeSpec
+
+	// shipperOutputs maps a supported output to the names of the shippers that support it
+	shipperOutputs map[string][]string
 }
 
 type loadRuntimeOpts struct {
@@ -78,9 +96,11 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp
 	if err != nil {
 		return RuntimeSpecs{}, err
 	}
-	var types []string
-	mapping := make(map[string]InputRuntimeSpec)
-	aliases := make(map[string]string)
+	var inputTypes []string
+	inputSpecs := make(map[string]InputRuntimeSpec)
+	inputAliases := make(map[string]string)
+	shipperSpecs := make(map[string]ShipperRuntimeSpec)
+	shipperOutputs := make(map[string][]string)
 	for _, match := range matches {
 		binaryName := filepath.Base(match[:len(match)-len(specGlobPattern)+1])
 		binaryPath := match[:len(match)-len(specGlobPattern)+1]
@@ -106,43 +126,66 @@ func LoadRuntimeSpecs(dir string, platform PlatformDetail, opts ...LoadRuntimeOp
 			return RuntimeSpecs{}, fmt.Errorf("failed reading spec %s: %w", match, err)
 		}
 		for _, input := range spec.Inputs {
-			if !containsStr(types, input.Name) {
-				types = append(types, input.Name)
+			if !containsStr(inputTypes, input.Name) {
+				inputTypes = append(inputTypes, input.Name)
 			}
 			if !containsStr(input.Platforms, platform.String()) {
 				// input spec doesn't support this platform
 				continue
 			}
-			if existing, exists := mapping[input.Name]; exists {
+			if existing, exists := inputSpecs[input.Name]; exists {
 				return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input '%s' already exists in spec '%s'", match, input.Name, existing.BinaryName)
 			}
-			if existing, exists := aliases[input.Name]; exists {
+			if existing, exists := inputAliases[input.Name]; exists {
 				return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input '%s' collides with an alias from another input '%s'", match, input.Name, existing)
 			}
 			for _, alias := range input.Aliases {
-				if existing, exists := mapping[alias]; exists {
+				if existing, exists := inputSpecs[alias]; exists {
 					return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input alias '%s' collides with an already defined input in spec '%s'", match, alias, existing.BinaryName)
 				}
-				if existing, exists := aliases[alias]; exists {
+				if existing, exists := inputAliases[alias]; exists {
 					return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': input alias '%s' collides with an already defined input alias for input '%s'", match, alias, existing)
 				}
 			}
-			mapping[input.Name] = InputRuntimeSpec{
+			inputSpecs[input.Name] = InputRuntimeSpec{
 				InputType:  input.Name,
 				BinaryName: binaryName,
 				BinaryPath: binaryPath,
 				Spec:       input,
 			}
 			for _, alias := range input.Aliases {
-				aliases[alias] = input.Name
+				inputAliases[alias] = input.Name
+			}
+		}
+		for _, shipper := range spec.Shippers {
+			// map the native outputs that the shipper supports
+			for _, output := range shipper.Outputs {
+				shippers := shipperOutputs[output]
+				shippers = append(shippers, shipper.Name)
+				shipperOutputs[output] = shippers
+			}
+			if !containsStr(shipper.Platforms, platform.String()) {
+				// shipper spec doesn't support this platform (but the shipper is still mapped into shipperOutputs)
+				continue
+			}
+			if existing, exists := shipperSpecs[shipper.Name]; exists {
+				return RuntimeSpecs{}, fmt.Errorf("failed loading spec '%s': shipper '%s' already exists in spec '%s'", match, shipper.Name, existing.BinaryName)
+			}
+			shipperSpecs[shipper.Name] = ShipperRuntimeSpec{
+				ShipperType: shipper.Name,
+				BinaryName:  binaryName,
+				BinaryPath:  binaryPath,
+				Spec:        shipper,
+			}
 		}
 	}
 	return RuntimeSpecs{
-		platform:     platform,
-		inputTypes:   types,
-		inputSpecs:   mapping,
-		aliasMapping: aliases,
+		platform:       platform,
+		inputTypes:     inputTypes,
+		inputSpecs:     inputSpecs,
+		aliasMapping:   inputAliases,
+		shipperSpecs:   shipperSpecs,
+		shipperOutputs: shipperOutputs,
 	}, nil
 }
 
@@ -169,6 +212,41 @@ func (r *RuntimeSpecs) GetInput(inputType string) (InputRuntimeSpec, error) {
 	return InputRuntimeSpec{}, ErrInputNotSupported
 }
 
+// GetShipper returns the shipper runtime specification for this shipper on this platform.
+func (r *RuntimeSpecs) GetShipper(shipperType string) (ShipperRuntimeSpec, bool) {
+	runtime, ok := r.shipperSpecs[shipperType]
+	return runtime, ok
+}
+
+// GetShippers returns the shippers that support the outputType.
+func (r *RuntimeSpecs) GetShippers(outputType string) ([]ShipperRuntimeSpec, error) {
+	shipperNames, ok := r.shipperOutputs[outputType]
+	if !ok {
+		// no shippers support that outputType
+		return nil, nil
+	}
+	platformErr := false
+	shippers := make([]ShipperRuntimeSpec, 0, len(shipperNames))
+	for _, name := range shipperNames {
+		shipper, ok := r.shipperSpecs[name]
+		if !ok {
+			// not supported on this platform
+			platformErr = true
+			continue
+		}
+		shippers = append(shippers, shipper)
+	}
+	if len(shippers) > 0 {
+		return shippers, nil
+	}
+	if platformErr {
+		// supported by at least one shipper, but not on this platform
+		return nil, ErrOutputNotSupportedOnPlatform
+	}
+	// not supported by any shippers
+	return nil, ErrOutputNotSupported
+}
+
 // ServiceSpecs returns only the input specification that are based on the service runtime.
func (r *RuntimeSpecs) ServiceSpecs() []InputRuntimeSpec { var services []InputRuntimeSpec diff --git a/pkg/component/output_spec.go b/pkg/component/output_spec.go deleted file mode 100644 index d7d80b9d074..00000000000 --- a/pkg/component/output_spec.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package component - -import "fmt" - -// OutputSpec is the specification for an output type. -type OutputSpec struct { - Name string `config:"name" yaml:"name" validate:"required"` - Description string `config:"description" yaml:"description" validate:"required"` - Platforms []string `config:"platforms" yaml:"platforms" validate:"required,min=1"` - - Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"` -} - -// Validate ensures correctness of output specification. -func (s *OutputSpec) Validate() error { - if s.Command == nil { - return fmt.Errorf("input %s must define either command or service", s.Name) - } - for i, a := range s.Platforms { - for j, b := range s.Platforms { - if i != j && a == b { - return fmt.Errorf("input %s defines the platform %s more than once", s.Name, a) - } - } - } - return nil -} diff --git a/pkg/component/outputs.go b/pkg/component/outputs.go deleted file mode 100644 index faaddbdfd8c..00000000000 --- a/pkg/component/outputs.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package component - -const ( - // Elasticsearch represents the elasticsearch output - Elasticsearch = "elasticsearch" - // Kafka represents the kafka output - Kafka = "kafka" - // Logstash represents the logstash output - Logstash = "logstash" - // Redis represents the redis output - Redis = "redis" - // Shipper represents support for using the elastic-agent-shipper - Shipper = "shipper" -) - -// Outputs defines the outputs that a component can support -var Outputs = []string{Elasticsearch, Kafka, Logstash, Redis, Shipper} diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index 22c1898fcdc..2575a35d5f1 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -31,8 +31,8 @@ const ( runDirMod = 0770 - envAgentComponentID = "AGENT_COMPONENT_ID" - envAgentComponentInputType = "AGENT_COMPONENT_INPUT_TYPE" + envAgentComponentID = "AGENT_COMPONENT_ID" + envAgentComponentType = "AGENT_COMPONENT_TYPE" stateUnknownMessage = "Unknown" ) @@ -68,19 +68,21 @@ type CommandRuntime struct { // NewCommandRuntime creates a new command runtime for the provided component. 
func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (ComponentRuntime, error) { - if comp.Spec.Spec.Command == nil { - return nil, errors.New("must have command defined in specification") - } - return &CommandRuntime{ + c := &CommandRuntime{ current: comp, + monitor: monitor, ch: make(chan ComponentState), actionCh: make(chan actionMode), procCh: make(chan procState), compCh: make(chan component.Component), actionState: actionStop, state: newComponentState(&comp), - monitor: monitor, - }, nil + } + cmdSpec := c.getCommandSpec() + if cmdSpec == nil { + return nil, errors.New("must have command defined in specification") + } + return c, nil } // Run starts the runtime for the component. @@ -89,8 +91,9 @@ func NewCommandRuntime(comp component.Component, monitor MonitoringManager) (Com // called before any of the other methods in the interface and once the context is done none of those methods should // ever be called again. func (c *CommandRuntime) Run(ctx context.Context, comm Communicator) error { - checkinPeriod := c.current.Spec.Spec.Command.Timeouts.Checkin - restartPeriod := c.current.Spec.Spec.Command.Timeouts.Restart + cmdSpec := c.getCommandSpec() + checkinPeriod := cmdSpec.Timeouts.Checkin + restartPeriod := cmdSpec.Timeouts.Restart c.forceCompState(client.UnitStateStarting, "Starting") t := time.NewTicker(checkinPeriod) defer t.Stop() @@ -269,19 +272,19 @@ func (c *CommandRuntime) start(comm Communicator) error { // already running return nil } - cmdSpec := c.current.Spec.Spec.Command + cmdSpec := c.getCommandSpec() env := make([]string, 0, len(cmdSpec.Env)+2) for _, e := range cmdSpec.Env { env = append(env, fmt.Sprintf("%s=%s", e.Name, e.Value)) } env = append(env, fmt.Sprintf("%s=%s", envAgentComponentID, c.current.ID)) - env = append(env, fmt.Sprintf("%s=%s", envAgentComponentInputType, c.current.Spec.InputType)) + env = append(env, fmt.Sprintf("%s=%s", envAgentComponentType, c.getSpecType())) uid, gid := os.Geteuid(), os.Getegid() workDir, err := c.workDir(uid, gid) if err != nil { return err } - path, err := filepath.Abs(c.current.Spec.BinaryPath) + path, err := filepath.Abs(c.getSpecBinaryPath()) if err != nil { return fmt.Errorf("failed to determine absolute path: %w", err) } @@ -293,7 +296,7 @@ func (c *CommandRuntime) start(comm Communicator) error { if err := c.monitor.Prepare(); err != nil { return err } - args := c.monitor.EnrichArgs(c.current.ID, c.current.Spec.BinaryName, cmdSpec.Args) + args := c.monitor.EnrichArgs(c.current.ID, c.getSpecBinaryName(), cmdSpec.Args) // differentiate data paths dataPath := filepath.Join(paths.Home(), "run", c.current.ID) @@ -331,7 +334,7 @@ func (c *CommandRuntime) stop(ctx context.Context) error { // cleanup reserved resources related to monitoring defer c.monitor.Cleanup(c.current.ID) //nolint:errcheck // this is ok - cmdSpec := c.current.Spec.Spec.Command + cmdSpec := c.getCommandSpec() go func(info *process.Info, timeout time.Duration) { t := time.NewTimer(timeout) defer t.Stop() @@ -409,6 +412,46 @@ func (c *CommandRuntime) workDir(uid int, gid int) (string, error) { return path, nil } +func (c *CommandRuntime) getSpecType() string { + if c.current.InputSpec != nil { + return c.current.InputSpec.InputType + } + if c.current.ShipperSpec != nil { + return c.current.ShipperSpec.ShipperType + } + return "" +} + +func (c *CommandRuntime) getSpecBinaryName() string { + if c.current.InputSpec != nil { + return c.current.InputSpec.BinaryName + } + if c.current.ShipperSpec != nil { + return 
c.current.ShipperSpec.BinaryName
+	}
+	return ""
+}
+
+func (c *CommandRuntime) getSpecBinaryPath() string {
+	if c.current.InputSpec != nil {
+		return c.current.InputSpec.BinaryPath
+	}
+	if c.current.ShipperSpec != nil {
+		return c.current.ShipperSpec.BinaryPath
+	}
+	return ""
+}
+
+func (c *CommandRuntime) getCommandSpec() *component.CommandSpec {
+	if c.current.InputSpec != nil {
+		return c.current.InputSpec.Spec.Command
+	}
+	if c.current.ShipperSpec != nil {
+		return c.current.ShipperSpec.Spec.Command
+	}
+	return nil
+}
+
 func attachOutErr(cmd *exec.Cmd) error {
 	cmd.Stdout = os.Stdout
 	cmd.Stderr = os.Stderr
diff --git a/pkg/component/runtime/manager.go b/pkg/component/runtime/manager.go
index d713a9a10c4..0c4befc5e2f 100644
--- a/pkg/component/runtime/manager.go
+++ b/pkg/component/runtime/manager.go
@@ -56,6 +56,12 @@ type ComponentComponentState struct {
 	State     ComponentState      `yaml:"state"`
 }
 
+// ComponentUnitDiagnosticRequest is used to request diagnostics from a specific unit.
+type ComponentUnitDiagnosticRequest struct {
+	Component component.Component
+	Unit      component.Unit
+}
+
 // ComponentUnitDiagnostic provides a structure to map a component/unit to diagnostic results.
 type ComponentUnitDiagnostic struct {
 	Component component.Component
@@ -82,8 +88,9 @@ type Manager struct {
 	waitMx    sync.RWMutex
 	waitReady map[string]waitForReady
 
-	mx      sync.RWMutex
-	current map[string]*componentRuntimeState
+	mx           sync.RWMutex
+	current      map[string]*componentRuntimeState
+	shipperConns map[string]*shipperConn
 
 	subMx         sync.RWMutex
 	subscriptions map[string][]*Subscription
@@ -109,6 +116,7 @@ func NewManager(logger *logger.Logger, listenAddr string, agentInfo *info.AgentI
 		tracer:        tracer,
 		waitReady:     make(map[string]waitForReady),
 		current:       make(map[string]*componentRuntimeState),
+		shipperConns:  make(map[string]*shipperConn),
 		subscriptions: make(map[string][]*Subscription),
 		errCh:         make(chan error),
 		monitor:       monitor,
@@ -286,7 +294,7 @@ func (m *Manager) State() []ComponentComponentState {
 }
 
 // PerformAction executes an action on a unit.
-func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) {
+func (m *Manager) PerformAction(ctx context.Context, comp component.Component, unit component.Unit, name string, params map[string]interface{}) (map[string]interface{}, error) {
 	id, err := uuid.NewV4()
 	if err != nil {
 		return nil, err
@@ -298,7 +306,7 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s
 			return nil, err
 		}
 	}
-	runtime := m.getRuntimeFromUnit(unit)
+	runtime := m.getRuntimeFromUnit(comp, unit)
 	if runtime == nil {
 		return nil, ErrNoUnit
 	}
@@ -345,21 +353,21 @@ func (m *Manager) PerformAction(ctx context.Context, unit component.Unit, name s
 
 // PerformDiagnostics executes the diagnostic action for the provided units. If no units are provided then
 // it performs diagnostics for all current units.
-func (m *Manager) PerformDiagnostics(ctx context.Context, units ...component.Unit) []ComponentUnitDiagnostic { +func (m *Manager) PerformDiagnostics(ctx context.Context, req ...ComponentUnitDiagnosticRequest) []ComponentUnitDiagnostic { // build results from units var results []ComponentUnitDiagnostic - if len(units) > 0 { - for _, u := range units { - r := m.getRuntimeFromUnit(u) + if len(req) > 0 { + for _, q := range req { + r := m.getRuntimeFromUnit(q.Component, q.Unit) if r == nil { results = append(results, ComponentUnitDiagnostic{ - Unit: u, + Unit: q.Unit, Err: ErrNoUnit, }) } else { results = append(results, ComponentUnitDiagnostic{ Component: r.currComp, - Unit: u, + Unit: q.Unit, }) } } @@ -395,7 +403,7 @@ func (m *Manager) PerformDiagnostics(ctx context.Context, units ...component.Uni // already in error don't perform diagnostics continue } - diag, err := m.performDiagAction(ctx, r.Unit) + diag, err := m.performDiagAction(ctx, r.Component, r.Unit) if err != nil { r.Err = err } else { @@ -608,6 +616,13 @@ func (m *Manager) update(components []component.Component, teardown bool) error m.mx.Lock() defer m.mx.Unlock() + // prepare the components to add consistent shipper connection information between + // the connected components in the model + err := m.connectShippers(components) + if err != nil { + return err + } + touched := make(map[string]bool) for _, comp := range components { touched[comp.ID] = true @@ -742,13 +757,15 @@ func (m *Manager) getRuntimeFromToken(token string) *componentRuntimeState { return nil } -func (m *Manager) getRuntimeFromUnit(unit component.Unit) *componentRuntimeState { +func (m *Manager) getRuntimeFromUnit(comp component.Component, unit component.Unit) *componentRuntimeState { m.mx.RLock() defer m.mx.RUnlock() - for _, comp := range m.current { - for _, u := range comp.currComp.Units { - if u.Type == unit.Type && u.ID == unit.ID { - return comp + for _, c := range m.current { + if c.currComp.ID == comp.ID { + for _, u := range c.currComp.Units { + if u.Type == unit.Type && u.ID == unit.ID { + return c + } } } } @@ -769,7 +786,7 @@ func (m *Manager) getListenAddr() string { return m.listenAddr } -func (m *Manager) performDiagAction(ctx context.Context, unit component.Unit) ([]*proto.ActionDiagnosticUnitResult, error) { +func (m *Manager) performDiagAction(ctx context.Context, comp component.Component, unit component.Unit) ([]*proto.ActionDiagnosticUnitResult, error) { ctx, cancel := context.WithTimeout(ctx, diagnosticTimeout) defer cancel() @@ -778,7 +795,7 @@ func (m *Manager) performDiagAction(ctx context.Context, unit component.Unit) ([ return nil, err } - runtime := m.getRuntimeFromUnit(unit) + runtime := m.getRuntimeFromUnit(comp, unit) if runtime == nil { return nil, ErrNoUnit } diff --git a/pkg/component/runtime/manager_shipper.go b/pkg/component/runtime/manager_shipper.go new file mode 100644 index 00000000000..bb6cb7678e8 --- /dev/null +++ b/pkg/component/runtime/manager_shipper.go @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
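+
+// This file wires shipper components to the components that use them: each
+// shipper gets a locally generated certificate authority, each connected unit
+// gets its own certificate pair, and the shipper address plus TLS material is
+// injected into the unit configurations on both sides of the connection.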
+
+package runtime
+
+import (
+	"fmt"
+
+	"github.com/elastic/elastic-agent-client/v7/pkg/client"
+	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
+	"github.com/elastic/elastic-agent/internal/pkg/core/authority"
+	"github.com/elastic/elastic-agent/pkg/component"
+)
+
+func (m *Manager) connectShippers(components []component.Component) error {
+	// ensure that all shipper components have created connection information (must happen before we connect the units)
+	shippersTouched := make(map[string]bool)
+	for i, comp := range components {
+		if comp.ShipperSpec != nil {
+			// running shipper (ensure connection information is created)
+			shippersTouched[comp.ID] = true
+			conn, ok := m.shipperConns[comp.ID]
+			if !ok {
+				ca, err := authority.NewCA()
+				if err != nil {
+					return fmt.Errorf("failed to create connection CA for shipper %q: %w", comp.ID, err)
+				}
+				conn = &shipperConn{
+					addr:  getShipperAddr(comp.ID),
+					ca:    ca,
+					pairs: make(map[string]*authority.Pair),
+				}
+				m.shipperConns[comp.ID] = conn
+			}
+
+			// each input unit needs its corresponding certificate pair
+			pairsTouched := make(map[string]bool)
+			for j, unit := range comp.Units {
+				if unit.Type == client.UnitTypeInput {
+					pairsTouched[unit.ID] = true
+					pair, err := pairGetOrCreate(conn, unit.ID)
+					if err != nil {
+						return fmt.Errorf("failed to get/create certificate pair for shipper %q/%q: %w", comp.ID, unit.ID, err)
+					}
+					cfg, cfgErr := injectShipperConn(unit.Config, conn.addr, conn.ca, pair)
+					unit.Config = cfg
+					unit.Err = cfgErr
+					comp.Units[j] = unit
+				}
+			}
+
+			// clean up any pairs that are no longer used
+			for pairID := range conn.pairs {
+				if !pairsTouched[pairID] {
+					delete(conn.pairs, pairID)
+				}
+			}
+			components[i] = comp
+		}
+	}
+
+	// clean up any shippers that are no longer used
+	for shipperID := range m.shipperConns {
+		if !shippersTouched[shipperID] {
+			delete(m.shipperConns, shipperID)
+		}
+	}
+
+	// connect the output units with the same connection information
+	for i, comp := range components {
+		if comp.Shipper != nil {
+			conn, ok := m.shipperConns[comp.Shipper.ComponentID]
+			if !ok {
+				return fmt.Errorf("component %q references a non-existing shipper %q", comp.ID, comp.Shipper.ComponentID)
+			}
+			pair, ok := conn.pairs[comp.ID]
+			if !ok {
+				return fmt.Errorf("component %q references shipper %q that doesn't know about the component", comp.ID, comp.Shipper.ComponentID)
+			}
+			for j, unit := range comp.Units {
+				if unit.Type == client.UnitTypeOutput {
+					cfg, cfgErr := injectShipperConn(unit.Config, conn.addr, conn.ca, pair)
+					unit.Config = cfg
+					unit.Err = cfgErr
+					comp.Units[j] = unit
+				}
+			}
+			components[i] = comp
+		}
+	}
+
+	return nil
+}
+
+func pairGetOrCreate(conn *shipperConn, pairID string) (*authority.Pair, error) {
+	var err error
+	pair, ok := conn.pairs[pairID]
+	if ok {
+		return pair, nil
+	}
+	pair, err = conn.ca.GeneratePairWithName(pairID)
+	if err != nil {
+		return nil, err
+	}
+	conn.pairs[pairID] = pair
+	return pair, nil
+}
+
+func injectShipperConn(cfg *proto.UnitExpectedConfig, addr string, ca *authority.CertificateAuthority, pair *authority.Pair) (*proto.UnitExpectedConfig, error) {
+	if cfg == nil {
+		// unit configuration had an error while generating (nothing to inject)
+		return cfg, nil
+	}
+	source := cfg.Source.AsMap()
+	source["server"] = addr
+	source["ssl"] = map[string]interface{}{
+		"certificate_authorities": []interface{}{
+			string(ca.Crt()),
+		},
+		"certificate": string(pair.Crt),
+		"key":         string(pair.Key),
+	}
+	return component.ExpectedConfig(source)
+}
diff --git a/pkg/component/runtime/manager_shipper_unix.go b/pkg/component/runtime/manager_shipper_unix.go
new file mode 100644
index 00000000000..f3805a3ebb4
--- /dev/null
+++ b/pkg/component/runtime/manager_shipper_unix.go
@@ -0,0 +1,33 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+//go:build !windows
+// +build !windows
+
+package runtime
+
+import (
+	"crypto/sha256"
+	"fmt"
+	"path/filepath"
+
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+)
+
+func getShipperAddr(componentID string) string {
+	// when installed the address is fixed to a location
+	if info.RunningInstalled() {
+		return fmt.Sprintf(paths.ShipperSocketPipePattern, componentID)
+	}
+
+	// unix socket path must be less than 104 characters
+	path := fmt.Sprintf("unix://%s.sock", filepath.Join(paths.TempDir(), fmt.Sprintf("elastic-agent-%s-pipe", componentID)))
+	if len(path) < 104 {
+		return path
+	}
+	// place in global /tmp to ensure that it is small enough to fit; the current path is way too long
+	// for it to be used, but it needs to be unique per Agent (in the case that multiple are running)
+	return fmt.Sprintf(`unix:///tmp/elastic-agent/%x.sock`, sha256.Sum256([]byte(path)))
+}
diff --git a/pkg/component/runtime/manager_shipper_windows.go b/pkg/component/runtime/manager_shipper_windows.go
new file mode 100644
index 00000000000..bb45b4650dc
--- /dev/null
+++ b/pkg/component/runtime/manager_shipper_windows.go
@@ -0,0 +1,29 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
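+
+// The Windows variant below uses a named pipe instead of a unix socket; the
+// pipe name embeds a hash of the data path so that multiple Agents on the
+// same host get unique pipes.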
+
+//go:build windows
+// +build windows
+
+package runtime
+
+import (
+	"crypto/sha256"
+	"fmt"
+
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/info"
+	"github.com/elastic/elastic-agent/internal/pkg/agent/application/paths"
+)
+
+func getShipperAddr(componentID string) string {
+	// when installed the address is fixed to a location
+	if info.RunningInstalled() {
+		return fmt.Sprintf(paths.ShipperSocketPipePattern, componentID)
+	}
+
+	// not installed, adjust the path based on the data path
+	data := paths.Data()
+	// entire string cannot be longer than 256 characters; this forces the
+	// length to always be 87 characters (but unique per data path)
+	return fmt.Sprintf(`\\.\pipe\elastic-agent-%x-%s-pipe`, sha256.Sum256([]byte(data)), componentID)
+}
diff --git a/pkg/component/runtime/manager_test.go b/pkg/component/runtime/manager_test.go
index b71a24c35e0..a26c9f037a4 100644
--- a/pkg/component/runtime/manager_test.go
+++ b/pkg/component/runtime/manager_test.go
@@ -15,6 +15,8 @@ import (
 	"testing"
 	"time"
 
+	"github.com/gofrs/uuid"
+
 	"github.com/stretchr/testify/require"
 
 	"go.elastic.co/apm/apmtest"
@@ -43,6 +45,16 @@ var (
 			},
 		},
 	}
+	fakeShipperSpec = component.ShipperSpec{
+		Name: "fake-shipper",
+		Command: &component.CommandSpec{
+			Timeouts: component.CommandTimeoutSpec{
+				Checkin: 30 * time.Second,
+				Restart: 10 * time.Millisecond, // quick restart during tests
+				Stop:    30 * time.Second,
+			},
+		},
+	}
 )
 
 func TestManager_SimpleComponentErr(t *testing.T) {
@@ -169,10 +181,10 @@ func TestManager_FakeInput_StartStop(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	binaryPath := testBinary(t)
+	binaryPath := testBinary(t, "component")
 	comp := component.Component{
 		ID: "fake-default",
-		Spec: component.InputRuntimeSpec{
+		InputSpec: &component.InputRuntimeSpec{
 			InputType:  "fake",
 			BinaryName: "",
 			BinaryPath: binaryPath,
@@ -294,10 +306,10 @@ func TestManager_FakeInput_BadUnitToGood(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	binaryPath := testBinary(t)
+	binaryPath := testBinary(t, "component")
 	comp := component.Component{
 		ID: "fake-default",
-		Spec: component.InputRuntimeSpec{
+		InputSpec: &component.InputRuntimeSpec{
 			InputType:  "fake",
 			BinaryName: "",
 			BinaryPath: binaryPath,
@@ -465,10 +477,10 @@ func TestManager_FakeInput_GoodUnitToBad(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	binaryPath := testBinary(t)
+	binaryPath := testBinary(t, "component")
 	comp := component.Component{
 		ID: "fake-default",
-		Spec: component.InputRuntimeSpec{
+		InputSpec: &component.InputRuntimeSpec{
 			InputType:  "fake",
 			BinaryName: "",
 			BinaryPath: binaryPath,
@@ -620,10 +632,10 @@ func TestManager_FakeInput_Configure(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	binaryPath := testBinary(t)
+	binaryPath := testBinary(t, "component")
 	comp := component.Component{
 		ID: "fake-default",
-		Spec: component.InputRuntimeSpec{
+		InputSpec: &component.InputRuntimeSpec{
 			InputType:  "fake",
 			BinaryName: "",
 			BinaryPath: binaryPath,
@@ -746,10 +758,10 @@ func TestManager_FakeInput_RemoveUnit(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	binaryPath := testBinary(t)
+	binaryPath := testBinary(t, "component")
 	comp := component.Component{
 		ID: "fake-default",
-		Spec: component.InputRuntimeSpec{
+		InputSpec: &component.InputRuntimeSpec{
 			InputType:  "fake",
 			BinaryName: "",
 			BinaryPath: binaryPath,
@@ -904,10 +916,10 @@ func TestManager_FakeInput_ActionState(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	binaryPath := testBinary(t)
+	binaryPath := testBinary(t, "component")
 	comp := component.Component{
 		ID: "fake-default",
-		Spec: 
component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -949,7 +961,7 @@ func TestManager_FakeInput_ActionState(t *testing.T) { // subscription channel go func() { actionCtx, actionCancel := context.WithTimeout(context.Background(), 15*time.Second) - _, err := m.PerformAction(actionCtx, comp.Units[0], "set_state", map[string]interface{}{ + _, err := m.PerformAction(actionCtx, comp, comp.Units[0], "set_state", map[string]interface{}{ "state": int(client.UnitStateDegraded), "message": "Action Set Degraded", }) @@ -1034,10 +1046,10 @@ func TestManager_FakeInput_Restarts(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -1085,7 +1097,7 @@ func TestManager_FakeInput_Restarts(t *testing.T) { if !killed { killed = true actionCtx, actionCancel := context.WithTimeout(context.Background(), 500*time.Millisecond) - _, err := m.PerformAction(actionCtx, comp.Units[0], "kill", nil) + _, err := m.PerformAction(actionCtx, comp, comp.Units[0], "kill", nil) actionCancel() if !errors.Is(err, context.DeadlineExceeded) { // should have got deadline exceeded for this call @@ -1173,10 +1185,10 @@ func TestManager_FakeInput_RestartsOnMissedCheckins(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -1294,10 +1306,10 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") comp := component.Component{ ID: "fake-default", - Spec: component.InputRuntimeSpec{ + InputSpec: &component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", BinaryPath: binaryPath, @@ -1336,7 +1348,7 @@ func TestManager_FakeInput_InvalidAction(t *testing.T) { subErrCh <- fmt.Errorf("unit failed: %s", unit.Message) } else if unit.State == client.UnitStateHealthy { actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second) - _, err := m.PerformAction(actionCtx, comp.Units[0], "invalid_missing_action", nil) + _, err := m.PerformAction(actionCtx, comp, comp.Units[0], "invalid_missing_action", nil) actionCancel() if err == nil { subErrCh <- fmt.Errorf("should have returned an error") @@ -1418,7 +1430,7 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { require.NoError(t, err) } - binaryPath := testBinary(t) + binaryPath := testBinary(t, "component") runtimeSpec := component.InputRuntimeSpec{ InputType: "fake", BinaryName: "", @@ -1427,8 +1439,8 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { } components := []component.Component{ { - ID: "fake-0", - Spec: runtimeSpec, + ID: "fake-0", + InputSpec: &runtimeSpec, Units: []component.Unit{ { ID: "fake-input-0-0", @@ -1460,8 +1472,8 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { }, }, { - ID: "fake-1", - Spec: runtimeSpec, + ID: "fake-1", + InputSpec: &runtimeSpec, Units: []component.Unit{ { ID: "fake-input-1-0", @@ -1493,8 +1505,8 @@ func TestManager_FakeInput_MultiComponent(t *testing.T) { }, }, { - ID: "fake-2", - Spec: runtimeSpec, + ID: "fake-2", + InputSpec: 
&runtimeSpec,
 			Units: []component.Unit{
 				{
 					ID: "fake-input-2-0",
@@ -1630,10 +1642,10 @@ func TestManager_FakeInput_LogLevel(t *testing.T) {
 		require.NoError(t, err)
 	}
 
-	binaryPath := testBinary(t)
+	binaryPath := testBinary(t, "component")
 	comp := component.Component{
 		ID: "fake-default",
-		Spec: component.InputRuntimeSpec{
+		InputSpec: &component.InputRuntimeSpec{
 			InputType:  "fake",
 			BinaryName: "",
 			BinaryPath: binaryPath,
@@ -1687,7 +1699,7 @@ func TestManager_FakeInput_LogLevel(t *testing.T) {
 			}
 
 			actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second)
-			_, err := m.PerformAction(actionCtx, comp.Units[0], "invalid_missing_action", nil)
+			_, err := m.PerformAction(actionCtx, comp, comp.Units[0], "invalid_missing_action", nil)
 			actionCancel()
 			if err == nil {
 				subErrCh <- fmt.Errorf("should have returned an error")
@@ -1745,6 +1757,281 @@ LOOP:
 	require.NoError(t, err)
 }
 
+func TestManager_FakeShipper(t *testing.T) {
+	/*
+		This test runs one instance of the fake/component and an instance of the fake/shipper. They get connected
+		together, and the test ensures that an event is sent from one to the other. Below is a breakdown of how
+		this test performs this work.
+
+		1. Wait for the shipper input (GRPC server) to report healthy.
+		2. Wait for the component output (GRPC client) to report healthy.
+		3. Create a unique ID to use for the event ID.
+		4. Send `record_event` action to the shipper input (GRPC server); won't return until it actually gets the event.
+		5. Send `send_event` action to the component fake input (GRPC client); returns once sent.
+		6. Wait for `record_event` action to return from the shipper input (GRPC server).
+	*/
+
+	testPaths(t)
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+
+	ai, _ := info.NewAgentInfo(true)
+	m, err := NewManager(newErrorLogger(t), "localhost:0", ai, apmtest.DiscardTracer, newTestMonitoringMgr())
+	require.NoError(t, err)
+	errCh := make(chan error)
+	go func() {
+		err := m.Run(ctx)
+		if errors.Is(err, context.Canceled) {
+			err = nil
+		}
+		errCh <- err
+	}()
+
+	waitCtx, waitCancel := context.WithTimeout(ctx, 1*time.Second)
+	defer waitCancel()
+	if err := m.WaitForReady(waitCtx); err != nil {
+		require.NoError(t, err)
+	}
+
+	componentPath := testBinary(t, "component")
+	shipperPath := testBinary(t, "shipper")
+	comps := []component.Component{
+		{
+			ID: "fake-default",
+			InputSpec: &component.InputRuntimeSpec{
+				InputType:  "fake",
+				BinaryName: "",
+				BinaryPath: componentPath,
+				Spec:       fakeInputSpec,
+			},
+			Units: []component.Unit{
+				{
+					ID:       "fake-input",
+					Type:     client.UnitTypeInput,
+					LogLevel: client.UnitLogLevelTrace,
+					Config: component.MustExpectedConfig(map[string]interface{}{
+						"type":    "fake",
+						"state":   int(client.UnitStateHealthy),
+						"message": "Fake Healthy",
+					}),
+				},
+				{
+					ID:       "fake-default",
+					Type:     client.UnitTypeOutput,
+					LogLevel: client.UnitLogLevelTrace,
+					Config: component.MustExpectedConfig(map[string]interface{}{
+						"type": "fake-shipper",
+					}),
+				},
+			},
+			Shipper: &component.ShipperReference{
+				ComponentID: "fake-shipper-default",
+				UnitID:      "fake-default",
+			},
+		},
+		{
+			ID: "fake-shipper-default",
+			ShipperSpec: &component.ShipperRuntimeSpec{
+				ShipperType: "fake-shipper",
+				BinaryName:  "",
+				BinaryPath:  shipperPath,
+				Spec:        fakeShipperSpec,
+			},
+			Units: []component.Unit{
+				{
+					ID:       "fake-default",
+					Type:     client.UnitTypeInput,
+					LogLevel: client.UnitLogLevelTrace,
+					Config: component.MustExpectedConfig(map[string]interface{}{
+						"id":   "fake-default",
+						"type": "fake-shipper",
+						"units": []interface{}{
+							map[string]interface{}{
+								"id": "fake-input",
+								"config": map[string]interface{}{
+									"type":    "fake",
+									"state":   int(client.UnitStateHealthy),
+									"message": "Fake Healthy",
+								},
+							},
+						},
+					}),
+				},
+				{
+					ID:       "fake-default",
+					Type:     client.UnitTypeOutput,
+					LogLevel: client.UnitLogLevelTrace,
+					Config: component.MustExpectedConfig(map[string]interface{}{
+						"type": "fake-action-output",
+					}),
+				},
+			},
+		},
+	}
+
+	subCtx, subCancel := context.WithCancel(context.Background())
+	defer subCancel()
+	subErrCh := make(chan error)
+	go func() {
+		shipperOn := false
+		compConnected := false
+
+		sendEvent := func() (bool, error) {
+			if !shipperOn || !compConnected {
+				// wait until connected
+				return false, nil
+			}
+
+			// send an event between component and the fake shipper
+			eventID, err := uuid.NewV4()
+			if err != nil {
+				return true, err
+			}
+
+			// wait for the event on the shipper side
+			gotEvt := make(chan error)
+			go func() {
+				actionCtx, actionCancel := context.WithTimeout(context.Background(), 10*time.Second)
+				_, err := m.PerformAction(actionCtx, comps[1], comps[1].Units[1], "record_event", map[string]interface{}{
+					"id": eventID.String(),
+				})
+				actionCancel()
+				gotEvt <- err
+			}()
+
+			// send the fake event
+			actionCtx, actionCancel := context.WithTimeout(context.Background(), 5*time.Second)
+			_, err = m.PerformAction(actionCtx, comps[0], comps[0].Units[0], "send_event", map[string]interface{}{
+				"id": eventID.String(),
+			})
+			actionCancel()
+			if err != nil {
+				return true, err
+			}
+
+			err = <-gotEvt
+			if err == nil {
+				t.Logf("successfully sent event from fake input to fake shipper, event ID: %s", eventID.String())
+			}
+			return true, err
+		}
+
+		shipperSub := m.Subscribe(subCtx, "fake-shipper-default")
+		compSub := m.Subscribe(subCtx, "fake-default")
+		for {
+			select {
+			case <-subCtx.Done():
+				return
+			case state := <-shipperSub.Ch():
+				t.Logf("shipper state changed: %+v", state)
+				if state.State == client.UnitStateFailed {
+					subErrCh <- fmt.Errorf("shipper failed: %s", state.Message)
+				} else {
+					unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeInput, UnitID: "fake-default"}]
+					if ok {
+						if unit.State == client.UnitStateFailed {
+							subErrCh <- fmt.Errorf("unit failed: %s", unit.Message)
+						} else if unit.State == client.UnitStateHealthy {
+							shipperOn = true
+							ok, err := sendEvent()
+							if ok {
+								if err != nil {
+									subErrCh <- err
+								} else {
+									// successful; turn it all off
+									err := m.Update([]component.Component{})
+									if err != nil {
+										subErrCh <- err
+									}
+								}
+							}
+						} else if unit.State == client.UnitStateStopped {
+							subErrCh <- nil
+						} else if unit.State == client.UnitStateStarting {
+							// acceptable
+						} else {
+							// unknown state that should not have occurred
+							subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State)
+						}
+					} else {
+						subErrCh <- errors.New("unit missing: fake-default")
+					}
+				}
+			case state := <-compSub.Ch():
+				t.Logf("component state changed: %+v", state)
+				if state.State == client.UnitStateFailed {
+					subErrCh <- fmt.Errorf("component failed: %s", state.Message)
+				} else {
+					unit, ok := state.Units[ComponentUnitKey{UnitType: client.UnitTypeOutput, UnitID: "fake-default"}]
+					if ok {
+						if unit.State == client.UnitStateFailed {
+							subErrCh <- fmt.Errorf("unit failed: %s", unit.Message)
+						} else if unit.State == client.UnitStateHealthy {
+							compConnected = true
+							ok, err := sendEvent()
+							if ok {
+								if err != nil {
+									subErrCh <- err
+								} else {
+									// successful; turn it all off
+									err := m.Update([]component.Component{})
+									if err != nil {
+										subErrCh <- err
+									}
+								}
+							}
+						} else if unit.State == client.UnitStateStopped {
+							subErrCh <- nil
+						} else if unit.State == client.UnitStateStarting || unit.State == client.UnitStateConfiguring {
+							// acceptable
+						} else {
+							// unknown state that should not have occurred
+							subErrCh <- fmt.Errorf("unit reported unexpected state: %v", unit.State)
+						}
+					} else {
+						subErrCh <- errors.New("unit missing: fake-default")
+					}
+				}
+			}
+		}
+	}()
+
+	defer drainErrChan(errCh)
+	defer drainErrChan(subErrCh)
+
+	startTimer := time.NewTimer(100 * time.Millisecond)
+	defer startTimer.Stop()
+	select {
+	case <-startTimer.C:
+		err = m.Update(comps)
+		require.NoError(t, err)
+	case err := <-errCh:
+		t.Fatalf("failed early: %s", err)
+	}
+
+	endTimer := time.NewTimer(30 * time.Second)
+	defer endTimer.Stop()
+LOOP:
+	for {
+		select {
+		case <-endTimer.C:
+			t.Fatalf("timed out after 30 seconds")
+		case err := <-errCh:
+			require.NoError(t, err)
+		case err := <-subErrCh:
+			require.NoError(t, err)
+			break LOOP
+		}
+	}
+
+	subCancel()
+	cancel()
+
+	err = <-errCh
+	require.NoError(t, err)
+}
+
 func newErrorLogger(t *testing.T) *logger.Logger {
 	t.Helper()
@@ -1796,7 +2083,10 @@ func testPaths(t *testing.T) {
 	versioned := paths.IsVersionHome()
 	topPath := paths.Top()
 
-	tmpDir := t.TempDir()
+	tmpDir, err := os.MkdirTemp("", "at-*")
+	if err != nil {
+		t.Fatalf("failed to create temp directory: %s", err)
+	}
 	paths.SetVersionHome(false)
 	paths.SetTop(tmpDir)
@@ -1807,11 +2097,11 @@ func testPaths(t *testing.T) {
 	})
 }
 
-func testBinary(t *testing.T) string {
+func testBinary(t *testing.T, name string) string {
 	t.Helper()
 
 	var err error
-	binaryPath := filepath.Join("..", "fake", "fake")
+	binaryPath := filepath.Join("..", "fake", name, name)
 	binaryPath, err = filepath.Abs(binaryPath)
 	if err != nil {
 		t.Fatalf("failed abs %s: %s", binaryPath, err)
diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go
index e06702b2141..0ed1b46c26c 100644
--- a/pkg/component/runtime/runtime.go
+++ b/pkg/component/runtime/runtime.go
@@ -57,12 +57,23 @@ type ComponentRuntime interface {
 func NewComponentRuntime(comp component.Component, logger *logger.Logger, monitor MonitoringManager) (ComponentRuntime, error) {
 	if comp.Err != nil {
 		return NewFailedRuntime(comp)
-	} else if comp.Spec.Spec.Command != nil {
-		return NewCommandRuntime(comp, monitor)
-	} else if comp.Spec.Spec.Service != nil {
-		return NewServiceRuntime(comp, logger)
 	}
-	return nil, errors.New("unknown component runtime")
+	if comp.InputSpec != nil {
+		if comp.InputSpec.Spec.Command != nil {
+			return NewCommandRuntime(comp, monitor)
+		}
+		if comp.InputSpec.Spec.Service != nil {
+			return NewServiceRuntime(comp, logger)
+		}
+		return nil, errors.New("unknown component runtime")
+	}
+	if comp.ShipperSpec != nil {
+		if comp.ShipperSpec.Spec.Command != nil {
+			return NewCommandRuntime(comp, monitor)
+		}
+		return nil, errors.New("components for shippers can only support command runtime")
+	}
+	return nil, errors.New("component missing specification")
 }
 
 type componentRuntimeState struct {
diff --git a/pkg/component/runtime/service.go b/pkg/component/runtime/service.go
index 9c55b9fa4d7..41cf1b517cc 100644
--- a/pkg/component/runtime/service.go
+++ b/pkg/component/runtime/service.go
@@ -23,8 +23,10 @@ const (
 )
 
 var (
+	// ErrOperationSpecUndefined is the error for a missing operation specification.
 	ErrOperationSpecUndefined = errors.New("operation spec undefined")
-	ErrInvalidServiceSpec     = errors.New("invalid service spec")
+	// ErrInvalidServiceSpec is the error for an invalid service specification.
+	ErrInvalidServiceSpec = errors.New("invalid service spec")
 )
 
 type executeServiceCommandFunc func(ctx context.Context, log *logger.Logger, binaryPath string, spec *component.ServiceOperationsCommandSpec) error
@@ -46,7 +48,13 @@ type ServiceRuntime struct {
 
 // NewServiceRuntime creates a new command runtime for the provided component.
 func NewServiceRuntime(comp component.Component, logger *logger.Logger) (ComponentRuntime, error) {
-	if comp.Spec.Spec.Service == nil {
+	if comp.ShipperSpec != nil {
+		return nil, errors.New("service runtime not supported for a shipper specification")
+	}
+	if comp.InputSpec == nil {
+		return nil, errors.New("service runtime requires an input specification to be defined")
+	}
+	if comp.InputSpec.Spec.Service == nil {
 		return nil, errors.New("must have service defined in specification")
 	}
 
@@ -111,7 +119,7 @@ func (s *ServiceRuntime) Run(ctx context.Context, comm Communicator) (err error)
 
 			// Start connection info
 			if cis == nil {
-				cis, err = newConnInfoServer(s.log, comm, s.comp.Spec.Spec.Service.CPort)
+				cis, err = newConnInfoServer(s.log, comm, s.comp.InputSpec.Spec.Service.CPort)
 				if err != nil {
 					err = fmt.Errorf("failed to start connection info service %s: %w", s.name(), err)
 					break
@@ -321,7 +329,7 @@ func (s *ServiceRuntime) checkStatus(checkinPeriod time.Duration, lastCheckin *t
 }
 
 func (s *ServiceRuntime) checkinPeriod() time.Duration {
-	checkinPeriod := s.comp.Spec.Spec.Service.Timeouts.Checkin
+	checkinPeriod := s.comp.InputSpec.Spec.Service.Timeouts.Checkin
 	if checkinPeriod == 0 {
 		checkinPeriod = defaultCheckServiceStatusInterval
 	}
@@ -395,27 +403,27 @@ func (s *ServiceRuntime) compState(state client.UnitState, missedCheckins int) {
 }
 
 func (s *ServiceRuntime) name() string {
-	return s.comp.Spec.Spec.Name
+	return s.comp.InputSpec.Spec.Name
 }
 
 // check executes the service check command
 func (s *ServiceRuntime) check(ctx context.Context) error {
-	if s.comp.Spec.Spec.Service.Operations.Check == nil {
-		s.log.Errorf("missing check spec for %s service", s.comp.Spec.BinaryName)
+	if s.comp.InputSpec.Spec.Service.Operations.Check == nil {
+		s.log.Errorf("missing check spec for %s service", s.comp.InputSpec.BinaryName)
 		return ErrOperationSpecUndefined
 	}
-	s.log.Debugf("check if the %s is installed", s.comp.Spec.BinaryName)
-	return s.executeServiceCommandImpl(ctx, s.log, s.comp.Spec.BinaryPath, s.comp.Spec.Spec.Service.Operations.Check)
+	s.log.Debugf("check if the %s is installed", s.comp.InputSpec.BinaryName)
+	return s.executeServiceCommandImpl(ctx, s.log, s.comp.InputSpec.BinaryPath, s.comp.InputSpec.Spec.Service.Operations.Check)
 }
 
 // install executes the service install command
 func (s *ServiceRuntime) install(ctx context.Context) error {
-	if s.comp.Spec.Spec.Service.Operations.Install == nil {
-		s.log.Errorf("missing install spec for %s service", s.comp.Spec.BinaryName)
+	if s.comp.InputSpec.Spec.Service.Operations.Install == nil {
+		s.log.Errorf("missing install spec for %s service", s.comp.InputSpec.BinaryName)
 		return ErrOperationSpecUndefined
 	}
-	s.log.Debugf("install %s service", s.comp.Spec.BinaryName)
-	return s.executeServiceCommandImpl(ctx, s.log, s.comp.Spec.BinaryPath, s.comp.Spec.Spec.Service.Operations.Install)
+	s.log.Debugf("install %s service", s.comp.InputSpec.BinaryName)
+	return s.executeServiceCommandImpl(ctx, s.log, s.comp.InputSpec.BinaryPath, s.comp.InputSpec.Spec.Service.Operations.Install)
 }
 
 // uninstall executes the service uninstall command
@@ -429,10 +437,10 @@ func UninstallService(ctx context.Context, log *logger.Logger, comp component.Co
 }
 
 func uninstallService(ctx context.Context, log *logger.Logger, comp component.Component, executeServiceCommandImpl executeServiceCommandFunc) error {
-	if comp.Spec.Spec.Service.Operations.Uninstall == nil {
-		log.Errorf("missing uninstall spec for %s service", comp.Spec.BinaryName)
+	if comp.InputSpec.Spec.Service.Operations.Uninstall == nil {
+		log.Errorf("missing uninstall spec for %s service", comp.InputSpec.BinaryName)
 		return ErrOperationSpecUndefined
 	}
-	log.Debugf("uninstall %s service", comp.Spec.BinaryName)
-	return executeServiceCommandImpl(ctx, log, comp.Spec.BinaryPath, comp.Spec.Spec.Service.Operations.Uninstall)
+	log.Debugf("uninstall %s service", comp.InputSpec.BinaryName)
+	return executeServiceCommandImpl(ctx, log, comp.InputSpec.BinaryPath, comp.InputSpec.Spec.Service.Operations.Uninstall)
 }
diff --git a/pkg/component/runtime/shipper.go b/pkg/component/runtime/shipper.go
new file mode 100644
index 00000000000..dd257ae3408
--- /dev/null
+++ b/pkg/component/runtime/shipper.go
@@ -0,0 +1,13 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package runtime
+
+import "github.com/elastic/elastic-agent/internal/pkg/core/authority"
+
+type shipperConn struct {
+	addr  string
+	ca    *authority.CertificateAuthority
+	pairs map[string]*authority.Pair
+}
diff --git a/pkg/component/shipper_spec.go b/pkg/component/shipper_spec.go
new file mode 100644
index 00000000000..310bd17ec35
--- /dev/null
+++ b/pkg/component/shipper_spec.go
@@ -0,0 +1,33 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package component
+
+import "fmt"
+
+// ShipperSpec is the specification for a shipper type.
+type ShipperSpec struct {
+	Name        string   `config:"name" yaml:"name" validate:"required"`
+	Description string   `config:"description" yaml:"description" validate:"required"`
+	Platforms   []string `config:"platforms" yaml:"platforms" validate:"required,min=1"`
+	Outputs     []string `config:"outputs" yaml:"outputs" validate:"required,min=1"`
+	Runtime     RuntimeSpec `config:"runtime" yaml:"runtime"`
+
+	Command *CommandSpec `config:"command,omitempty" yaml:"command,omitempty"`
+}
+
+// Validate ensures correctness of shipper specification.
+func (s *ShipperSpec) Validate() error {
+	if s.Command == nil {
+		return fmt.Errorf("shipper '%s' must define command (no other type is supported for shippers)", s.Name)
+	}
+	for i, a := range s.Platforms {
+		for j, b := range s.Platforms {
+			if i != j && a == b {
+				return fmt.Errorf("shipper '%s' defines the platform '%s' more than once", s.Name, a)
+			}
+		}
+	}
+	return nil
+}
diff --git a/pkg/component/spec.go b/pkg/component/spec.go
index be20b92208c..e7ec47a5811 100644
--- a/pkg/component/spec.go
+++ b/pkg/component/spec.go
@@ -12,9 +12,10 @@ import (
 
 // Spec a components specification.
type Spec struct { - Name string `yaml:"name,omitempty"` - Version int `config:"version" yaml:"version" validate:"required"` - Inputs []InputSpec `config:"inputs,omitempty" yaml:"inputs,omitempty"` + Name string `yaml:"name,omitempty"` + Version int `config:"version" yaml:"version" validate:"required"` + Inputs []InputSpec `config:"inputs,omitempty" yaml:"inputs,omitempty"` + Shippers []ShipperSpec `config:"shippers,omitempty" yaml:"shippers,omitempty"` } // Validate ensures correctness of component specification. @@ -40,6 +41,24 @@ func (s *Spec) Validate() error { inputsToPlatforms[input.Name] = a } } + shippersToPlatforms := make(map[string][]string) + for i, shipper := range s.Shippers { + a, ok := shippersToPlatforms[shipper.Name] + if !ok { + shippersToPlatforms[shipper.Name] = make([]string, len(shipper.Platforms)) + copy(shippersToPlatforms[shipper.Name], shipper.Platforms) + continue + } + for _, platform := range shipper.Platforms { + for _, existing := range a { + if existing == platform { + return fmt.Errorf("shipper '%s' at shippers.%d defines the same platform as a previous definition", shipper.Name, i) + } + } + a = append(a, platform) + shippersToPlatforms[shipper.Name] = a + } + } return nil } diff --git a/specs/filebeat.spec.yml b/specs/filebeat.spec.yml index 07f3cb7666e..a5aaf39f910 100644 --- a/specs/filebeat.spec.yml +++ b/specs/filebeat.spec.yml @@ -1,170 +1,193 @@ -version: 2 -inputs: - - name: aws-cloudwatch - description: "AWS Cloudwatch" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${FILEBEAT_GOGC:100}" - - "-E" - - "filebeat.config.modules.enabled=false" - - name: aws-s3 - description: "AWS S3" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure-eventhub - description: "Azure Eventhub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry - description: "PCF Cloudfoundry" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: container - description: "Container logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: docker - aliases: - - log/docker - description: "Docker logs" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: gcp-pubsub - description: "GCP Pub-Sub" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: http_endpoint - description: "HTTP Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: httpjson - description: "HTTP JSON Endpoint" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: journald - description: "Journald" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kafka - description: "Kafka" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: log - aliases: - - logfile - - event/file - description: "Logfile" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mqtt - description: "MQTT" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: netflow - description: "Netflow" - platforms: *platforms - outputs: *outputs - command: - 
args: *args - - name: o365audit - description: "Office 365 Audit" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis - aliases: - - log/redis_slowlog - description: "Redis" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syslog - aliases: - - log/syslog - description: "Syslog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: tcp - aliases: - - event/tcp - description: "TCP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: udp - aliases: - - event/udp - description: "UDP" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: unix - description: "Unix Socket" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: winlog - description: "Winlog" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: filestream - description: "Filestream" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: aws-cloudwatch + description: "AWS Cloudwatch" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + shippers: &shippers + - shipper + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${FILEBEAT_GOGC:100}" + - "-E" + - "filebeat.config.modules.enabled=false" + - name: aws-s3 + description: "AWS S3" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: azure-eventhub + description: "Azure Eventhub" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: cloudfoundry + description: "PCF Cloudfoundry" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: container + description: "Container logs" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: docker + aliases: + - log/docker + description: "Docker logs" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: gcp-pubsub + description: "GCP Pub-Sub" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: http_endpoint + description: "HTTP Endpoint" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: httpjson + description: "HTTP JSON Endpoint" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: journald + description: "Journald" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: kafka + description: "Kafka" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: log + aliases: + - logfile + - event/file + description: "Logfile" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mqtt + description: "MQTT" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: netflow + description: "Netflow" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: o365audit + description: "Office 365 Audit" + platforms: 
*platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: redis + aliases: + - log/redis_slowlog + description: "Redis" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: syslog + aliases: + - log/syslog + description: "Syslog" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: tcp + aliases: + - event/tcp + description: "TCP" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: udp + aliases: + - event/udp + description: "UDP" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: unix + description: "Unix Socket" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: winlog + description: "Winlog" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: filestream + description: "Filestream" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args diff --git a/specs/metricbeat.spec.yml b/specs/metricbeat.spec.yml index b160a4f29e7..b7c88ad4864 100644 --- a/specs/metricbeat.spec.yml +++ b/specs/metricbeat.spec.yml @@ -1,163 +1,187 @@ -version: 2 -inputs: - - name: beat/metrics - description: "Beat metrics" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${METRICBEAT_GOGC:100}" - - "-E" - - "metricbeat.config.modules.enabled=false" - - name: docker/metrics - description: "Docker metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: elasticsearch/metrics - description: "Elasticsearch metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kibana/metrics - description: "Kibana metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: kubernetes/metrics - description: "Kubernetes metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: linux/metrics - description: "Linux metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: logstash/metrics - description: "Logstash metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mongodb/metrics - description: "Mongodb metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mysql/metrics - description: "MySQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: postgresql/metrics - description: "PostgreSQL metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: redis/metrics - description: "Redis metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: system/metrics - description: "System metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: uwsgi/metrics - description: "UWSGI metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: windows/metrics - description: "Windows metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: 
aws/metrics - description: "AWS metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: awsfargate/metrics - description: "AWS Fargate metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: azure/metrics - description: "Azure metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudfoundry/metrics - description: "PCF Cloudfoundry metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: containerd/metrics - description: "Containerd metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: mssql/metrics - description: "Microsoft SQL Server metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: oracle/metrics - description: "Oracle Database metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: syncgateway/metrics - description: "Couchbase Sync Gateway metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: http/metrics - description: "HTTP metrics" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: beat/metrics + description: "Beat metrics" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + shippers: &shippers + - shipper + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=debug" + - "-E" + - "gc_percent=${METRICBEAT_GOGC:100}" + - "-E" + - "metricbeat.config.modules.enabled=false" + - name: docker/metrics + description: "Docker metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: elasticsearch/metrics + description: "Elasticsearch metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: kibana/metrics + description: "Kibana metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: kubernetes/metrics + description: "Kubernetes metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: linux/metrics + description: "Linux metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: logstash/metrics + description: "Logstash metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mongodb/metrics + description: "Mongodb metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mysql/metrics + description: "MySQL metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: postgresql/metrics + description: "PostgreSQL metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: redis/metrics + description: "Redis metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: system/metrics + description: "System metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: uwsgi/metrics + description: "UWSGI metrics" + platforms: *platforms + outputs: 
*outputs + shippers: *shippers + command: + args: *args + - name: windows/metrics + description: "Windows metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: aws/metrics + description: "AWS metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: awsfargate/metrics + description: "AWS Fargate metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: azure/metrics + description: "Azure metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: cloudfoundry/metrics + description: "PCF Cloudfoundry metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: containerd/metrics + description: "Containerd metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: mssql/metrics + description: "Microsoft SQL Server metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: oracle/metrics + description: "Oracle Database metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: syncgateway/metrics + description: "Couchbase Sync Gateway metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args + - name: http/metrics + description: "HTTP metrics" + platforms: *platforms + outputs: *outputs + shippers: *shippers + command: + args: *args diff --git a/specs/shipper.spec.yml b/specs/shipper.spec.yml new file mode 100644 index 00000000000..b9cd74fe439 --- /dev/null +++ b/specs/shipper.spec.yml @@ -0,0 +1,18 @@ +version: 2 +shippers: + - name: shipper + description: "Elastic Agent Shipper" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: {} From 2ebabb2980a41efbf278cfc715834642c915370a Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 7 Nov 2022 22:07:46 -0500 Subject: [PATCH 38/49] More work on the logging. 
--- .../application/monitoring/v1_monitor.go | 18 ++- internal/pkg/agent/cmd/run.go | 5 + pkg/component/runtime/command.go | 11 +- pkg/component/runtime/log_writer.go | 140 +++++------------- pkg/component/runtime/log_writer_test.go | 66 +++------ pkg/component/spec.go | 18 +++ pkg/core/logger/logger.go | 7 + 7 files changed, 109 insertions(+), 156 deletions(-) diff --git a/internal/pkg/agent/application/monitoring/v1_monitor.go b/internal/pkg/agent/application/monitoring/v1_monitor.go index 7eb3f7ac107..54bfbaf7d41 100644 --- a/internal/pkg/agent/application/monitoring/v1_monitor.go +++ b/internal/pkg/agent/application/monitoring/v1_monitor.go @@ -295,7 +295,7 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo filepath.Join(logsDrop, agentName+"-*.ndjson"), filepath.Join(logsDrop, agentName+"-watcher-*.ndjson"), }, - "index": fmt.Sprintf("logs-elastic_agent-%s", monitoringNamespace), + "index": fmt.Sprintf("logs-%%{[data_stream.dataset]}-%s", monitoringNamespace), "close": map[string]interface{}{ "on_state_change": map[string]interface{}{ "inactive": "5m", @@ -321,11 +321,15 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo }, }, map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": "elastic_agent", + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "event.dataset", + "to": "data_stream.dataset", + }, }, + "fail_on_error": false, + "ignore_missing": true, }, }, map[string]interface{}{ @@ -356,6 +360,7 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo }}, }, } + /* TODO(blakerouse): Does shipping logs work with this disabled? 
 	for unit, binaryName := range componentIDToBinary {
 		if !isSupportedBinary(binaryName) {
 			continue
 		}
@@ -371,7 +376,7 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo
 				"dataset":   fmt.Sprintf("elastic_agent.%s", fixedBinaryName),
 				"namespace": monitoringNamespace,
 			},
-			"index": fmt.Sprintf("logs-elastic_agent.%s-%s", fixedBinaryName, monitoringNamespace),
+			"index": fmt.Sprintf("logs-%%{[data_stream.dataset]}-%s", monitoringNamespace),
 			"paths": []interface{}{logFile, logFile + "*"},
 			"close": map[string]interface{}{
 				"on_state_change": map[string]interface{}{
@@ -434,6 +439,7 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo
 			},
 		})
 	}
+	*/
 
 	inputs := []interface{}{
 		map[string]interface{}{
diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go
index e6f9ec8d0f7..c44c30d1323 100644
--- a/internal/pkg/agent/cmd/run.go
+++ b/internal/pkg/agent/cmd/run.go
@@ -103,6 +103,11 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error {
 		return err
 	}
 
+	// add event.dataset to all log messages; required so all logs go to the correct data_stream
+	logger = logger.With("event", map[string]interface{}{
+		"dataset": "elastic_agent",
+	})
+
 	cfg, err = tryDelayEnroll(ctx, logger, cfg, override)
 	if err != nil {
 		err = errors.New(err, "failed to perform delayed enrollment")
diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go
index b4735b530a6..b056adedf82 100644
--- a/pkg/component/runtime/command.go
+++ b/pkg/component/runtime/command.go
@@ -308,7 +308,7 @@ func (c *CommandRuntime) start(comm Communicator) error {
 	proc, err := process.Start(path,
 		process.WithArgs(args),
 		process.WithEnv(env),
-		process.WithCmdOptions(attachOutErr(c.logger), dirPath(workDir)))
+		process.WithCmdOptions(attachOutErr(c.current), dirPath(workDir)))
 	if err != nil {
 		return err
 	}
@@ -414,10 +414,13 @@ func (c *CommandRuntime) workDir(uid int, gid int) (string, error) {
 	return path, nil
 }
 
-func attachOutErr(logger *logger.Logger) process.CmdOption {
+func attachOutErr(comp component.Component) process.CmdOption {
 	return func(cmd *exec.Cmd) error {
-		cmd.Stdout = newLogWriter(logger.Core())
-		cmd.Stderr = newLogWriter(logger.Core())
+		logger := logger.NewWithoutConfig("").With("component", comp.ID).With("type", comp.Spec.InputType).With("event", map[string]interface{}{
+			"dataset": fmt.Sprintf("elastic_agent.%s", comp.ID),
+		})
+		cmd.Stdout = newLogWriter(logger.Core(), comp.Spec.Spec.Command.Log)
+		cmd.Stderr = newLogWriter(logger.Core(), comp.Spec.Spec.Command.Log)
 		return nil
 	}
 }
diff --git a/pkg/component/runtime/log_writer.go b/pkg/component/runtime/log_writer.go
index 8b9572bed17..0a532aa8794 100644
--- a/pkg/component/runtime/log_writer.go
+++ b/pkg/component/runtime/log_writer.go
@@ -8,6 +8,8 @@ import (
 	"bytes"
 	"encoding/json"
 	"errors"
+	"github.com/elastic/elastic-agent/pkg/component"
+	"k8s.io/utils/strings/slices"
 	"strings"
 	"time"
@@ -25,12 +28,14 @@ type zapcoreWriter interface {
 // `Write` handles parsing lines as either ndjson or plain text.
 type logWriter struct {
 	loggerCore zapcoreWriter
+	logCfg     component.CommandLogSpec
 	remainder  []byte
 }
 
-func newLogWriter(core zapcoreWriter) *logWriter {
+func newLogWriter(core zapcoreWriter, logCfg component.CommandLogSpec) *logWriter {
 	return &logWriter{
 		loggerCore: core,
+		logCfg:     logCfg,
 	}
 }
@@ -44,11 +49,7 @@ func (r *logWriter) Write(p []byte) (int, error) {
 		idx := bytes.IndexByte(p[offset:], '\n')
 		if idx < 0 {
 			// not all used add to remainder to be used on next call
-			if r.remainder == nil || len(r.remainder) == 0 {
-				r.remainder = p[offset:]
-			} else {
-				r.remainder = append(r.remainder, p[offset:]...)
-			}
+			r.remainder = append(r.remainder, p[offset:]...)
 			return len(p), nil
 		}
@@ -89,10 +93,10 @@ func (r *logWriter) handleJSON(line string) bool {
 	if err := json.Unmarshal([]byte(line), &evt); err != nil {
 		return false
 	}
-	lvl := getLevel(evt)
-	ts := getTimestamp(evt)
-	msg := getMessage(evt)
-	fields := getFields(evt)
+	lvl := getLevel(evt, r.logCfg.LevelField)
+	ts := getTimestamp(evt, r.logCfg.TimeField, r.logCfg.TimeFormat)
+	msg := getMessage(evt, r.logCfg.MessageField)
+	fields := getFields(evt, r.logCfg.IgnoreFields)
 	_ = r.loggerCore.Write(zapcore.Entry{
 		Level: lvl,
 		Time:  ts,
@@ -101,21 +105,11 @@
-func getLevel(evt map[string]interface{}) zapcore.Level {
+func getLevel(evt map[string]interface{}, field string) zapcore.Level {
 	lvl := zapcore.InfoLevel
-	err := unmarshalLevel(&lvl, getStrVal(evt, "log.level"))
-	if err != nil {
-		err := unmarshalLevel(&lvl, getStrVal(evt, "log", "level"))
-		if err != nil {
-			err := unmarshalLevel(&lvl, getStrVal(evt, "level"))
-			if err == nil {
-				deleteVal(evt, "level")
-			}
-		} else {
-			deleteVal(evt, "log", "level")
-		}
-	} else {
-		deleteVal(evt, "log.level")
+	err := unmarshalLevel(&lvl, getStrVal(evt, field))
+	if err == nil {
+		delete(evt, field)
 	}
 	return lvl
 }
@@ -131,95 +125,43 @@ func unmarshalLevel(lvl *zapcore.Level, val string) error {
 	return lvl.UnmarshalText([]byte(val))
 }
 
-func getMessage(evt map[string]interface{}) string {
-	msg := getStrVal(evt, "message")
-	if msg == "" {
-		msg = getStrVal(evt, "msg")
-		if msg != "" {
-			deleteVal(evt, "msg")
-		}
-	} else {
-		deleteVal(evt, "message")
+func getMessage(evt map[string]interface{}, field string) string {
+	msg := getStrVal(evt, field)
+	if msg != "" {
+		delete(evt, field)
 	}
 	return msg
 }
 
-func getTimestamp(evt map[string]interface{}) time.Time {
-	t, err := time.Parse(time.RFC3339Nano, getStrVal(evt, "@timestamp"))
-	if err != nil {
-		t, err = time.Parse(time.RFC3339Nano, getStrVal(evt, "timestamp"))
-		if err != nil {
-			t, err = time.Parse(time.RFC3339Nano, getStrVal(evt, "time"))
-			if err != nil {
-				t = time.Now()
-			} else {
-				deleteVal(evt, "time")
-			}
-		} else {
-			deleteVal(evt, "timestamp")
-		}
-	} else {
-		deleteVal(evt, "@timestamp")
+func getTimestamp(evt map[string]interface{}, field string, format string) time.Time {
+	t, err := time.Parse(format, getStrVal(evt, field))
+	if err == nil {
+		delete(evt, field)
+		return t
 	}
-	return t
+	return time.Now()
 }
 
-func getFields(evt map[string]interface{}) []zapcore.Field {
+func getFields(evt map[string]interface{}, ignore []string) []zapcore.Field {
 	fields := make([]zapcore.Field, 0, len(evt))
 	for k, v := range evt {
+		if len(ignore) > 0 && slices.Contains(ignore, k) {
+			// ignore field
+			continue
+		}
 		fields = append(fields, zap.Any(k, v))
 	}
 	return fields
 }
 
-func getStrVal(evt map[string]interface{}, fields ...string) string {
-	if len(fields) == 0 {
-		panic("must provide at least one field")
-	}
-	last := len(fields) - 1
-	for i, field := range fields {
-		if i == last {
-			raw, ok := evt[field]
-			if !ok {
-				return ""
-			}
-			str, ok := raw.(string)
-			if !ok {
-				return ""
-			}
-			return str
-		}
-		raw, ok := evt[field]
-		if !ok {
-			return ""
-		}
-		nested, ok := raw.(map[string]interface{})
-		if !ok {
-			return ""
-		}
-		evt = nested
-	}
-	return ""
-}
-
-func deleteVal(evt map[string]interface{}, fields ...string) {
-	if len(fields) == 0 {
-		panic("must provide at least one field")
+func getStrVal(evt map[string]interface{}, field string) string {
+	raw, ok := evt[field]
+	if !ok {
+		return ""
 	}
-	last := len(fields) - 1
-	for i, field := range fields {
-		if i == last {
-			delete(evt, field)
-			return
-		}
-		raw, ok := evt[field]
-		if !ok {
-			return
-		}
-		nested, ok := raw.(map[string]interface{})
-		if !ok {
-			return
-		}
-		evt = nested
+	str, ok := raw.(string)
+	if !ok {
+		return ""
 	}
+	return str
 }
diff --git a/pkg/component/runtime/log_writer_test.go b/pkg/component/runtime/log_writer_test.go
index 7c23bc242c8..2b1556eaff8 100644
--- a/pkg/component/runtime/log_writer_test.go
+++ b/pkg/component/runtime/log_writer_test.go
@@ -5,6 +5,7 @@
 package runtime
 
 import (
+	"github.com/elastic/elastic-agent/pkg/component"
 	"sort"
 	"testing"
 	"time"
@@ -23,9 +24,10 @@ type wrote struct {
 
 func TestLogWriter(t *testing.T) {
 	scenarios := []struct {
-		Name  string
-		Lines []string
-		Wrote []wrote
+		Name   string
+		Config component.CommandLogSpec
+		Lines  []string
+		Wrote  []wrote
 	}{
 		{
 			Name: "multi plain text line",
@@ -79,22 +81,24 @@
 			},
 		},
 		{
-			Name: "json log lines",
+			Name: "json log line split",
+			Config: component.CommandLogSpec{
+				LevelField:   "log.level",
+				TimeField:    "@timestamp",
+				TimeFormat:   time.RFC3339Nano,
+				MessageField: "message",
+				IgnoreFields: []string{"ignore"},
+			},
 			Lines: []string{
-				`{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "debug", "message": "message field", "string": "extra", "int": 50}`,
-				"\n",
-				`{"timestamp": "2009-11-10T23:00:01Z", "log": {"level": "warn"}, "msg": "msg field", "string": "extra next", "int": 100}`,
-				"\n",
-				`{"time": "2009-11-10T23:00:02Z", "level": "trace", "message": "message field", "nested": {"key": "value"}}`,
-				"\n",
-				`{"level": "error", "message": "error string"}`,
+				`{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "debug", "message": "message`,
+				` field", "string": "extra", "int": 50, "ignore": "other"}`,
 				"\n",
 			},
 			Wrote: []wrote{
 				{
 					entry: zapcore.Entry{
 						Level:   zapcore.DebugLevel,
-						Time:    parseTime("2009-11-10T23:00:00Z"),
+						Time:    parseTime("2009-11-10T23:00:00Z", time.RFC3339Nano),
 						Message: "message field",
 					},
 					fields: []zapcore.Field{
@@ -102,38 +106,6 @@
 						zap.String("string", "extra"),
 						zap.Float64("int", 50),
 					},
 				},
-				{
-					entry: zapcore.Entry{
-						Level:   zapcore.WarnLevel,
-						Time:    parseTime("2009-11-10T23:00:01Z"),
-						Message: "msg field",
-					},
-					fields: []zapcore.Field{
-						zap.String("string", "extra next"),
-						zap.Float64("int", 100),
-						zap.Any("log", map[string]interface{}{}),
-					},
-				},
-				{
-					entry: zapcore.Entry{
-						Level:   zapcore.DebugLevel,
-						Time:    parseTime("2009-11-10T23:00:02Z"),
-						Message: "message field",
-					},
-					fields: []zapcore.Field{
-						zap.Any("nested", map[string]interface{}{
-							"key": "value",
-						}),
-					},
-				},
-				{
-					entry: zapcore.Entry{
-						Level:   zapcore.ErrorLevel,
-						Time:    time.Time{},
-						Message: "error string",
-					},
-					fields: []zapcore.Field{},
-				},
 			},
 		},
 		{
@@ -157,7 +129,7 @@
 	for _, scenario := range scenarios {
 		t.Run(scenario.Name, func(t *testing.T) {
 			c := &captureCore{}
-			w := newLogWriter(c)
+			w := newLogWriter(c, scenario.Config)
 			for _, line := range scenario.Lines {
 				l := len([]byte(line))
 				c, err := w.Write([]byte(line))
@@ -199,8 +171,8 @@ func (c *captureCore) Write(entry zapcore.Entry, fields []zapcore.Field) error {
 	return nil
 }
 
-func parseTime(t string) time.Time {
-	v, err := time.Parse(time.RFC3339Nano, t)
+func parseTime(t string, format string) time.Time {
+	v, err := time.Parse(format, t)
 	if err != nil {
 		panic(err)
 	}
diff --git a/pkg/component/spec.go b/pkg/component/spec.go
index be20b92208c..7954c3e579a 100644
--- a/pkg/component/spec.go
+++ b/pkg/component/spec.go
@@ -59,6 +59,7 @@ type CommandSpec struct {
 	Args     []string           `config:"args,omitempty" yaml:"args,omitempty"`
 	Env      []CommandEnvSpec   `config:"env,omitempty" yaml:"env,omitempty"`
 	Timeouts CommandTimeoutSpec `config:"timeouts" yaml:"timeouts"`
+	Log      CommandLogSpec     `config:"log" yaml:"log"`
 }
 
 // CommandEnvSpec is the specification that defines environment variables that will be set to execute the subprocess.
@@ -81,6 +82,23 @@ func (t *CommandTimeoutSpec) InitDefaults() {
 	t.Stop = 30 * time.Second
 }
 
+// CommandLogSpec is the logging specification for subprocess.
+type CommandLogSpec struct {
+	LevelField   string   `config:"level_field" yaml:"level_field"`
+	TimeField    string   `config:"time_field" yaml:"time_field"`
+	TimeFormat   string   `config:"time_format" yaml:"time_format"`
+	MessageField string   `config:"message_field" yaml:"message_field"`
+	IgnoreFields []string `config:"ignore_fields" yaml:"ignore_fields"`
+}
+
+// InitDefaults initializes the defaults for the log specification.
+func (t *CommandLogSpec) InitDefaults() {
+	t.LevelField = "log.level"
+	t.TimeField = "@timestamp"
+	t.TimeFormat = "2006-01-02T15:04:05.000Z0700"
+	t.MessageField = "message"
+}
+
 // ServiceTimeoutSpec is the timeout specification for subprocess.
 type ServiceTimeoutSpec struct {
 	Checkin time.Duration `config:"checkin" yaml:"checkin"`
diff --git a/pkg/core/logger/logger.go b/pkg/core/logger/logger.go
index 049fd271038..8c1aa50e98e 100644
--- a/pkg/core/logger/logger.go
+++ b/pkg/core/logger/logger.go
@@ -58,6 +58,13 @@ func NewFromConfig(name string, cfg *Config, logInternal bool) (*Logger, error) {
 	return new(name, cfg, logInternal)
 }
 
+// NewWithoutConfig returns a new logger without requiring a configuration.
+//
+// Use only when a clean logger is needed, and it is known that the logging configuration has already been performed.
+func NewWithoutConfig(name string) *Logger {
+	return logp.NewLogger(name)
+}
+
 func new(name string, cfg *Config, logInternal bool) (*Logger, error) {
 	commonCfg, err := toCommonConfig(cfg)
 	if err != nil {

From 5fda6fe2b7d5af3256084fbd37d6846bed53ee01 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Mon, 7 Nov 2022 22:49:47 -0500
Subject: [PATCH 39/49] More fixes.
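
Rename the CommandLogSpec fields from *_field to *_key, pass the logger
through NewCommandRuntime again, and attach a structured component context
(id, type, binary) to both the runtime logger and the captured subprocess
output. The event.dataset for captured output is now derived from the
component ID with dashes replaced by underscores, the leftover commented-out
per-binary monitoring inputs are deleted, and the agent log input is emitted
as a single filestream input.

With the renamed keys, the hypothetical spec sketch from the previous commit
becomes:

    command:
      log:
        level_key: "log.level"
        time_key: "@timestamp"
        time_format: "2006-01-02T15:04:05.000Z0700"
        message_key: "message"
        ignore_keys:
          - "ecs.version"   # hypothetical example value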
--- .../application/monitoring/v1_monitor.go | 101 +----------------- pkg/component/runtime/command.go | 24 +++-- pkg/component/runtime/log_writer.go | 30 +++--- pkg/component/runtime/log_writer_test.go | 10 +- pkg/component/runtime/runtime.go | 4 +- pkg/component/spec.go | 16 +-- 6 files changed, 51 insertions(+), 134 deletions(-) diff --git a/internal/pkg/agent/application/monitoring/v1_monitor.go b/internal/pkg/agent/application/monitoring/v1_monitor.go index 54bfbaf7d41..2cc2a196ad9 100644 --- a/internal/pkg/agent/application/monitoring/v1_monitor.go +++ b/internal/pkg/agent/application/monitoring/v1_monitor.go @@ -280,12 +280,13 @@ func (b *BeatsMonitor) injectMonitoringOutput(source, dest map[string]interface{ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDToBinary map[string]string, monitoringOutput string) error { monitoringNamespace := b.monitoringNamespace() - //fixedAgentName := strings.ReplaceAll(agentName, "-", "_") logsDrop := filepath.Dir(loggingPath("unit", b.operatingSystem)) - - streams := []interface{}{ + inputs := []interface{}{ map[string]interface{}{ - idKey: "logs-monitoring-agent", + idKey: "logs-monitoring-agent", + "name": "logs-monitoring-agent", + "type": "filestream", + useOutputKey: monitoringOutput, "data_stream": map[string]interface{}{ "type": "logs", "dataset": "elastic_agent", @@ -357,100 +358,8 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo }, "ignore_missing": true, }, - }}, - }, - } - /* TODO(blakerouse): Does shipping logs work with this disabled? - for unit, binaryName := range componentIDToBinary { - if !isSupportedBinary(binaryName) { - continue - } - - fixedBinaryName := strings.ReplaceAll(binaryName, "-", "_") - name := strings.ReplaceAll(unit, "-", "_") // conform with index naming policy - logFile := loggingPath(unit, b.operatingSystem) - streams = append(streams, map[string]interface{}{ - idKey: "logs-monitoring-" + name, - "data_stream": map[string]interface{}{ - "type": "logs", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedBinaryName), - "namespace": monitoringNamespace, - }, - "index": fmt.Sprintf("logs-%%{[data_stream.dataset]}-%s", monitoringNamespace), - "paths": []interface{}{logFile, logFile + "*"}, - "close": map[string]interface{}{ - "on_state_change": map[string]interface{}{ - "inactive": "5m", }, }, - "parsers": []interface{}{ - map[string]interface{}{ - "ndjson": map[string]interface{}{ - "overwrite_keys": true, - "message_key": "message", - }, - }, - }, - "processors": []interface{}{ - map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": fmt.Sprintf("elastic_agent.%s", fixedBinaryName), - "namespace": monitoringNamespace, - }, - }, - }, - map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "event", - "fields": map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", fixedBinaryName), - }, - }, - }, - map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), - "version": b.agentInfo.Version(), - "snapshot": b.agentInfo.Snapshot(), - }, - }, - }, - map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), - }, - }, - }, - map[string]interface{}{ - "drop_fields": map[string]interface{}{ - "fields": []interface{}{ - "ecs.version", 
//coming from logger, already added by libbeat - }, - "ignore_missing": true, - }, - }, - }, - }) - } - */ - - inputs := []interface{}{ - map[string]interface{}{ - idKey: "logs-monitoring-agent", - "name": "logs-monitoring-agent", - "type": "filestream", - useOutputKey: monitoringOutput, - "data_stream": map[string]interface{}{ - "namespace": monitoringNamespace, - }, - "streams": streams, }, } inputsNode, found := cfg[inputsKey] diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index 92286d311e5..b51461384c0 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -12,6 +12,7 @@ import ( "os/exec" "path/filepath" "runtime" + "strings" "time" "github.com/elastic/elastic-agent/pkg/core/logger" @@ -85,8 +86,11 @@ func NewCommandRuntime(comp component.Component, logger *logger.Logger, monitor if cmdSpec == nil { return nil, errors.New("must have command defined in specification") } - - c.logger = logger.With("component", comp.ID).With("type", cmdSpec.) + c.logger = logger.With("component", map[string]interface{}{ + "id": comp.ID, + "type": c.getSpecType(), + "binary": c.getSpecBinaryName(), + }) return c, nil } @@ -311,7 +315,7 @@ func (c *CommandRuntime) start(comm Communicator) error { proc, err := process.Start(path, process.WithArgs(args), process.WithEnv(env), - process.WithCmdOptions(attachOutErr(c.current), dirPath(workDir))) + process.WithCmdOptions(attachOutErr(c.current, c.getCommandSpec(), c.getSpecType(), c.getSpecBinaryName()), dirPath(workDir))) if err != nil { return err } @@ -457,13 +461,17 @@ func (c *CommandRuntime) getCommandSpec() *component.CommandSpec { return nil } -func attachOutErr(comp component.Component) process.CmdOption { +func attachOutErr(comp component.Component, cmdSpec *component.CommandSpec, typeStr string, binaryName string) process.CmdOption { return func(cmd *exec.Cmd) error { - logger := logger.NewWithoutConfig("").With("component", comp.ID).With("type", comp.Spec.InputType).With("event", map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", comp.ID), + logger := logger.NewWithoutConfig("").With("component", map[string]interface{}{ + "id": comp.ID, + "type": typeStr, + "binary": binaryName, + }).With("event", map[string]interface{}{ + "dataset": fmt.Sprintf("elastic_agent.%s", strings.ReplaceAll(comp.ID, "-", "_")), }) - cmd.Stdout = newLogWriter(logger.Core(), comp.Spec.Spec.Command.Log) - cmd.Stderr = newLogWriter(logger.Core(), comp.Spec.Spec.Command.Log) + cmd.Stdout = newLogWriter(logger.Core(), cmdSpec.Log) + cmd.Stderr = newLogWriter(logger.Core(), cmdSpec.Log) return nil } } diff --git a/pkg/component/runtime/log_writer.go b/pkg/component/runtime/log_writer.go index 0a532aa8794..4b24f6a24c2 100644 --- a/pkg/component/runtime/log_writer.go +++ b/pkg/component/runtime/log_writer.go @@ -93,10 +93,10 @@ func (r *logWriter) handleJSON(line string) bool { if err := json.Unmarshal([]byte(line), &evt); err != nil { return false } - lvl := getLevel(evt, r.logCfg.LevelField) - ts := getTimestamp(evt, r.logCfg.TimeField, r.logCfg.TimeFormat) - msg := getMessage(evt, r.logCfg.MessageField) - fields := getFields(evt, r.logCfg.IgnoreFields) + lvl := getLevel(evt, r.logCfg.LevelKey) + ts := getTimestamp(evt, r.logCfg.TimeKey, r.logCfg.TimeFormat) + msg := getMessage(evt, r.logCfg.MessageKey) + fields := getFields(evt, r.logCfg.IgnoreKeys) _ = r.loggerCore.Write(zapcore.Entry{ Level: lvl, Time: ts, @@ -105,11 +105,11 @@ func (r *logWriter) handleJSON(line string) bool { return true } 
-func getLevel(evt map[string]interface{}, field string) zapcore.Level { +func getLevel(evt map[string]interface{}, key string) zapcore.Level { lvl := zapcore.InfoLevel - err := unmarshalLevel(&lvl, getStrVal(evt, field)) + err := unmarshalLevel(&lvl, getStrVal(evt, key)) if err == nil { - delete(evt, field) + delete(evt, key) } return lvl } @@ -125,18 +125,18 @@ func unmarshalLevel(lvl *zapcore.Level, val string) error { return lvl.UnmarshalText([]byte(val)) } -func getMessage(evt map[string]interface{}, field string) string { - msg := getStrVal(evt, field) +func getMessage(evt map[string]interface{}, key string) string { + msg := getStrVal(evt, key) if msg != "" { - delete(evt, field) + delete(evt, key) } return msg } -func getTimestamp(evt map[string]interface{}, field string, format string) time.Time { - t, err := time.Parse(format, getStrVal(evt, field)) +func getTimestamp(evt map[string]interface{}, key string, format string) time.Time { + t, err := time.Parse(format, getStrVal(evt, key)) if err == nil { - delete(evt, field) + delete(evt, key) return t } return time.Now() @@ -154,8 +154,8 @@ func getFields(evt map[string]interface{}, ignore []string) []zapcore.Field { return fields } -func getStrVal(evt map[string]interface{}, field string) string { - raw, ok := evt[field] +func getStrVal(evt map[string]interface{}, key string) string { + raw, ok := evt[key] if !ok { return "" } diff --git a/pkg/component/runtime/log_writer_test.go b/pkg/component/runtime/log_writer_test.go index 2b1556eaff8..402ef4b2314 100644 --- a/pkg/component/runtime/log_writer_test.go +++ b/pkg/component/runtime/log_writer_test.go @@ -83,11 +83,11 @@ func TestLogWriter(t *testing.T) { { Name: "json log line split", Config: component.CommandLogSpec{ - LevelField: "log.level", - TimeField: "@timestamp", - TimeFormat: time.RFC3339Nano, - MessageField: "message", - IgnoreFields: []string{"ignore"}, + LevelKey: "log.level", + TimeKey: "@timestamp", + TimeFormat: time.RFC3339Nano, + MessageKey: "message", + IgnoreKeys: []string{"ignore"}, }, Lines: []string{ `{"@timestamp": "2009-11-10T23:00:00Z", "log.level": "debug", "message": "message`, diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index 0ed1b46c26c..aa780a002e5 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -60,7 +60,7 @@ func NewComponentRuntime(comp component.Component, logger *logger.Logger, monito } if comp.InputSpec != nil { if comp.InputSpec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) + return NewCommandRuntime(comp, logger, monitor) } if comp.InputSpec.Spec.Service != nil { return NewServiceRuntime(comp, logger) @@ -69,7 +69,7 @@ func NewComponentRuntime(comp component.Component, logger *logger.Logger, monito } if comp.ShipperSpec != nil { if comp.ShipperSpec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) + return NewCommandRuntime(comp, logger, monitor) } return nil, errors.New("components for shippers can only support command runtime") } diff --git a/pkg/component/spec.go b/pkg/component/spec.go index 2ea0b5a7dae..df03fec308a 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -103,19 +103,19 @@ func (t *CommandTimeoutSpec) InitDefaults() { // CommandLogSpec is the logging specification for subprocess. 
 type CommandLogSpec struct {
-	LevelField   string   `config:"level_field" yaml:"level_field"`
-	TimeField    string   `config:"time_field" yaml:"time_field"`
-	TimeFormat   string   `config:"time_format" yaml:"time_format"`
-	MessageField string   `config:"message_field" yaml:"message_field"`
-	IgnoreFields []string `config:"ignore_fields" yaml:"ignore_fields"`
+	LevelKey   string   `config:"level_key" yaml:"level_key"`
+	TimeKey    string   `config:"time_key" yaml:"time_key"`
+	TimeFormat string   `config:"time_format" yaml:"time_format"`
+	MessageKey string   `config:"message_key" yaml:"message_key"`
+	IgnoreKeys []string `config:"ignore_keys" yaml:"ignore_keys"`
 }
 
 // InitDefaults initializes the defaults for the log specification.
 func (t *CommandLogSpec) InitDefaults() {
-	t.LevelField = "log.level"
-	t.TimeField = "@timestamp"
+	t.LevelKey = "log.level"
+	t.TimeKey = "@timestamp"
 	t.TimeFormat = "2006-01-02T15:04:05.000Z0700"
-	t.MessageField = "message"
+	t.MessageKey = "message"
 }
 
 // ServiceTimeoutSpec is the timeout specification for subprocess.

From 692aaa69ac961c819e3a9abe25148a912dd5a191 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Mon, 7 Nov 2022 23:03:43 -0500
Subject: [PATCH 40/49] Change back to streams.

Restore the streams wrapper around the monitoring filestream input that the
previous commit flattened, keeping the namespace on the input and the rest
of the configuration on the stream.

---
 .../application/monitoring/v1_monitor.go | 22 ++++++++++++++++------
 1 file changed, 16 insertions(+), 6 deletions(-)

diff --git a/internal/pkg/agent/application/monitoring/v1_monitor.go b/internal/pkg/agent/application/monitoring/v1_monitor.go
index 2cc2a196ad9..89a234ef5d4 100644
--- a/internal/pkg/agent/application/monitoring/v1_monitor.go
+++ b/internal/pkg/agent/application/monitoring/v1_monitor.go
@@ -281,12 +281,10 @@ func (b *BeatsMonitor) injectMonitoringOutput(source, dest map[string]interface{
 func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDToBinary map[string]string, monitoringOutput string) error {
 	monitoringNamespace := b.monitoringNamespace()
 	logsDrop := filepath.Dir(loggingPath("unit", b.operatingSystem))
-	inputs := []interface{}{
+
+	streams := []interface{}{
 		map[string]interface{}{
-			idKey:        "logs-monitoring-agent",
-			"name":       "logs-monitoring-agent",
-			"type":       "filestream",
-			useOutputKey: monitoringOutput,
+			idKey: "logs-monitoring-agent",
 			"data_stream": map[string]interface{}{
 				"type":    "logs",
 				"dataset": "elastic_agent",
@@ -358,8 +356,20 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo
 				},
 				"ignore_missing": true,
 			},
-		},
+		}},
+	}
+
+	inputs := []interface{}{
+		map[string]interface{}{
+			idKey:        "logs-monitoring-agent",
+			"name":       "logs-monitoring-agent",
+			"type":       "filestream",
+			useOutputKey: monitoringOutput,
+			"data_stream": map[string]interface{}{
+				"namespace": monitoringNamespace,
+			},
+			"streams": streams,
 		},
 	}
 	inputsNode, found := cfg[inputsKey]

From c877db0720aa0349c7f2bfb8f5482ada06ac4f73 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Mon, 7 Nov 2022 23:07:13 -0500
Subject: [PATCH 41/49] Fix go.mod.

Promote k8s.io/utils to a direct dependency in go.mod (it is now imported by
the log writer) and move its entry in NOTICE.txt accordingly.

---
 NOTICE.txt | 424 ++++++++++++++++++++++++++---------------------------
 go.mod     |   2 +-
 2 files changed, 213 insertions(+), 213 deletions(-)

diff --git a/NOTICE.txt b/NOTICE.txt
index cdd71e1a34f..f4f75896f65 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -6845,6 +6845,218 @@ Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.23.4/LICENSE:
    limitations under the License.
+-------------------------------------------------------------------------------- +Dependency : k8s.io/utils +Version: v0.0.0-20211116205334-6203023598ed +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20211116205334-6203023598ed/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + ================================================================================ @@ -16726,218 +16938,6 @@ Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-2021111 limitations under the License. --------------------------------------------------------------------------------- -Dependency : k8s.io/utils -Version: v0.0.0-20211116205334-6203023598ed -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20211116205334-6203023598ed/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - -------------------------------------------------------------------------------- Dependency : sigs.k8s.io/json Version: v0.0.0-20211020170558-c049b76a60c6 diff --git a/go.mod b/go.mod index 4d44c91ae45..35522946374 100644 --- a/go.mod +++ b/go.mod @@ -58,6 +58,7 @@ require ( k8s.io/api v0.23.4 k8s.io/apimachinery v0.23.4 k8s.io/client-go v0.23.4 + k8s.io/utils v0.0.0-20211116205334-6203023598ed ) require ( @@ -137,7 +138,6 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect - k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.2.0 // indirect From 99b5427fb64990710e05632f6c808bb866aa462b Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 7 Nov 2022 23:10:10 -0500 Subject: [PATCH 42/49] Fix import. 
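
The log writer pulled in k8s.io/utils/strings/slices solely for
slices.Contains, which made k8s.io/utils a direct dependency in go.mod
and churned the NOTICE.txt attribution blocks. Replace it with a small
local helper and restore the grouped import order (stdlib, third-party,
elastic-agent packages).

A sketch of the replacement, assuming only the slices.Contains behavior
for string slices needs to be preserved:

    // contains reports whether val is present in s.
    func contains(s []string, val string) bool {
        for _, v := range s {
            if v == val {
                return true
            }
        }
        return false
    }

    // e.g. in getFields: skip any event key listed in ignore_keys.
    if len(ignore) > 0 && contains(ignore, k) {
        continue // ignore field
    }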
--- NOTICE.txt | 424 +++++++++++------------ go.mod | 2 +- pkg/component/runtime/log_writer.go | 16 +- pkg/component/runtime/log_writer_test.go | 4 +- 4 files changed, 227 insertions(+), 219 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index f4f75896f65..cdd71e1a34f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -6845,218 +6845,6 @@ Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.23.4/LICENSE: limitations under the License. --------------------------------------------------------------------------------- -Dependency : k8s.io/utils -Version: v0.0.0-20211116205334-6203023598ed -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20211116205334-6203023598ed/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - ================================================================================ @@ -16938,6 +16726,218 @@ Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-2021111 limitations under the License. +-------------------------------------------------------------------------------- +Dependency : k8s.io/utils +Version: v0.0.0-20211116205334-6203023598ed +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20211116205334-6203023598ed/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + -------------------------------------------------------------------------------- Dependency : sigs.k8s.io/json Version: v0.0.0-20211020170558-c049b76a60c6 diff --git a/go.mod b/go.mod index 35522946374..4d44c91ae45 100644 --- a/go.mod +++ b/go.mod @@ -58,7 +58,6 @@ require ( k8s.io/api v0.23.4 k8s.io/apimachinery v0.23.4 k8s.io/client-go v0.23.4 - k8s.io/utils v0.0.0-20211116205334-6203023598ed ) require ( @@ -138,6 +137,7 @@ require ( gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.0 // indirect k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect + k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect sigs.k8s.io/yaml v1.2.0 // indirect diff --git a/pkg/component/runtime/log_writer.go b/pkg/component/runtime/log_writer.go index 4b24f6a24c2..5d772dfd240 100644 --- a/pkg/component/runtime/log_writer.go +++ b/pkg/component/runtime/log_writer.go @@ -9,14 +9,13 @@ import ( "encoding/json" "errors" "fmt" - "github.com/elastic/elastic-agent/pkg/component" - "k8s.io/utils/strings/slices" "strings" "time" "go.uber.org/zap" - "go.uber.org/zap/zapcore" + + "github.com/elastic/elastic-agent/pkg/component" ) type zapcoreWriter interface { @@ -145,7 +144,7 @@ func getTimestamp(evt map[string]interface{}, key string, format string) time.Ti func getFields(evt map[string]interface{}, ignore []string) []zapcore.Field { fields := make([]zapcore.Field, 0, len(evt)) for k, v := range evt { - if len(ignore) > 0 && slices.Contains(ignore, k) { + if len(ignore) > 0 && contains(ignore, k) { // ignore field continue } @@ -165,3 +164,12 @@ func getStrVal(evt map[string]interface{}, key string) string { } return str } + +func contains(s []string, val string) bool { + for _, v := range s { + if v == val { + return true + } + } + return false +} diff --git a/pkg/component/runtime/log_writer_test.go b/pkg/component/runtime/log_writer_test.go index 402ef4b2314..5da512e9f77 100644 --- a/pkg/component/runtime/log_writer_test.go +++ b/pkg/component/runtime/log_writer_test.go @@ -5,7 +5,6 @@ package runtime import ( - "github.com/elastic/elastic-agent/pkg/component" "sort" "testing" "time" @@ -13,8 +12,9 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "go.uber.org/zap" - "go.uber.org/zap/zapcore" + + "github.com/elastic/elastic-agent/pkg/component" ) type wrote struct { From 4903c1bd7d227bdfb46f798af647cc62ff83680f Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 9 Nov 2022 11:57:33 -0500 Subject: [PATCH 43/49] Fix issues with merge of main. 
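
Merging main left a few inconsistencies: the NewCommandRuntime call
sites in pkg/component/runtime/runtime.go still passed two arguments
instead of three, elastic-agent-libs needed to move from v0.2.6 to
v0.2.15, and the command spec lost its log section. Pass the logger
through, bump the dependency, and add CommandLogSpec so a component
spec can declare how log lines emitted by the subprocess are parsed.

A rough sketch of the defaults (values taken from InitDefaults in the
diff below; the snippet itself is illustrative, not part of this
change):

    var log component.CommandLogSpec
    log.InitDefaults()
    // log.LevelKey   == "log.level"
    // log.TimeKey    == "@timestamp"
    // log.TimeFormat == "2006-01-02T15:04:05.000Z0700"
    // log.MessageKey == "message"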
--- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 4 ++-- pkg/component/runtime/command.go | 5 ++--- pkg/component/runtime/runtime.go | 4 ++-- pkg/component/spec.go | 18 ++++++++++++++++++ 6 files changed, 27 insertions(+), 10 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 7bc5103d040..cdd71e1a34f 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1273,11 +1273,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-libs -Version: v0.2.6 +Version: v0.2.15 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.6/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-libs@v0.2.15/LICENSE: Apache License Version 2.0, January 2004 diff --git a/go.mod b/go.mod index df1845dff01..4d44c91ae45 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/elastic/e2e-testing v1.99.2-0.20220117192005-d3365c99b9c4 github.com/elastic/elastic-agent-autodiscover v0.2.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 - github.com/elastic/elastic-agent-libs v0.2.6 + github.com/elastic/elastic-agent-libs v0.2.15 github.com/elastic/elastic-agent-system-metrics v0.4.4 github.com/elastic/go-licenser v0.4.0 github.com/elastic/go-sysinfo v1.8.1 diff --git a/go.sum b/go.sum index 73ded2d2cf3..ac08a20814c 100644 --- a/go.sum +++ b/go.sum @@ -387,8 +387,8 @@ github.com/elastic/elastic-agent-autodiscover v0.2.1/go.mod h1:gPnzzfdYNdgznAb+i github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484 h1:uJIMfLgCenJvxsVmEjBjYGxt0JddCgw2IxgoNfcIXOk= github.com/elastic/elastic-agent-client/v7 v7.0.0-20220804181728-b0328d2fe484/go.mod h1:fkvyUfFwyAG5OnMF0h+FV9sC0Xn9YLITwQpSuwungQs= github.com/elastic/elastic-agent-libs v0.2.5/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= -github.com/elastic/elastic-agent-libs v0.2.6 h1:DpcUcCVYZ7lNtHLUlyT1u/GtGAh49wpL15DTH7+8O5o= -github.com/elastic/elastic-agent-libs v0.2.6/go.mod h1:chO3rtcLyGlKi9S0iGVZhYCzDfdDsAQYBc+ui588AFE= +github.com/elastic/elastic-agent-libs v0.2.15 h1:hdAbrZZ2mCPcQLRCE3E8xw3mHKl8HFMt36w7jan/XGo= +github.com/elastic/elastic-agent-libs v0.2.15/go.mod h1:0J9lzJh+BjttIiVjYDLncKYCEWUUHiiqnuI64y6C6ss= github.com/elastic/elastic-agent-system-metrics v0.4.4 h1:Br3S+TlBhijrLysOvbHscFhgQ00X/trDT5VEnOau0E0= github.com/elastic/elastic-agent-system-metrics v0.4.4/go.mod h1:tF/f9Off38nfzTZHIVQ++FkXrDm9keFhFpJ+3pQ00iI= github.com/elastic/elastic-package v0.32.1/go.mod h1:l1fEnF52XRBL6a5h6uAemtdViz2bjtjUtgdQcuRhEAY= diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index b81eff1d279..cc5b9f65634 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -17,11 +17,11 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" - "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/internal/pkg/agent/application/paths" - "github.com/elastic/elastic-agent/pkg/utils" "github.com/elastic/elastic-agent/pkg/component" + "github.com/elastic/elastic-agent/pkg/core/logger" "github.com/elastic/elastic-agent/pkg/core/process" + "github.com/elastic/elastic-agent/pkg/utils" ) type actionMode int @@ -80,7 +80,6 @@ func NewCommandRuntime(comp component.Component, logger *logger.Logger, monitor compCh: make(chan component.Component), actionState: actionStop, 
state: newComponentState(&comp), - } cmdSpec := c.getCommandSpec() if cmdSpec == nil { diff --git a/pkg/component/runtime/runtime.go b/pkg/component/runtime/runtime.go index 0ed1b46c26c..aa780a002e5 100644 --- a/pkg/component/runtime/runtime.go +++ b/pkg/component/runtime/runtime.go @@ -60,7 +60,7 @@ func NewComponentRuntime(comp component.Component, logger *logger.Logger, monito } if comp.InputSpec != nil { if comp.InputSpec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) + return NewCommandRuntime(comp, logger, monitor) } if comp.InputSpec.Spec.Service != nil { return NewServiceRuntime(comp, logger) @@ -69,7 +69,7 @@ func NewComponentRuntime(comp component.Component, logger *logger.Logger, monito } if comp.ShipperSpec != nil { if comp.ShipperSpec.Spec.Command != nil { - return NewCommandRuntime(comp, monitor) + return NewCommandRuntime(comp, logger, monitor) } return nil, errors.New("components for shippers can only support command runtime") } diff --git a/pkg/component/spec.go b/pkg/component/spec.go index e7ec47a5811..fd109414736 100644 --- a/pkg/component/spec.go +++ b/pkg/component/spec.go @@ -78,6 +78,7 @@ type CommandSpec struct { Args []string `config:"args,omitempty" yaml:"args,omitempty"` Env []CommandEnvSpec `config:"env,omitempty" yaml:"env,omitempty"` Timeouts CommandTimeoutSpec `config:"timeouts" yaml:"timeouts"` + Log CommandLogSpec `config:"log" yaml:"log"` } // CommandEnvSpec is the specification that defines environment variables that will be set to execute the subprocess. @@ -100,6 +101,23 @@ func (t *CommandTimeoutSpec) InitDefaults() { t.Stop = 30 * time.Second } +// CommandLogSpec is the log specification for subprocess. +type CommandLogSpec struct { + LevelKey string `config:"level_key" yaml:"level_key"` + TimeKey string `config:"time_key" yaml:"time_key"` + TimeFormat string `config:"time_format" yaml:"time_format"` + MessageKey string `config:"message_key" yaml:"message_key"` + IgnoreKeys []string `config:"ignore_keys" yaml:"ignore_keys"` +} + +// InitDefaults initialized the defaults for the timeouts. +func (t *CommandLogSpec) InitDefaults() { + t.LevelKey = "log.level" + t.TimeKey = "@timestamp" + t.TimeFormat = "2006-01-02T15:04:05.000Z0700" + t.MessageKey = "message" +} + // ServiceTimeoutSpec is the timeout specification for subprocess. type ServiceTimeoutSpec struct { Checkin time.Duration `config:"checkin" yaml:"checkin"` From 4d7e0fb5cd1db26e0f2d32aab107190bb158a372 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 9 Nov 2022 12:03:50 -0500 Subject: [PATCH 44/49] remove log helper. --- pkg/core/logger/logger.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/pkg/core/logger/logger.go b/pkg/core/logger/logger.go index 8c1aa50e98e..049fd271038 100644 --- a/pkg/core/logger/logger.go +++ b/pkg/core/logger/logger.go @@ -58,13 +58,6 @@ func NewFromConfig(name string, cfg *Config, logInternal bool) (*Logger, error) return new(name, cfg, logInternal) } -// NewWithoutConfig returns a new logger without having a configuration. -// -// Use only when a clean logger is needed, and it is known that the logging configuration has already been performed. -func NewWithoutConfig(name string) *Logger { - return logp.NewLogger(name) -} - func new(name string, cfg *Config, logInternal bool) (*Logger, error) { commonCfg, err := toCommonConfig(cfg) if err != nil { From b6e221608aa6c455d3c16ed1c2ee0e425196a74a Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 9 Nov 2022 13:01:47 -0500 Subject: [PATCH 45/49] Add NewWithoutConfig. 
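
Restore the helper removed in the previous commit: the command runtime
needs a clean logger for the captured stdout/stderr of spawned
components, in a context where the logging configuration is known to
have already been applied.

A minimal usage sketch, mirroring the attachOutErr call site changed in
a later commit in this series (field values are illustrative):

    log := logger.NewWithoutConfig("").With("component", map[string]interface{}{
        "id":     comp.ID,
        "type":   typeStr,
        "binary": binaryName,
    })
    cmd.Stdout = newLogWriter(log.Core(), cmdSpec.Log)
    cmd.Stderr = newLogWriter(log.Core(), cmdSpec.Log)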
--- pkg/core/logger/logger.go | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/pkg/core/logger/logger.go b/pkg/core/logger/logger.go index 049fd271038..8c1aa50e98e 100644 --- a/pkg/core/logger/logger.go +++ b/pkg/core/logger/logger.go @@ -58,6 +58,13 @@ func NewFromConfig(name string, cfg *Config, logInternal bool) (*Logger, error) return new(name, cfg, logInternal) } +// NewWithoutConfig returns a new logger without having a configuration. +// +// Use only when a clean logger is needed, and it is known that the logging configuration has already been performed. +func NewWithoutConfig(name string) *Logger { + return logp.NewLogger(name) +} + func new(name string, cfg *Config, logInternal bool) (*Logger, error) { commonCfg, err := toCommonConfig(cfg) if err != nil { From 925c5b62e1854bb47fb79cb2122397e0649f8016 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 23 Nov 2022 15:35:56 -0500 Subject: [PATCH 46/49] Fix the spawned filestream to ingest logs into elasticsearch for monitoring. --- .../application/monitoring/v1_monitor.go | 86 +++++++++++------- internal/pkg/agent/cmd/run.go | 5 -- pkg/component/runtime/command.go | 10 +-- specs/apm-server.spec.yml | 48 +++++----- specs/auditbeat.spec.yml | 88 ++++++++++--------- specs/cloudbeat.spec.yml | 80 +++++++++-------- specs/filebeat.spec.yml | 4 +- specs/heartbeat.spec.yml | 4 +- specs/metricbeat.spec.yml | 4 +- specs/osquerybeat.spec.yml | 54 ++++++------ specs/packetbeat.spec.yml | 60 +++++++------ 11 files changed, 240 insertions(+), 203 deletions(-) diff --git a/internal/pkg/agent/application/monitoring/v1_monitor.go b/internal/pkg/agent/application/monitoring/v1_monitor.go index 7fb1b22c644..5d05dfc737a 100644 --- a/internal/pkg/agent/application/monitoring/v1_monitor.go +++ b/internal/pkg/agent/application/monitoring/v1_monitor.go @@ -284,19 +284,17 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo streams := []interface{}{ map[string]interface{}{ - idKey: "filestream-monitoring-agent", - // "data_stream" is not used when creating an Input on Filebeat - "data_stream": map[string]interface{}{ - "type": "filestream", - "dataset": "elastic_agent", - "namespace": monitoringNamespace, - }, + idKey: "filestream-monitoring-agent", "type": "filestream", "paths": []interface{}{ filepath.Join(logsDrop, agentName+"-*.ndjson"), filepath.Join(logsDrop, agentName+"-watcher-*.ndjson"), }, - "index": fmt.Sprintf("logs-%%{[data_stream.dataset]}-%s", monitoringNamespace), + "data_stream": map[string]interface{}{ + "type": "logs", + "dataset": "elastic_agent", + "namespace": monitoringNamespace, + }, "close": map[string]interface{}{ "on_state_change": map[string]interface{}{ "inactive": "5m", @@ -313,21 +311,31 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo }, }, "processors": []interface{}{ + // copy original dataset so we can drop the dataset field map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "data_stream", - "fields": map[string]interface{}{ - "type": "logs", - "dataset": "elastic_agent", - "namespace": monitoringNamespace, + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "data_stream.dataset", + "to": "data_stream.dataset_original", + }, }, }, }, + // drop the dataset field so following copy_field can copy to it + map[string]interface{}{ + "drop_fields": map[string]interface{}{ + "fields": []interface{}{ + "data_stream.dataset", + }, + }, + }, + // copy component.dataset as the real 
dataset map[string]interface{}{ "copy_fields": map[string]interface{}{ "fields": []interface{}{ map[string]interface{}{ - "from": "event.dataset", + "from": "component.dataset", "to": "data_stream.dataset", }, }, @@ -335,31 +343,52 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo "ignore_missing": true, }, }, + // possible it's a log message from agent itself (doesn't have component.dataset) map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "elastic_agent", - "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), - "version": b.agentInfo.Version(), - "snapshot": b.agentInfo.Snapshot(), + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "data_stream.dataset_original", + "to": "data_stream.dataset", + }, }, + "fail_on_error": false, }, }, + // drop the original dataset copied and the event.dataset (as it will be updated) map[string]interface{}{ - "add_fields": map[string]interface{}{ - "target": "agent", - "fields": map[string]interface{}{ - "id": b.agentInfo.AgentID(), + "drop_fields": map[string]interface{}{ + "fields": []interface{}{ + "data_stream.dataset_original", + "event.dataset", }, }, }, + // update event.dataset with the now used data_stream.dataset + map[string]interface{}{ + "copy_fields": map[string]interface{}{ + "fields": []interface{}{ + map[string]interface{}{ + "from": "data_stream.dataset", + "to": "event.dataset", + }, + }, + }, + }, + // coming from logger, added by agent (drop) map[string]interface{}{ "drop_fields": map[string]interface{}{ "fields": []interface{}{ - "ecs.version", //coming from logger, already added by libbeat + "ecs.version", }, "ignore_missing": true, }, + }, + // adjust destination data_stream based on the data_stream fields + map[string]interface{}{ + "add_formatted_index": map[string]interface{}{ + "index": "%{[data_stream.type]}-%{[data_stream.dataset]}-%{[data_stream.namespace]}", + }, }}, }, } @@ -370,10 +399,7 @@ func (b *BeatsMonitor) injectLogsInput(cfg map[string]interface{}, componentIDTo "name": "filestream-monitoring-agent", "type": "filestream", useOutputKey: monitoringOutput, - "data_stream": map[string]interface{}{ - "namespace": monitoringNamespace, - }, - "streams": streams, + "streams": streams, }, } inputsNode, found := cfg[inputsKey] diff --git a/internal/pkg/agent/cmd/run.go b/internal/pkg/agent/cmd/run.go index c44c30d1323..e6f9ec8d0f7 100644 --- a/internal/pkg/agent/cmd/run.go +++ b/internal/pkg/agent/cmd/run.go @@ -103,11 +103,6 @@ func run(override cfgOverrider, modifiers ...component.PlatformModifier) error { return err } - // add event.dataset to all log messages; required so all logs go to correct data_stream - logger = logger.With("event", map[string]interface{}{ - "dataset": "elastic_agent", - }) - cfg, err = tryDelayEnroll(ctx, logger, cfg, override) if err != nil { err = errors.New(err, "failed to perform delayed enrollment") diff --git a/pkg/component/runtime/command.go b/pkg/component/runtime/command.go index cc5b9f65634..405a4329db5 100644 --- a/pkg/component/runtime/command.go +++ b/pkg/component/runtime/command.go @@ -462,12 +462,12 @@ func (c *CommandRuntime) getCommandSpec() *component.CommandSpec { func attachOutErr(comp component.Component, cmdSpec *component.CommandSpec, typeStr string, binaryName string) process.CmdOption { return func(cmd *exec.Cmd) error { + dataset := fmt.Sprintf("elastic_agent.%s", strings.ReplaceAll(strings.ReplaceAll(comp.ID, "-", "_"), "/", "_")) logger := 
logger.NewWithoutConfig("").With("component", map[string]interface{}{ - "id": comp.ID, - "type": typeStr, - "binary": binaryName, - }).With("event", map[string]interface{}{ - "dataset": fmt.Sprintf("elastic_agent.%s", strings.ReplaceAll(comp.ID, "-", "_")), + "id": comp.ID, + "type": typeStr, + "binary": binaryName, + "dataset": dataset, }) cmd.Stdout = newLogWriter(logger.Core(), cmdSpec.Log) cmd.Stderr = newLogWriter(logger.Core(), cmdSpec.Log) diff --git a/specs/apm-server.spec.yml b/specs/apm-server.spec.yml index e646e9facce..0545d7ec307 100644 --- a/specs/apm-server.spec.yml +++ b/specs/apm-server.spec.yml @@ -1,23 +1,25 @@ -version: 2 -inputs: - - name: apm - description: "APM Server" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "management.enabled=true" - - "-E" - - "gc_percent=${APMSERVER_GOGC:100}" +version: 2 +inputs: + - name: apm + description: "APM Server" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "management.enabled=true" + - "-E" + - "gc_percent=${APMSERVER_GOGC:100}" + - "-E" + - "logging.to_stderr=true" diff --git a/specs/auditbeat.spec.yml b/specs/auditbeat.spec.yml index f8c46a96873..a54a47fbbe8 100644 --- a/specs/auditbeat.spec.yml +++ b/specs/auditbeat.spec.yml @@ -1,43 +1,45 @@ -version: 2 -inputs: - - name: audit/auditd - description: "Auditd" - platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${AUDITBEAT_GOGC:100}" - - "-E" - - "auditbeat.config.modules.enabled=false" - - name: audit/file_integrity - description: "Audit File Integrity" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: audit/system - description: "Audit System" - platforms: *platforms - outputs: *outputs - command: - args: *args +version: 2 +inputs: + - name: audit/auditd + description: "Auditd" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${AUDITBEAT_GOGC:100}" + - "-E" + - "auditbeat.config.modules.enabled=false" + - name: audit/file_integrity + description: "Audit File Integrity" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: audit/system + description: "Audit System" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/cloudbeat.spec.yml b/specs/cloudbeat.spec.yml index 1ecbe47e330..337ac250622 100644 --- a/specs/cloudbeat.spec.yml +++ b/specs/cloudbeat.spec.yml @@ -1,39 +1,41 @@ -version: 2 -inputs: - - name: cloudbeat - description: "Cloudbeat" - 
platforms: &platforms - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: &outputs - - elasticsearch - - kafka - - logstash - - redis - command: - args: &args - - "-E" - - "management.enabled=true" - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "gc_percent=${CLOUDBEAT_GOGC:100}" - - name: cloudbeat/cis_k8s - description: "CIS Kubernetes monitoring" - platforms: *platforms - outputs: *outputs - command: - args: *args - - name: cloudbeat/cis_eks - description: "CIS elastic Kubernetes monitoring" - platforms: *platforms - outputs: *outputs - command: - args: *args \ No newline at end of file +version: 2 +inputs: + - name: cloudbeat + description: "Cloudbeat" + platforms: &platforms + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: &outputs + - elasticsearch + - kafka + - logstash + - redis + command: + args: &args + - "-E" + - "management.enabled=true" + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${CLOUDBEAT_GOGC:100}" + - name: cloudbeat/cis_k8s + description: "CIS Kubernetes monitoring" + platforms: *platforms + outputs: *outputs + command: + args: *args + - name: cloudbeat/cis_eks + description: "CIS elastic Kubernetes monitoring" + platforms: *platforms + outputs: *outputs + command: + args: *args diff --git a/specs/filebeat.spec.yml b/specs/filebeat.spec.yml index e18fcbb1e65..609fa1f5804 100644 --- a/specs/filebeat.spec.yml +++ b/specs/filebeat.spec.yml @@ -26,7 +26,9 @@ inputs: - "-E" - "management.enabled=true" - "-E" - - "logging.level=debug" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" - "-E" - "gc_percent=${FILEBEAT_GOGC:100}" - "-E" diff --git a/specs/heartbeat.spec.yml b/specs/heartbeat.spec.yml index ba6a08934b8..4036020396a 100644 --- a/specs/heartbeat.spec.yml +++ b/specs/heartbeat.spec.yml @@ -21,7 +21,9 @@ inputs: - "-E" - "management.enabled=true" - "-E" - - "logging.level=debug" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" - "-E" - "gc_percent=${HEARTBEAT_GOGC:100}" - name: synthetics/http diff --git a/specs/metricbeat.spec.yml b/specs/metricbeat.spec.yml index b7c88ad4864..e795c3b6710 100644 --- a/specs/metricbeat.spec.yml +++ b/specs/metricbeat.spec.yml @@ -26,7 +26,9 @@ inputs: - "-E" - "management.enabled=true" - "-E" - - "logging.level=debug" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" - "-E" - "gc_percent=${METRICBEAT_GOGC:100}" - "-E" diff --git a/specs/osquerybeat.spec.yml b/specs/osquerybeat.spec.yml index 31edb9a3edb..2bf4e53b8f8 100644 --- a/specs/osquerybeat.spec.yml +++ b/specs/osquerybeat.spec.yml @@ -1,26 +1,28 @@ -version: 2 -inputs: - - name: osquery - description: "Osquery" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${OSQUERYBEAT_GOGC:100}" +version: 2 +inputs: + - name: osquery + description: "Osquery" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + 
command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${OSQUERYBEAT_GOGC:100}" diff --git a/specs/packetbeat.spec.yml b/specs/packetbeat.spec.yml index 0519078cac8..cd788b89add 100644 --- a/specs/packetbeat.spec.yml +++ b/specs/packetbeat.spec.yml @@ -1,29 +1,31 @@ -version: 2 -inputs: - - name: packet - description: "Packet Capture" - platforms: - - linux/amd64 - - linux/arm64 - - darwin/amd64 - - darwin/arm64 - - windows/amd64 - - container/amd64 - - container/arm64 - outputs: - - elasticsearch - - kafka - - logstash - - redis - command: - args: - - "-E" - - "setup.ilm.enabled=false" - - "-E" - - "setup.template.enabled=false" - - "-E" - - "management.enabled=true" - - "-E" - - "logging.level=debug" - - "-E" - - "gc_percent=${PACKETBEAT_GOGC:100}" +version: 2 +inputs: + - name: packet + description: "Packet Capture" + platforms: + - linux/amd64 + - linux/arm64 + - darwin/amd64 + - darwin/arm64 + - windows/amd64 + - container/amd64 + - container/arm64 + outputs: + - elasticsearch + - kafka + - logstash + - redis + command: + args: + - "-E" + - "setup.ilm.enabled=false" + - "-E" + - "setup.template.enabled=false" + - "-E" + - "management.enabled=true" + - "-E" + - "logging.level=info" + - "-E" + - "logging.to_stderr=true" + - "-E" + - "gc_percent=${PACKETBEAT_GOGC:100}" From c3f45d4c0a77b399d7c35496e9017d9ec600f492 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 23 Nov 2022 15:41:47 -0500 Subject: [PATCH 47/49] Add changelog entry. --- ...pawned-components-to-simplify-logging.yaml | 31 +++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml diff --git a/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml b/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml new file mode 100644 index 00000000000..8a480c1666e --- /dev/null +++ b/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml @@ -0,0 +1,31 @@ +# Kind can be one of: +# - breaking-change: a change to previously-documented behavior +# - deprecation: functionality that is being removed in a later release +# - bug-fix: fixes a problem in a previous version +# - enhancement: extends functionality but does not break or fix existing behavior +# - feature: new functionality +# - known-issue: problems that we are aware of in a given version +# - security: impacts on the security of a product or a user’s deployment. +# - upgrade: important information for someone upgrading from a prior version +# - other: does not fit into any of the other categories +kind: feature + +# Change summary; a 80ish characters long description of the change. +summary: Capture stdout/stderr of all spawned components to simplify logging + +# Long description; in case the summary is not enough to describe the change +# this field accommodate a description without length limits. +#description: + +# Affected component; a word indicating the component this changeset affects. +component: + +# PR number; optional; the PR number that added the changeset. +# If not present is automatically filled by the tooling finding the PR where this changelog fragment has been added. 
+# NOTE: the tooling supports backports, so it's able to fill the original PR number instead of the backport PR number. +# Please provide it if you are adding a fragment for a different PR. +pr: 1702 + +# Issue number; optional; the GitHub issue related to this changeset (either closes or is part of). +# If not present is automatically filled by the tooling with the issue linked to the PR number. +issue: 221 From 639144c44414bc2e55f11f1ab1fc2ed5392d982e Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 28 Nov 2022 11:32:26 -0500 Subject: [PATCH 48/49] Remove debug print. --- pkg/component/runtime/log_writer.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/pkg/component/runtime/log_writer.go b/pkg/component/runtime/log_writer.go index 5d772dfd240..6825769f364 100644 --- a/pkg/component/runtime/log_writer.go +++ b/pkg/component/runtime/log_writer.go @@ -8,7 +8,6 @@ import ( "bytes" "encoding/json" "errors" - "fmt" "strings" "time" @@ -70,9 +69,6 @@ func (r *logWriter) Write(p []byte) (int, error) { continue } str := strings.TrimSpace(string(line)) - if str[0:1] == "ty" { - fmt.Println("found it") - } // try to parse line as JSON if str[0] == '{' && r.handleJSON(str) { // handled as JSON From 6d3db5c25cef37bc37ae6006ef0f7e7d5dcd170f Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 28 Nov 2022 14:11:58 -0500 Subject: [PATCH 49/49] Update 1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml --- ...ut-stderr-of-all-spawned-components-to-simplify-logging.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml b/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml index 8a480c1666e..8dfa6a9aa2f 100644 --- a/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml +++ b/changelog/fragments/1669236059-Capture-stdout-stderr-of-all-spawned-components-to-simplify-logging.yaml @@ -11,7 +11,7 @@ kind: feature # Change summary; a 80ish characters long description of the change. -summary: Capture stdout/stderr of all spawned components to simplify logging +summary: Capture stdout/stderr of all spawned components and adjust default log level to info for all components # Long description; in case the summary is not enough to describe the change # this field accommodate a description without length limits.