From 2c331180c074c2a9ca4a3f4d8f3eb2f1b34e897d Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Thu, 25 Feb 2021 10:23:43 +0200 Subject: [PATCH 1/6] Add agent_on_k8s docs and k8s provider (#401) --- .../elastic-agent-providers.asciidoc | 182 ++++++++++++++++++ .../running-on-kubernetes-standalone.asciidoc | 138 +++++++++++++ 2 files changed, 320 insertions(+) create mode 100644 docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc diff --git a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc index e30a9f8a82..a5b35b915d 100644 --- a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc +++ b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc @@ -309,3 +309,185 @@ inputs: - add_fields: container.name: other-container ---- + +[[kubernetes-provider]] +==== Kubernetes Provider + +Provides inventory information from Kubernetes. The available keys are: + + +|=== +|Key |Type |Description + +|`kubernetes.namespace` +|`string` +|Namespace of the Pod + +|`kubernetes.pod.name` +|`string` +|Name of the Pod + +|`kubernetes.pod.uuid` +|`string` +|UUID of the Pod + +|`kubernetes.pod.ip` +|`string` +|IP of the Pod + +|`kubernetes.pod.labels` +|`object` +|Object of labels of the Pod + +|`kubernetes.container.name` +|`string` +|Name of the container + +|`kubernetes.container.runtime` +|`string` +|Runtime of the container + +|`kubernetes.container.id` +|`string` +|ID of the container + +|`kubernetes.container.image` +|`string` +|Image of the container +|=== + +Imagine that the Kubernetes provider provides the following inventory: + +[source,json] +---- +[ + { + "id": "1", + "mapping:": {"namespace": "kube-system", "pod": {"name": "kube-controllermanger"}}, + "processors": {"add_fields": {"container.name": "my-container"}} + }, + { + "id": "2", + "mapping:": {"namespace": "kube-system", "pod": {"name": "kube-scheduler"}}, 
+ "processors": {"add_fields": {"kubernetes.namespace": "kube-system", "kubernetes.pod": {"name": "kube-scheduler"}} + } +] +---- + +{agent} automatically prefixes the result with `kubernetes`: + + +[source,json] +--- +[ + {"kubernetes": {"id": "1", "namespace": "kube-system", "pod": {"name": "kube-controllermanger"}}, + {"kubernetes": {"id": "2", "namespace": "kube-system", "pod": {"name": "kube-scheduler"}}, +] +--- + +===== Provider configuration + +[source,yaml] +---- +providers.kubernetes: + node: ${NODE_NAME} + scope: node + #kube_config: /Users/elastic-agent/.kube/config + #sync_period: 600 + #cleanup_timeout: 60 +---- + +`node`:: (Optional) Specify the node to scope {agent} to in case it +cannot be accurately detected, as when running {agent} in host network +mode. +`cleanup_timeout`:: (Optional) Specify the time of inactivity before stopping the +running configuration for a container, 60s by default. +`sync_period`:: (Optional) Specify timeout for listing historical resources. +`kube_config`:: (Optional) Use given config file as configuration for Kubernetes +client. If kube_config is not set, KUBECONFIG environment variable will be +checked and if not present it will fall back to InCluster. +`scope`:: (Optional) Specify at what level autodiscover needs to be done at. `scope` can +either take `node` or `cluster` as values. `node` scope allows discovery of resources in +the specified node. `cluster` scope allows cluster wide discovery. Only `pod` and `node` resources +can be discovered at node scope. 
+ +===== Autodiscover target Pods + +To set the target host dynamically only for a targeted Pod based on its labels, use a variable in the +{agent} policy to return path information from the provider: + +[source,yaml] +---- +- data_stream: + dataset: kubernetes.scheduler + type: metrics + metricsets: + - scheduler + hosts: + - '${kubernetes.pod.ip}:10251' + period: 10s + condition: ${kubernetes.pod.labels.component} == 'kube-scheduler' +---- + +The policy generated by this configuration looks like: + +[source,yaml] +---- +- hosts: + - 172.18.0.4:10251 + metricsets: + - scheduler + module: kubernetes + period: 10s + processors: + - add_fields: + fields: + namespace: kube-system + pod: + ip: 172.18.0.4 + labels: + component: kube-scheduler + tier: control-plane + name: kube-scheduler-kind-control-plane + uid: 6da04645-04b4-4cb2-b203-2ad58abc6cdf + target: kubernetes +---- + +To set the log path of Pods dynamically in the configuration, use a variable in the +{agent} policy to return path information from the provider: + +[source,yaml] +---- +streams: + - data_stream: + dataset: generic + symlinks: true + paths: + - /var/log/containers/*${kubernetes.container.id}.log +---- + +The policy generated by this configuration looks like: + +[source,yaml] +---- +- paths: + - /var/log/containers/*c957652eca53594ce496c7b237d19f05be339ebfe281b99ce1c0a0401e48ce3a.log + processors: + - add_fields: + fields: + container: + id: c957652eca53594ce496c7b237d19f05be339ebfe281b99ce1c0a0401e48ce3a + image: k8s.gcr.io/kube-apiserver:v1.18.2 + name: kube-apiserver + runtime: containerd + namespace: kube-system + pod: + ip: 172.18.0.4 + labels: + component: kube-apiserver + tier: control-plane + name: kube-apiserver-kind-control-plane + uid: f8743f90-50a4-4ef8-9fe9-78c245eb8bf3 + target: kubernetes + symlinks: true +---- \ No newline at end of file diff --git a/docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc 
b/docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc new file mode 100644 index 0000000000..87ce15bef9 --- /dev/null +++ b/docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc @@ -0,0 +1,138 @@ +[[running-on-kubernetes]] +[role="xpack"] +=== Run {agent} on Kubernetes + +You can use {agent} https://www.docker.elastic.co/r/beats/elastic-agent[Docker images] on Kubernetes to +retrieve cluster metrics. + +ifeval::["{release-state}"=="unreleased"] + +However, version {version} of {agent} has not yet been +released, so no Docker image is currently available for this version. + +endif::[] + + +[float] +==== Kubernetes deploy manifests + +You deploy {agent} in two different ways at the same time: + +* As a https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[DaemonSet] +to ensure that there's a running instance on each node of the cluster. These +instances are used to retrieve most metrics from the host, such as system +metrics, Docker stats, and metrics from all the services running on top of +Kubernetes. + +* As a single {agent} instance created using a https://kubernetes.io/docs/concepts/workloads/controllers/Deployment/[Deployment]. +This instance is used to retrieve metrics that are unique for the whole +cluster, such as Kubernetes events or +https://github.com/kubernetes/kube-state-metrics[kube-state-metrics]. If `kube-state-metrics` is not already +running, deploy it now (see the +https://github.com/kubernetes/kube-state-metrics#kubernetes-deployment[Kubernetes +deployment] docs) + +Everything is deployed under the `kube-system` namespace by default. To change +the namespace, modify the manifest file. 
+ +To download the manifest file, run: + +["source", "sh", subs="attributes"] +------------------------------------------------ +curl -L -O https://raw.githubusercontent.com/elastic/beats/{branch}/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml +------------------------------------------------ + +This manifest includes Kubernetes integration to collect Kubernetes metrics, +System integration to collect system level metrics/logs from nodes and +Pod's log collection using <>. + +[float] +==== Settings + +User need to set Elasticsearch settings before deploying the manifest: + +[source,yaml] +------------------------------------------------ +- name: ES_USERNAME + value: "elastic" +- name: ES_PASSWORD + value: "passpassMyStr0ngP@ss" +- name: ES_HOST + value: "https://somesuperhostiduuid.europe-west1.gcp.cloud.es.io:443" +------------------------------------------------ + +[float] +===== Running {agent} on master nodes + +Kubernetes master nodes can use https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/[taints] +to limit the workloads that can run on them. 
The manifest for standalone {agent} defines +tolerations so as to run on master nodes too and being able to collect metrics from the control plane +components of Kuberentes (scheduler, controller manager) +To disable {agent} from running on master nodes remove the following part of the Daemonset spec: + +[source,yaml] +------------------------------------------------ +spec: + tolerations: + - key: node-role.kubernetes.io/master + effect: NoSchedule +------------------------------------------------ + + +[float] +==== Deploy +To deploy to Kubernetes, run: + +["source", "sh", subs="attributes"] +------------------------------------------------ +kubectl create -f elastic-agent-standalone-kubernetes.yaml +------------------------------------------------ + +To check the status, run: + +["source", "sh", subs="attributes"] +------------------------------------------------ +$ kubectl -n kube-system get pods -l app=elastic-agent +NAME READY STATUS RESTARTS AGE +elastic-agent-4665d 1/1 Running 0 81m +elastic-agent-9f466c4b5-l8cm8 1/1 Running 0 81m +elastic-agent-fj2z9 1/1 Running 0 81m +elastic-agent-hs4pb 1/1 Running 0 81m +------------------------------------------------ + +[float] +==== Autodiscover targeted Pods + +It is possible to define autodiscover conditions so as to allow {agent} to automatically +identify Pods and start collecting from them using predefined integrations. 
For example if a +user want to automatically identify a Redis Pod and start monitoring it using the Redis integration +the following configuration should be added as an extra input in the Daemonset manifest: + +[source,yaml] +------------------------------------------------ +- name: redis + type: redis/metrics + use_output: default + meta: + package: + name: redis + version: 0.3.6 + data_stream: + namespace: default + streams: + - data_stream: + dataset: redis.info + type: metrics + metricsets: + - info + hosts: + - '${kubernetes.pod.ip}:6379' + idle_timeout: 20s + maxconn: 10 + network: tcp + period: 10s + condition: ${kubernetes.pod.labels.app} == 'redis' +------------------------------------------------ + +Users can find more information about how to shape their dynamic inputs +for autodiscover at <> docs. \ No newline at end of file From 17435762bce27e0947a9f52f105ffdaa7b6c0eab Mon Sep 17 00:00:00 2001 From: Brandon Morelli Date: Wed, 21 Apr 2021 10:05:45 -0700 Subject: [PATCH 2/6] docs: add discrete tags --- .../elastic-agent/elastic-agent-providers.asciidoc | 3 +++ 1 file changed, 3 insertions(+) diff --git a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc index a5b35b915d..a23aecffd9 100644 --- a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc +++ b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc @@ -310,6 +310,7 @@ inputs: container.name: other-container ---- +[discrete] [[kubernetes-provider]] ==== Kubernetes Provider @@ -385,6 +386,7 @@ Imagine that the Kubernetes provider provides the following inventory: ] --- +[discrete] ===== Provider configuration [source,yaml] @@ -411,6 +413,7 @@ either take `node` or `cluster` as values. `node` scope allows discovery of reso the specified node. `cluster` scope allows cluster wide discovery. Only `pod` and `node` resources can be discovered at node scope. 
+[discrete] ===== Autodiscover target Pods To set the target host dynamically only for a targeted Pod based on its labels, use a variable in the From 4e690eaffeb2465ce458f5b92370cb12f2eb2663 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Thu, 1 Apr 2021 22:49:19 +0300 Subject: [PATCH 3/6] Add kubernetes_secrets provider docs (#482) # Conflicts: # docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc --- .../elastic-agent-providers.asciidoc | 35 +++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc index a23aecffd9..43211ccfb9 100644 --- a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc +++ b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc @@ -159,6 +159,41 @@ foo=bar elastic-agent run You can reference the environment variable as `${env.foo}`. +[discrete] +[[kubernetes_secrets-provider]] +==== Kubernetes Secrets Provider + +Provides access to the Kubernetes Secrets API. + +Provider needs a `kubeconfig` file so as to establish connection to Kubernetes API, +or it can automatically reach the API if it runs in an inCluster environment (Agent runs as Pod). + +[source,yaml] +---- +providers.kubernetes_secrets: + #kube_config: /Users/elastic-agent/.kube/config +---- + +You can reference the Kubernetes Secrets variable as `${kubernetes_secrets.default.somesecret.value}`, +where `default` is the namespace of the Secret, `somesecret` is the name of the Secret and `value` the field +of the Secret to access. + +If you run Agent on Kubernetes the proper rule in the `ClusterRole` is required so as Agent Pod to have access +to Secrets API: + +[source,yaml] +---- +- apiGroups: [""] + resources: + - secrets + verbs: ["get"] +---- + +CAUTION: The above rule will give permission to Agent Pod to access Kubernetes Secrets API. 
+This means that anyone who have access to Agent Pod (`kubectl exec` for example) will be able to +access Kubernetes Secrets API and get a specific secret no matter which namespace it belongs to. +In this, this option should be carefully considered. + [discrete] [[dynamic-providers]] === Dynamic Providers From 530d6b98ec1f9a0c4ea266f171b5ecc8416a8f74 Mon Sep 17 00:00:00 2001 From: Chris Mark Date: Thu, 8 Apr 2021 15:49:42 +0300 Subject: [PATCH 4/6] Add leaderelection docs (#499) --- .../elastic-agent-providers.asciidoc | 61 +++++++++++++++++++ 1 file changed, 61 insertions(+) diff --git a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc index 43211ccfb9..e7e358f510 100644 --- a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc +++ b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc @@ -194,6 +194,67 @@ This means that anyone who have access to Agent Pod (`kubectl exec` for example) access Kubernetes Secrets API and get a specific secret no matter which namespace it belongs to. In this, this option should be carefully considered. +[discrete] +[[kubernetes_leaderelection-provider]] +==== Kubernetes LeaderElection Provider + +Provides the option to enable leaderelection between a set of Agents +running on Kubernetes. Only one Agent at a time will be the holder of the leader +lock and based on this, configurations can be enabled with the condition +that the Agent holds the leadership. This can be useful in cases where we want +only Agent between a set of Agents to collect cluster wide metrics for the +Kubernetes cluster like from `kube-state-metrics` endpoint. + +Provider needs a `kubeconfig` file so as to establish connection to Kubernetes API, +or it can automatically reach the API if it runs in an inCluster environment (Agent runs as Pod). 
+ +[source,yaml] +---- +providers.kubernetes_leaderelection: + #kube_config: /Users/elastic-agent/.kube/config + #leader_lease: agent-k8s-leader-lock +---- + +`kube_config`:: (Optional) Use given config file as configuration for Kubernetes +client. If kube_config is not set, KUBECONFIG environment variable will be +checked and if not present it will fall back to InCluster. +`leader_lease`:: (Optional) Specify the name of the leader lease. +By default it is `elastic-agent-cluster-leader`. + +The available key is: + +|=== +|Key |Type |Description + +|`kubernetes_leaderelection.leader` +|`bool` +|The value of the leadership flag. It is `true` when the Agent is the current leader, `false` otherwise. + +|=== + +[discrete] +===== Enabling confgiurations only when on leadership + +In order to leverage leaderelection provider and enable +specific inputs only when Agent holds the leadership lock, users +can use conditions based on `kubernetes_leaderelection.leader` key. +Below we provide an example that will enable `state_container` +metricset only when the leadership lock is acquired: + +[source,yaml] +---- +- data_stream: + dataset: kubernetes.state_container + type: metrics + metricsets: + - state_container + add_metadata: true + hosts: + - 'kube-state-metrics:8080' + period: 10s + condition: ${kubernetes_leaderelection.leader} == true +---- + [discrete] [[dynamic-providers]] === Dynamic Providers From def49663d3f5c74863832cc719b600b33e7d3a3a Mon Sep 17 00:00:00 2001 From: leahleahy <82116004+leahleahy@users.noreply.github.com> Date: Tue, 20 Apr 2021 15:45:26 -0400 Subject: [PATCH 5/6] Update elastic-agent-providers.asciidoc (#520) Co-authored-by: Brandon Morelli Co-authored-by: Brandon Morelli --- .../elastic-agent-providers.asciidoc | 114 ++++++++---------- 1 file changed, 53 insertions(+), 61 deletions(-) diff --git a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc 
index e7e358f510..ca65f25e39 100644 --- a/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc +++ b/docs/en/ingest-management/elastic-agent/elastic-agent-providers.asciidoc @@ -2,23 +2,22 @@ [[providers]] == Providers -Providers supply the key/values pairs that are used for variable substitution +Providers supply the key-value pairs that are used for variable substitution and conditionals. Each provider's keys are automatically prefixed with the name of the provider in the context of the {agent}. -For example, if a provider named `foo` provides -`{"key1": "value1", "key2": "value2"}`, the key/value pairs are placed in -`{"foo" : {"key1": "value1", "key2": "value2"}}`. To reference the keys, you -would use `{{foo.key1}}` and `{{foo.key2}}`. +For example, a provider named `foo` provides +`{"key1": "value1", "key2": "value2"}`, the key-value pairs are placed in +`{"foo" : {"key1": "value1", "key2": "value2"}}`. To reference the keys, use `{{foo.key1}}` and `{{foo.key2}}`. [discrete] === Provider configuration The provider configuration is specified under the top-level `providers` -key in the `elastic-agent.yml` configuration. By default, all registered -providers are enabled. If a provider cannot connect, it produces no mappings. +key in the `elastic-agent.yml` configuration. All registered +providers are enabled by default. If a provider cannot connect, no mappings are produced. -The following example shows two providers, `local` and `local_dynamic`, that +The following example shows two providers (`local` and `local_dynamic`) that supply custom keys: [source,yaml] @@ -33,9 +32,8 @@ providers: - item: key2 ---- -To explicitly disable a provider, set `enabled: false`. Because all providers -are prefixed and have no name collisions, the name of the provider is the key in -the configuration. +Explicitly disable a provider by setting `enabled: false`. All providers +are prefixed without name collisions. 
The name of the provider is in the key in the configuration. [source,yaml] ---- @@ -51,17 +49,16 @@ providers: [[context-providers]] === Context providers -Context providers provide the current context of the running {agent}, for -example, agent information (id, version), host information (hostname, IP +Context providers give the current context of the running {agent}, for +example, agent information (ID, version), host information (hostname, IP addresses), and environment information (environment variables). -They can only provide a single key/value mapping. Think of them as singletons; -an update of a key/value mapping will result in a re-evaluation of the entire -configuration. These providers are normally very static, but that's not -required. It is possible for a value to change resulting in re-evaluation. +They can only provide a single key-value mapping. Think of them as singletons; +an update of a key-value mapping results in a re-evaluation of the entire +configuration. These providers are normally very static, but not +required. A value can change which results in re-evaluation. -Context providers use ECS naming when possible to ensure that documentation and -understanding across projects is the same. +Context providers use the Elastic Common Schema (ECS) naming to ensure consistency and understanding throughout documentation and projects. {agent} supports the following context providers: @@ -148,16 +145,16 @@ Provides information about the current host. The available keys are: [[env-provider]] ==== Env Provider -Provides access to the environment variables as key/values. +Provides access to the environment variables as key-value pairs. -For example, if you set the variable foo: +For example, set the variable `foo`: [source,shell] ---- foo=bar elastic-agent run ---- -You can reference the environment variable as `${env.foo}`. +The environment variable can be referenced as `${env.foo}`. 
[discrete] [[kubernetes_secrets-provider]] @@ -165,8 +162,8 @@ You can reference the environment variable as `${env.foo}`. Provides access to the Kubernetes Secrets API. -Provider needs a `kubeconfig` file so as to establish connection to Kubernetes API, -or it can automatically reach the API if it runs in an inCluster environment (Agent runs as Pod). +The provider needs a `kubeconfig` file to establish connection to the Kubernetes API. +It can automatically reach the API if it's run in an inCluster environment ({agent} runs as pod). [source,yaml] ---- providers.kubernetes_secrets: #kube_config: /Users/elastic-agent/.kube/config ---- -You can reference the Kubernetes Secrets variable as `${kubernetes_secrets.default.somesecret.value}`, +Reference the Kubernetes Secrets variable as `${kubernetes_secrets.default.somesecret.value}`, where `default` is the namespace of the Secret, `somesecret` is the name of the Secret and `value` the field of the Secret to access. -If you run Agent on Kubernetes the proper rule in the `ClusterRole` is required so as Agent Pod to have access -to Secrets API: +If you run agent on Kubernetes, the proper rule in the `ClusterRole` is required to provide access to the {agent} pod in the Secrets API: [source,yaml] ---- - apiGroups: [""] resources: - secrets verbs: ["get"] ---- -CAUTION: The above rule will give permission to Agent Pod to access Kubernetes Secrets API. -This means that anyone who have access to Agent Pod (`kubectl exec` for example) will be able to -access Kubernetes Secrets API and get a specific secret no matter which namespace it belongs to. -In this, this option should be carefully considered. +CAUTION: The above rule will give permission to {agent} pod to access Kubernetes Secrets API. +Anyone who has access to the {agent} pod (`kubectl exec` for example) will also have +access to the Kubernetes Secrets API. This allows access to a specific secret, regardless of the namespace that it belongs to. 
+This option should be carefully considered. [discrete] [[kubernetes_leaderelection-provider]] ==== Kubernetes LeaderElection Provider -Provides the option to enable leaderelection between a set of Agents -running on Kubernetes. Only one Agent at a time will be the holder of the leader +Provides the option to enable leaderelection between a set of {agent}s +running on Kubernetes. Only one {agent} at a time will be the holder of the leader lock and based on this, configurations can be enabled with the condition -that the Agent holds the leadership. This can be useful in cases where we want -only Agent between a set of Agents to collect cluster wide metrics for the -Kubernetes cluster like from `kube-state-metrics` endpoint. +that the {agent} holds the leadership. This is useful in cases where the {agent} between a set of {agent}s collects cluster wide metrics for the Kubernetes cluster, such as the `kube-state-metrics` endpoint. -Provider needs a `kubeconfig` file so as to establish connection to Kubernetes API, -or it can automatically reach the API if it runs in an inCluster environment (Agent runs as Pod). +Provider needs a `kubeconfig` file to establish a connection to Kubernetes API. +It can automatically reach the API if it's running in an inCluster environment ({agent} runs as Pod). [source,yaml] ---- @@ -215,11 +209,11 @@ providers.kubernetes_leaderelection: #leader_lease: agent-k8s-leader-lock ---- -`kube_config`:: (Optional) Use given config file as configuration for Kubernetes +`kube_config`:: (Optional) Use the given config file as configuration for the Kubernetes client. If kube_config is not set, KUBECONFIG environment variable will be -checked and if not present it will fall back to InCluster. +checked and will fall back to InCluster if it's not present. `leader_lease`:: (Optional) Specify the name of the leader lease. -By default it is `elastic-agent-cluster-leader`. +This is set to `elastic-agent-cluster-leader` by default. 
The available key is: @@ -228,17 +222,15 @@ The available key is: |`kubernetes_leaderelection.leader` |`bool` -|The value of the leadership flag. It is `true` when the Agent is the current leader, `false` otherwise. +|The value of the leadership flag. This is set to `true` when the {agent} is the current leader, and is set to `false` otherwise. |=== [discrete] ===== Enabling confgiurations only when on leadership -In order to leverage leaderelection provider and enable -specific inputs only when Agent holds the leadership lock, users -can use conditions based on `kubernetes_leaderelection.leader` key. -Below we provide an example that will enable `state_container` +Use conditions based on the `kubernetes_leaderelection.leader` key to leverage the leaderelection provider and enable specific inputs only when the {agent} holds the leadership lock. +The below example enables the `state_container` metricset only when the leadership lock is acquired: [source,yaml] @@ -259,19 +251,19 @@ metricset only when the leadership lock is acquired: [[dynamic-providers]] === Dynamic Providers -Dynamic providers provide an array of multiple key/value mappings. Each -key/value mapping is combined with the previous context provider's key/value -mapping to provide a new unique key/value mapping that is used to generate a +Dynamic providers give an array of multiple key-value mappings. Each +key-value mapping is combined with the previous context provider's key and value +mapping which provides a new unique mapping that is used to generate a configuration. [discrete] [[local-dynamic-provider]] ==== Local dynamic provider -Allows you to define multiple key/values to generate multiple configurations. +Define multiple key-value pairs to generate multiple configurations. 
-For example, the following agent policy defines a local dynamic provider that -defines 3 values for `item`: +For example, the following {agent} policy defines a local dynamic provider that +defines three values for `item`: [source,yaml] ---- @@ -350,7 +342,7 @@ Provides inventory information from Docker. The available keys are: |Log path of the container |=== -Imagine that the Docker provider provides the following inventory: +For example, the Docker provider provides the following inventory: [source,json] ---- @@ -453,7 +445,7 @@ Provides inventory information from Kubernetes. The available keys are: |Image of the container |=== -Imagine that the Kubernetes provider provides the following inventory: +For example, if the Kubernetes provider provides the following inventory: [source,json] ---- @@ -496,15 +488,15 @@ providers.kubernetes: ---- `node`:: (Optional) Specify the node to scope {agent} to in case it -cannot be accurately detected, as when running {agent} in host network +cannot be accurately detected when running {agent} in host network mode. `cleanup_timeout`:: (Optional) Specify the time of inactivity before stopping the -running configuration for a container, 60s by default. -`sync_period`:: (Optional) Specify timeout for listing historical resources. -`kube_config`:: (Optional) Use given config file as configuration for Kubernetes -client. If kube_config is not set, KUBECONFIG environment variable will be -checked and if not present it will fall back to InCluster. -`scope`:: (Optional) Specify at what level autodiscover needs to be done at. `scope` can +running configuration for a container. This is `60s` by default. +`sync_period`:: (Optional) Specify the timeout for listing historical resources. +`kube_config`:: (Optional) Use the given config file as configuration for Kubernetes +client. If kube_config is not set, the KUBECONFIG environment variable will be +checked and will fall back to InCluster if not present. 
+`scope`:: (Optional) Specify the level for autodiscover. `scope` can either take `node` or `cluster` as values. `node` scope allows discovery of resources in the specified node. `cluster` scope allows cluster wide discovery. Only `pod` and `node` resources can be discovered at node scope. From 8f9148fcc41b05752ead3d39806c34f3d6569135 Mon Sep 17 00:00:00 2001 From: leahleahy <82116004+leahleahy@users.noreply.github.com> Date: Tue, 20 Apr 2021 15:52:04 -0400 Subject: [PATCH 6/6] Update running-on-kubernetes-standalone.asciidoc (#521) Co-authored-by: Brandon Morelli Co-authored-by: Brandon Morelli --- .../running-on-kubernetes-standalone.asciidoc | 34 +++++++++---------- 1 file changed, 16 insertions(+), 18 deletions(-) diff --git a/docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc b/docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc index 87ce15bef9..bbd5998d10 100644 --- a/docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc +++ b/docs/en/ingest-management/elastic-agent/running-on-kubernetes-standalone.asciidoc @@ -2,13 +2,12 @@ [role="xpack"] === Run {agent} on Kubernetes -You can use {agent} https://www.docker.elastic.co/r/beats/elastic-agent[Docker images] on Kubernetes to +Use {agent} https://www.docker.elastic.co/r/beats/elastic-agent[Docker images] on Kubernetes to retrieve cluster metrics. ifeval::["{release-state}"=="unreleased"] -However, version {version} of {agent} has not yet been -released, so no Docker image is currently available for this version. +A Docker image is not currently available for this version, as version {version} of {agent} has not yet been released. 
endif::[] @@ -16,7 +15,7 @@ endif::[] [float] ==== Kubernetes deploy manifests -You deploy {agent} in two different ways at the same time: +Deploy {agent} in two different ways at the same time: * As a https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/[DaemonSet] to ensure that there's a running instance on each node of the cluster. These @@ -32,24 +31,23 @@ running, deploy it now (see the https://github.com/kubernetes/kube-state-metrics#kubernetes-deployment[Kubernetes deployment] docs) -Everything is deployed under the `kube-system` namespace by default. To change -the namespace, modify the manifest file. +Everything is deployed under the `kube-system` namespace by default. Change the namespace by modifying the manifest file. -To download the manifest file, run: +Download the manifest file by running: ["source", "sh", subs="attributes"] ------------------------------------------------ curl -L -O https://raw.githubusercontent.com/elastic/beats/{branch}/deploy/kubernetes/elastic-agent-standalone-kubernetes.yaml ------------------------------------------------ -This manifest includes Kubernetes integration to collect Kubernetes metrics, -System integration to collect system level metrics/logs from nodes and -Pod's log collection using <>. +This manifest includes the Kubernetes integration to collect Kubernetes metrics, +System integration to collect system level metrics and logs from nodes, and +the Pod's log collection using <>. 
[float] ==== Settings -User need to set Elasticsearch settings before deploying the manifest: +Set the Elasticsearch settings before deploying the manifest: [source,yaml] ------------------------------------------------ - name: ES_USERNAME @@ -64,10 +62,10 @@ User need to set Elasticsearch settings before deploying the manifest: [float] ===== Running {agent} on master nodes -Kubernetes master nodes can use https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/[taints] +Kubernetes master nodes use https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/[taints] to limit the workloads that can run on them. The manifest for standalone {agent} defines -tolerations so as to run on master nodes too and being able to collect metrics from the control plane -components of Kuberentes (scheduler, controller manager) +tolerations to run on master nodes, which collects metrics from the control plane +components of Kubernetes (scheduler, controller manager). To disable {agent} from running on master nodes remove the following part of the Daemonset spec: [source,yaml] @@ -103,9 +101,9 @@ elastic-agent-hs4pb 1/1 Running 0 81m [float] ==== Autodiscover targeted Pods -It is possible to define autodiscover conditions so as to allow {agent} to automatically -identify Pods and start collecting from them using predefined integrations. +You can define autodiscover conditions to allow {agent} to automatically identify Pods and start collecting from them using predefined integrations. 
For example, if a +user wants to automatically identify a Redis Pod and start monitoring it using the Redis integration, the following configuration should be added as an extra input in the Daemonset manifest: [source,yaml] @@ -135,4 +133,4 @@ the following configuration should be added as an extra input in the Daemonset m ------------------------------------------------ Users can find more information about how to shape their dynamic inputs -for autodiscover at <> docs. \ No newline at end of file +for autodiscover at <> docs.