diff --git a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml index 10073a873180..76a20ebdfa89 100644 --- a/.buildkite/x-pack/pipeline.xpack.heartbeat.yml +++ b/.buildkite/x-pack/pipeline.xpack.heartbeat.yml @@ -111,7 +111,7 @@ steps: - label: ":windows: x-pack/heartbeat: Win 2016 Unit Tests" key: "mandatory-win-2016-unit-tests" - skip: "skipping due to elastic/beats#23957 and elastic/beats#23958" + skip: "skipping due missing deps, elastic/ingest-dev#3844" command: | Set-Location -Path x-pack/heartbeat mage build test @@ -134,7 +134,6 @@ steps: # Doesn't exist in Jenkins - label: ":windows: x-pack/heartbeat: Win 2022 Unit Tests" key: "mandatory-win-2022-unit-tests" - skip: "skipping due to elastic/beats#23957 and elastic/beats#23958" command: | Set-Location -Path x-pack/heartbeat mage build unitTest @@ -157,7 +156,6 @@ steps: - group: "Extended Windows Tests" key: "x-pack-heartbeat-extended-win-tests" if: build.env("BUILDKITE_PULL_REQUEST") == "false" || build.env("GITHUB_PR_LABELS") =~ /.*[Ww]indows.*/ - skip: "skipping due to elastic/beats#23957 and elastic/beats#23958" steps: - label: ":windows: x-pack/heartbeat: Win 10 Unit Tests" diff --git a/.golangci.yml b/.golangci.yml index b63b813ebc2e..60d77f468000 100755 --- a/.golangci.yml +++ b/.golangci.yml @@ -120,6 +120,11 @@ linters-settings: - errors - fmt reason: "This package is deprecated, use `fmt.Errorf` with `%w` instead" + - github.com/google/uuid: + # Recommended modules that should be used instead. (Optional) + recommendations: + - github.com/gofrs/uuid/v5 + reason: "Use one uuid library consistently across the codebase" gosimple: # Select the Go version to target. The default is '1.13'. diff --git a/.mergify.yml b/.mergify.yml index a1830f2740ba..13a38af591e2 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -147,6 +147,11 @@ pull_request_rules: To fixup this pull request, you need to add the backport labels for the needed branches, such as: * `backport-v8./d.0` is the label to automatically backport to the `8./d` branch. `/d` is the digit + + **NOTE**: `backport-v8.x` has been added to help with the transition to the new branch 8.x. + label: + add: + - backport-v8.x - name: notify the backport has not been merged yet conditions: - -merged @@ -341,3 +346,16 @@ pull_request_rules: labels: - "backport" title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" + - name: backport patches to 8.x branch + conditions: + - merged + - label=backport-v8.x + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "8.x" + labels: + - "backport" + title: "[{{ destination_branch }}](backport #{{ number }}) {{ title }}" diff --git a/CHANGELOG-developer.next.asciidoc b/CHANGELOG-developer.next.asciidoc index 7c53703c73fd..667f55d97695 100644 --- a/CHANGELOG-developer.next.asciidoc +++ b/CHANGELOG-developer.next.asciidoc @@ -202,6 +202,7 @@ The list below covers the major changes between 7.0.0-rc2 and main only. - Bump version of elastic/toutoumomoma to remove internal forks of stdlib debug packages. {pull}40325[40325] - Refactor x-pack/filebeat/input/websocket for generalisation. {pull}40308[40308] - Add a configuration option for TCP/UDP network type. {issue}40407[40407] {pull}40623[40623] +- Added debug logging to parquet reader in x-pack/libbeat/reader. 
{pull}40651[40651] ==== Deprecated diff --git a/CHANGELOG.asciidoc b/CHANGELOG.asciidoc index 52369a29a03d..339720a41b85 100644 --- a/CHANGELOG.asciidoc +++ b/CHANGELOG.asciidoc @@ -3,6 +3,52 @@ :issue: https://github.com/elastic/beats/issues/ :pull: https://github.com/elastic/beats/pull/ +[[release-notes-8.15.1]] +=== Beats version 8.15.1 +https://github.com/elastic/beats/compare/v8.15.0\...v8.15.1[View commits] + +==== Bugfixes + +*Affecting all Beats* + +- Aborts all active connections for Elasticsearch output. {pull}40572[40572] +- Closes beat Publisher on beat stop and by the Agent manager. {pull}40572[40572] +- Fix handling of escaped brackets in syslog structured data. {issue}40445[40445] {pull}40446[40446] + +*Auditbeat* + +- Fix segfaults that may happen if user runs multiple instances of the package metricset {pull}40525[40525] +- Fix incorrect definition of struct utmp for arm64 {pull}40541[40541] + +*Filebeat* + +- Relax requirements in Okta entity analytics provider user and device profile data shape. {pull}40359[40359] +- Fix bug in Okta entity analytics rate limit logic. {issue}40106[40106] {pull}40267[40267] +- Fix order of configuration for EntraID entity analytics provider. {pull}40487[40487] +- Ensure Entra ID request bodies are not truncated and trace logs are rotated before 100MB. {pull}40494[40494] +- The Elasticsearch output now correctly logs the event fields to the event log file {issue}40509[40509] {pull}40512[40512] +- Fix the "No such input type exist: 'azure-eventhub'" error on the Windows platform {issue}40608[40608] {pull}40609[40609] +- awss3 input: Fix handling of SQS notifications that don't contain a region. {pull}40628[40628] +- Fix credential handling when workload identity is being used in GCS input. {issue}39977[39977] {pull}40663[40663] +- Fix high IO and handling of a corrupted registry log file. {pull}35893[35893] +- Fix filestream's registry GC: registry entries will never be removed if clean_inactive is set to "-1". {pull}40258[40258] + +*Metricbeat* + +- Fix first HTTP 401 error when fetching metrics from the Kubelet API caused by a token update {pull}40636[40636] +- Fix needlessly verbose logging in cgroups setup {issue}40620[40620] + +==== Added + +*Filebeat* + +- Enable file ingestion to report detailed status to Elastic Agent {pull}40075[40075] +- Added `ignore_empty_values` flag in `decode_cef` Filebeat processor. {pull}40268[40268] + +*Metricbeat* + +- Added back `elasticsearch.node.stats.jvm.mem.pools.*` to the `node_stats` metricset {pull}40571[40571] + [[release-notes-8.15.0]] === Beats version 8.15.0 https://github.com/elastic/beats/compare/v8.14.3\...v8.15.0[View commits] diff --git a/CHANGELOG.next.asciidoc b/CHANGELOG.next.asciidoc index 832d3f6a99e7..c856b6397439 100644 --- a/CHANGELOG.next.asciidoc +++ b/CHANGELOG.next.asciidoc @@ -60,6 +60,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Update metrics for the vSphere Host metricset. 
{pull}40429[40429] - Mark system process metricsets as running if metrics are partially available {pull}40565[40565] - Added back `elasticsearch.node.stats.jvm.mem.pools.*` to the `node_stats` metricset {pull}40571[40571] +- Add support for snapshot in vSphere virtualmachine metricset {pull}40683[40683] *Osquerybeat* @@ -116,8 +117,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Auditbeat* -- Fix segfaults that may happen if user runs multiple instances of the package metricset {pull}40525[40525] -- Fix incorrect definition of struct utmp for arm64 {pull}40541[40541] *Filebeat* @@ -164,6 +163,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Fix the "No such input type exist: 'azure-eventhub'" error on the Windows platform {issue}40608[40608] {pull}40609[40609] - awss3 input: Fix handling of SQS notifications that don't contain a region. {pull}40628[40628] - Fix credential handling when workload identity is being used in GCS input. {issue}39977[39977] {pull}40663[40663] +- Fix publication of group data from the Okta entity analytics provider. {pull}40681[40681] *Heartbeat* @@ -171,7 +171,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] *Metricbeat* -- Fix first HTTP 401 error when fetching metrics from the Kubelet API caused by a token update {pull}40636[40636] - Fix Azure Monitor 429 error by causing metricbeat to retry the request again. {pull}38294[38294] - Fix fields not being parsed correctly in postgresql/database {issue}25301[25301] {pull}37720[37720] - rabbitmq/queue - Change the mapping type of `rabbitmq.queue.consumers.utilisation.pct` to `scaled_float` from `long` because the values fall within the range of `[0.0, 1.0]`. Previously, conversion to integer resulted in reporting either `0` or `1`. @@ -220,6 +219,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Enable early event encoding in the Elasticsearch output, improving cpu and memory use {pull}38572[38572] - The environment variable `BEATS_ADD_CLOUD_METADATA_PROVIDERS` overrides configured/default `add_cloud_metadata` providers {pull}38669[38669] - When running under Elastic-Agent Kafka output allows dynamic topic in `topic` field {pull}40415[40415] +- The script processor has a new configuration option that only uses the cached javascript sessions and prevents the creation of new javascript sessions. *Auditbeat* @@ -296,6 +296,7 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add new metricset cluster for the vSphere module. {pull}40536[40536] - Disable event normalization for netflow input {pull}40635[40635] - Allow attribute selection in the Active Directory entity analytics provider. {issue}40482[40482] {pull}40662[40662] +- Improve error quality when CEL program does not correctly return an events array. {pull}40580[40580] *Auditbeat* @@ -324,9 +325,11 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] - Add SSL support for aerospike module {pull}38126[38126] - Add new metricset network for the vSphere module. {pull}40559[40559] - Add new metricset resourcepool for the vSphere module. {pull}40456[40456] +- Log the total time taken for GCP `ListTimeSeries` and `AggregatedList` requests {pull}40661[40661] *Metricbeat* +- Add support for new metrics for vSphere module datastorecluster metricset. 
{pull}40694[40694] *Osquerybeat* @@ -374,3 +377,6 @@ https://github.com/elastic/beats/compare/v8.8.1\...main[Check the HEAD diff] ==== Known Issues + + + diff --git a/NOTICE.txt b/NOTICE.txt index e691397655f2..1144db8a2605 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -17497,12 +17497,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/gofrs/uuid -Version: v4.4.0+incompatible +Dependency : github.com/gofrs/uuid/v5 +Version: v5.2.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/gofrs/uuid@v4.4.0+incompatible/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/gofrs/uuid/v5@v5.2.0/LICENSE: Copyright (C) 2013-2018 by Maxim Bublis @@ -18556,43 +18556,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/google/uuid -Version: v1.6.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.6.0/LICENSE: - -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/googleapis/gax-go/v2 Version: v2.13.0 @@ -40751,36 +40714,6 @@ Contents of probable licence file $GOMODCACHE/github.com/godror/knownpb@v0.1.0/L limitations under the License. 
--------------------------------------------------------------------------------- -Dependency : github.com/gofrs/uuid/v5 -Version: v5.2.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gofrs/uuid/v5@v5.2.0/LICENSE: - -Copyright (C) 2013-2018 by Maxim Bublis - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/golang-jwt/jwt/v4 Version: v4.5.0 @@ -42984,6 +42917,43 @@ Contents of probable licence file $GOMODCACHE/github.com/google/shlex@v0.0.0-201 limitations under the License. +-------------------------------------------------------------------------------- +Dependency : github.com/google/uuid +Version: v1.6.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.6.0/LICENSE: + +Copyright (c) 2009,2014 Google Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/googleapis/enterprise-certificate-proxy Version: v0.3.2 diff --git a/dev-tools/packaging/package_test.go b/dev-tools/packaging/package_test.go index 9c6d8ecadfe1..a15a72cd097c 100644 --- a/dev-tools/packaging/package_test.go +++ b/dev-tools/packaging/package_test.go @@ -466,6 +466,10 @@ func checkDockerEntryPoint(t *testing.T, p *packageFile, info *dockerInfo) { }) } +// {BeatName}-{OptionalVariantSuffix}-oss-{version}-{os}-{arch}.docker.tar.gz +// For example, `heartbeat-oss-8.16.0-linux-arm64.docker.tar.gz` +var ossSuffixRegexp = regexp.MustCompile(`^(\w+)(-\w+)?-oss-.+$`) + func checkDockerLabels(t *testing.T, p *packageFile, info *dockerInfo, file string) { vendor := info.Config.Labels["org.label-schema.vendor"] if vendor != "Elastic" { @@ -474,12 +478,7 @@ func checkDockerLabels(t *testing.T, p *packageFile, info *dockerInfo, file stri t.Run(fmt.Sprintf("%s license labels", p.Name), func(t *testing.T) { expectedLicense := "Elastic License" - ossPrefix := strings.Join([]string{ - info.Config.Labels["org.label-schema.name"], - "oss", - info.Config.Labels["org.label-schema.version"], - }, "-") - if strings.HasPrefix(filepath.Base(file), ossPrefix) { + if ossSuffixRegexp.MatchString(filepath.Base(file)) { expectedLicense = "ASL 2.0" } licenseLabels := []string{ diff --git a/dev-tools/packaging/packages.yml b/dev-tools/packaging/packages.yml index 06351015169f..7db59f775592 100644 --- a/dev-tools/packaging/packages.yml +++ b/dev-tools/packaging/packages.yml @@ -1,25 +1,24 @@ --- - # This file contains the package specifications for both Community Beats and # Official Beats. The shared section contains YAML anchors that are used to # define common parts of the package in order to not repeat ourselves. shared: - &common - name: '{{.BeatName}}' - service_name: '{{.BeatServiceName}}' - os: '{{.GOOS}}' - arch: '{{.PackageArch}}' - vendor: '{{.BeatVendor}}' - version: '{{ beat_version }}' - license: '{{.BeatLicense}}' - url: '{{.BeatURL}}' - description: '{{.BeatDescription}}' + name: "{{.BeatName}}" + service_name: "{{.BeatServiceName}}" + os: "{{.GOOS}}" + arch: "{{.PackageArch}}" + vendor: "{{.BeatVendor}}" + version: "{{ beat_version }}" + license: "{{.BeatLicense}}" + url: "{{.BeatURL}}" + description: "{{.BeatDescription}}" # Deb/RPM spec for community beats. 
- &deb_rpm_spec <<: *common - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh' + post_install_script: "{{ elastic_beats_dir }}/dev-tools/packaging/files/linux/systemd-daemon-reload.sh" files: /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} @@ -28,23 +27,23 @@ shared: source: fields.yml mode: 0644 /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' + source: "{{ repo.RootDir }}/LICENSE.txt" mode: 0644 /usr/share/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' + source: "{{ repo.RootDir }}/NOTICE.txt" mode: 0644 /usr/share/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl" mode: 0644 /usr/share/{{.BeatName}}/.build_hash.txt: content: > {{ commit }} mode: 0644 /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' + source: "{{.BeatName}}.reference.yml" mode: 0644 /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' + source: "{{.BeatName}}.yml" mode: 0600 config: true /usr/share/{{.BeatName}}/kibana: @@ -54,13 +53,13 @@ shared: source: build/golang-crossbuild/god-{{.GOOS}}-{{.Platform.Arch}} mode: 0755 /usr/bin/{{.BeatName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/beatname.sh.tmpl" mode: 0755 /lib/systemd/system/{{.BeatServiceName}}.service: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/linux/systemd.unit.tmpl" mode: 0644 /etc/init.d/{{.BeatServiceName}}: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/{{.PackageType}}/init.sh.tmpl" mode: 0755 # MacOS pkg spec for community beats. @@ -70,29 +69,29 @@ shared: # OS X 10.8 Mountain Lion is the oldest supported by Go 1.10. 
# https://golang.org/doc/go1.10#ports min_supported_osx_version: 10.8 - identifier: 'co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}' + identifier: "co.{{.BeatVendor | tolower}}.beats.{{.BeatName}}" install_path: /Library/Application Support - pre_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl' - post_install_script: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl' + pre_install_script: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/preinstall.tmpl" + post_install_script: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/scripts/postinstall.tmpl" files: /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} mode: 0755 /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' + source: "{{ repo.RootDir }}/LICENSE.txt" mode: 0644 /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' + source: "{{ repo.RootDir }}/NOTICE.txt" mode: 0644 /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl" mode: 0644 /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/.build_hash.txt: content: > {{ commit }} mode: 0644 /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/{{.identifier}}.plist: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/darwin/launchd-daemon.plist.tmpl" mode: 0644 /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/kibana: source: _meta/kibana.generated @@ -101,38 +100,38 @@ shared: source: fields.yml mode: 0644 /etc/{{.BeatName}}/{{.BeatName}}.reference.yml: - source: '{{.BeatName}}.reference.yml' + source: "{{.BeatName}}.reference.yml" mode: 0644 /etc/{{.BeatName}}/{{.BeatName}}.yml: - source: '{{.BeatName}}.yml' + source: "{{.BeatName}}.yml" mode: 0600 config: true - &binary_files - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} mode: 0755 fields.yml: source: fields.yml mode: 0644 LICENSE.txt: - source: '{{ repo.RootDir }}/LICENSE.txt' + source: "{{ repo.RootDir }}/LICENSE.txt" mode: 0644 NOTICE.txt: - source: '{{ repo.RootDir }}/NOTICE.txt' + source: "{{ repo.RootDir }}/NOTICE.txt" mode: 0644 README.md: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/common/README.md.tmpl" mode: 0644 .build_hash.txt: content: > {{ commit }} mode: 0644 - '{{.BeatName}}.reference.yml': - source: '{{.BeatName}}.reference.yml' + "{{.BeatName}}.reference.yml": + source: "{{.BeatName}}.reference.yml" mode: 0644 - '{{.BeatName}}.yml': - source: '{{.BeatName}}.yml' + "{{.BeatName}}.yml": + source: "{{.BeatName}}.yml" mode: 0600 config: true kibana: @@ -151,44 +150,54 @@ shared: files: <<: *binary_files install-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl' + template: "{{ elastic_beats_dir 
}}/dev-tools/packaging/templates/windows/install-service.ps1.tmpl" mode: 0755 uninstall-service-{{.BeatName}}.ps1: - template: '{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl' + template: "{{ elastic_beats_dir }}/dev-tools/packaging/templates/windows/uninstall-service.ps1.tmpl" mode: 0755 - &docker_spec <<: *binary_spec extra_vars: - from: 'ubuntu:20.04' - buildFrom: 'ubuntu:20.04' - user: '{{ .BeatName }}' - linux_capabilities: '' + from: "--platform=linux/amd64 ubuntu:20.04" + buildFrom: "--platform=linux/amd64 cgr.dev/chainguard/wolfi-base" + user: "{{ .BeatName }}" + linux_capabilities: "" files: - '{{.BeatName}}.yml': - source: '{{.BeatName}}.docker.yml' + "{{.BeatName}}.yml": + source: "{{.BeatName}}.docker.yml" mode: 0600 config: true - &docker_arm_spec <<: *docker_spec extra_vars: - from: 'arm64v8/ubuntu:20.04' - buildFrom: 'arm64v8/ubuntu:20.04' + from: "--platform=linux/arm64 ubuntu:20.04" + buildFrom: "--platform=linux/arm64 cgr.dev/chainguard/wolfi-base" - &docker_ubi_spec extra_vars: - image_name: '{{.BeatName}}-ubi' - from: 'docker.elastic.co/ubi9/ubi-minimal' + image_name: "{{.BeatName}}-ubi" + from: "--platform=linux/amd64 docker.elastic.co/ubi9/ubi-minimal" - &docker_arm_ubi_spec extra_vars: - image_name: '{{.BeatName}}-ubi' - from: 'registry.access.redhat.com/ubi9/ubi-minimal:9.4' + image_name: "{{.BeatName}}-ubi" + from: "--platform=linux/arm64 docker.elastic.co/ubi9/ubi-minimal" + + - &docker_wolfi_spec + extra_vars: + image_name: "{{.BeatName}}-wolfi" + from: "--platform=linux/amd64 cgr.dev/chainguard/wolfi-base" + + - &docker_arm_wolfi_spec + extra_vars: + image_name: "{{.BeatName}}-wolfi" + from: "--platform=linux/arm64 cgr.dev/chainguard/wolfi-base" - &elastic_docker_spec extra_vars: - repository: 'docker.elastic.co/beats' + repository: "docker.elastic.co/beats" # # License modifiers for Apache 2.0 @@ -197,21 +206,21 @@ shared: license: "ASL 2.0" files: LICENSE.txt: - source: '{{ repo.RootDir }}/licenses/APACHE-LICENSE-2.0.txt' + source: "{{ repo.RootDir }}/licenses/APACHE-LICENSE-2.0.txt" mode: 0644 - &apache_license_for_deb_rpm license: "ASL 2.0" files: /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/licenses/APACHE-LICENSE-2.0.txt' + source: "{{ repo.RootDir }}/licenses/APACHE-LICENSE-2.0.txt" mode: 0644 - &apache_license_for_macos_pkg license: "ASL 2.0" files: /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/licenses/APACHE-LICENSE-2.0.txt' + source: "{{ repo.RootDir }}/licenses/APACHE-LICENSE-2.0.txt" mode: 0644 # @@ -221,21 +230,21 @@ shared: license: "Elastic License" files: LICENSE.txt: - source: '{{ repo.RootDir }}/licenses/ELASTIC-LICENSE.txt' + source: "{{ repo.RootDir }}/licenses/ELASTIC-LICENSE.txt" mode: 0644 - &elastic_license_for_deb_rpm license: "Elastic License" files: /usr/share/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/licenses/ELASTIC-LICENSE.txt' + source: "{{ repo.RootDir }}/licenses/ELASTIC-LICENSE.txt" mode: 0644 - &elastic_license_for_macos_pkg license: "Elastic License" files: /Library/Application Support/{{.BeatVendor}}/{{.BeatName}}/LICENSE.txt: - source: '{{ repo.RootDir }}/licenses/ELASTIC-LICENSE.txt' + source: "{{ repo.RootDir }}/licenses/ELASTIC-LICENSE.txt" mode: 0644 # specs is a list of named packaging "flavors". 
@@ -263,10 +272,17 @@ specs: <<: *deb_rpm_spec - os: linux + arch: amd64 types: [docker] spec: <<: *docker_spec + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + - os: aix types: [tgz] spec: @@ -280,43 +296,77 @@ specs: spec: <<: *windows_binary_spec <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' + name: "{{.BeatName}}-oss" - os: darwin types: [tgz] spec: <<: *binary_spec <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' + name: "{{.BeatName}}-oss" - os: linux types: [tgz] spec: <<: *binary_spec <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' + name: "{{.BeatName}}-oss" - os: linux types: [deb, rpm] spec: <<: *deb_rpm_spec <<: *apache_license_for_deb_rpm - name: '{{.BeatName}}-oss' + name: "{{.BeatName}}-oss" + + - os: linux + types: [docker] + arch: amd64 + spec: + <<: *docker_spec + <<: *elastic_docker_spec + <<: *apache_license_for_binaries + name: "{{.BeatName}}-oss" - os: linux types: [docker] + arch: amd64 spec: <<: *docker_spec + <<: *docker_wolfi_spec <<: *elastic_docker_spec <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' + name: "{{.BeatName}}-wolfi-oss" + extra_vars: + image_name: "{{.BeatName}}-wolfi-oss" + + - os: linux + types: [docker] + arch: arm64 + spec: + <<: *docker_arm_spec + <<: *elastic_docker_spec + <<: *apache_license_for_binaries + name: "{{.BeatName}}-oss" + + - os: linux + types: [docker] + arch: arm64 + spec: + <<: *docker_arm_spec + <<: *docker_arm_wolfi_spec + <<: *elastic_docker_spec + <<: *apache_license_for_binaries + name: "{{.BeatName}}-wolfi-oss" + extra_vars: + image_name: "{{.BeatName}}-wolfi-oss" - os: aix types: [tgz] spec: <<: *binary_spec <<: *apache_license_for_binaries - name: '{{.BeatName}}-oss' + name: "{{.BeatName}}-oss" # Elastic Beat with Elastic License and binary taken the current directory. 
elastic_beat_xpack: @@ -364,6 +414,15 @@ specs: <<: *elastic_docker_spec <<: *elastic_license_for_binaries + - os: linux + arch: amd64 + types: [docker] + spec: + <<: *docker_spec + <<: *docker_wolfi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + - os: linux arch: arm64 types: [docker] @@ -381,6 +440,15 @@ specs: <<: *elastic_docker_spec <<: *elastic_license_for_binaries + - os: linux + arch: arm64 + types: [docker] + spec: + <<: *docker_arm_spec + <<: *docker_arm_wolfi_spec + <<: *elastic_docker_spec + <<: *elastic_license_for_binaries + - os: aix types: [tgz] spec: @@ -427,7 +495,7 @@ specs: <<: *windows_binary_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: darwin @@ -436,7 +504,7 @@ specs: <<: *binary_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: linux @@ -445,7 +513,7 @@ specs: <<: *binary_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: linux @@ -465,7 +533,7 @@ specs: <<: *elastic_docker_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: linux @@ -477,18 +545,19 @@ specs: <<: *elastic_docker_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: linux - arch: arm64 + arch: amd64 types: [docker] spec: - <<: *docker_arm_spec + <<: *docker_spec + <<: *docker_wolfi_spec <<: *elastic_docker_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: linux @@ -496,126 +565,61 @@ specs: types: [docker] spec: <<: *docker_arm_spec - <<: *docker_arm_ubi_spec <<: *elastic_docker_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: aix - types: [tgz] - spec: - <<: *binary_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - os: linux - types: [deb, rpm] - spec: - <<: *elastic_license_for_deb_rpm - files: - /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: - source: /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} - symlink: true - mode: 0755 - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # 
Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - # Cloud specific docker image - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - - os: linux - arch: amd64 - types: [docker] - spec: - <<: *docker_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries - files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - - os: linux arch: arm64 types: [docker] spec: + <<: *docker_arm_spec + <<: *docker_arm_ubi_spec <<: *elastic_docker_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + "{{.BeatName}}{{.BinaryExt}}": + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - # Complete image gets a 'complete' variant for synthetics and other large - # packages too big to fit in the main image - os: linux arch: arm64 types: [docker] spec: + <<: *docker_arm_spec + <<: *docker_arm_wolfi_spec <<: *elastic_docker_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + "{{.BeatName}}{{.BinaryExt}}": + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - # Cloud specific docker image - - os: linux - arch: arm64 - types: [docker] + - os: aix + types: [tgz] spec: - <<: *elastic_docker_spec + <<: *binary_spec <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + "{{.BeatName}}{{.BinaryExt}}": + source: ./{{.XPackDir}}/{{.BeatName}}/build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} - os: linux - arch: arm64 - types: [docker] + types: [deb, rpm] spec: - <<: *docker_arm_ubi_spec - <<: *elastic_docker_spec - <<: *elastic_license_for_binaries + <<: *elastic_license_for_deb_rpm files: - '{{.BeatName}}{{.BinaryExt}}': - source: ./build/golang-crossbuild/{{.BeatName}}-{{.GOOS}}-{{.Platform.Arch}}{{.BinaryExt}} + /usr/share/{{.BeatName}}/bin/{{.BeatName}}{{.BinaryExt}}: + source: /var/lib/{{.BeatName}}/data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} + symlink: true + mode: 0755 - os: aix types: [tgz] spec: <<: *elastic_license_for_binaries files: - '{{.BeatName}}{{.BinaryExt}}': + "{{.BeatName}}{{.BinaryExt}}": source: data/{{.BeatName}}-{{ commit_short }}/{{.BeatName}}{{.BinaryExt}} symlink: true - mode: 0755 \ No newline at end of file + mode: 0755 diff --git a/dev-tools/packaging/templates/docker/Dockerfile.tmpl b/dev-tools/packaging/templates/docker/Dockerfile.tmpl index 115119be4279..922fc3668991 100644 --- a/dev-tools/packaging/templates/docker/Dockerfile.tmpl +++ b/dev-tools/packaging/templates/docker/Dockerfile.tmpl @@ -6,6 +6,15 @@ # the final image because of permission 
changes. FROM {{ .buildFrom }} AS home +{{- if contains .buildFrom "wolfi" }} +RUN for iter in {1..10}; do \ + apk update && \ + apk add --no-interactive --no-progress --no-cache libcap-utils && \ + exit_code=0 && break || exit_code=$? && echo "apk error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) +{{- end }} + COPY beat {{ $beatHome }} RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/logs && \ @@ -18,13 +27,36 @@ RUN mkdir -p {{ $beatHome }}/data {{ $beatHome }}/logs && \ {{- end }} chmod 0775 {{ $beatHome }}/data {{ $beatHome }}/logs +{{- if .linux_capabilities }} +# Since the beat is stored at the other end of a symlink we must follow the symlink first +# For security reasons setcap does not support symlinks. This is smart in the general case +# but in our specific case since we're building a trusted image from trusted binaries this is +# fine. Thus, we use readlink to follow the link and setcap on the actual binary +RUN readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} +{{- end }} + FROM {{ .from }} +{{- if contains .from "wolfi" }} +RUN for iter in {1..10}; do \ + apk update && \ + apk add --no-interactive --no-progress --no-cache curl shadow bash && \ + exit_code=0 && break || exit_code=$? && echo "apk error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) +{{- end }} + {{- if contains .from "ubi-minimal" }} -RUN microdnf -y update && \ - microdnf -y install findutils shadow-utils && \ - microdnf clean all -{{- else }} +RUN for iter in {1..10}; do \ + microdnf -y update && \ + microdnf -y install findutils shadow-utils && \ + microdnf clean all && \ + exit_code=0 && break || exit_code=$? && echo "microdnf error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) +{{- end }} + +{{- if contains .from "ubuntu" }} RUN for iter in {1..10}; do \ apt-get update -y && \ DEBIAN_FRONTEND=noninteractive apt-get install --no-install-recommends --yes ca-certificates curl gawk libcap2-bin xz-utils && \ @@ -60,7 +92,7 @@ LABEL \ license="{{ .License }}" \ description="{{ .BeatDescription }}" -ENV ELASTIC_CONTAINER "true" +ENV ELASTIC_CONTAINER="true" ENV PATH={{ $beatHome }}:$PATH ENV GODEBUG="madvdontneed=1" @@ -97,19 +129,42 @@ RUN mkdir /licenses COPY --from=home {{ $beatHome }}/LICENSE.txt /licenses COPY --from=home {{ $beatHome }}/NOTICE.txt /licenses -{{- if .linux_capabilities }} -# Since the beat is stored at the other end of a symlink we must follow the symlink first -# For security reasons setcap does not support symlinks. This is smart in the general case -# but in our specific case since we're building a trusted image from trusted binaries this is -# fine. Thus, we use readlink to follow the link and setcap on the actual binary -RUN readlink -f {{ $beatBinary }} | xargs setcap {{ .linux_capabilities }} -{{- end }} - {{- if ne .user "root" }} RUN groupadd --gid 1000 {{ .BeatName }} RUN useradd -M --uid 1000 --gid 1000 --groups 0 --home {{ $beatHome }} {{ .user }} +{{- end }} + +{{- if (and (eq .BeatName "heartbeat") (contains .from "wolfi")) }} +USER root +# Install required dependencies from wolfi repository +RUN for iter in {1..10}; do \ + apk update && \ + apk add --no-interactive --no-progress --no-cache nodejs-18=18.20.4-r0 npm=10.8.3-r0 glib dbus-glib libatk-1.0 \ + libatk-bridge-2.0 cups-libs libxcomposite libxdamage libxrandr libxkbcommon pango alsa-lib \ + font-opensans fontconfig gtk icu-data-full libnss mesa font-noto-cjk font-noto-emoji && \ + exit_code=0 && break || exit_code=$? 
&& echo "apk error: retry $iter in 10s" && sleep 10; \ + done; \ + (exit $exit_code) + +# Setup synthetics env vars +ENV ELASTIC_SYNTHETICS_CAPABLE=true +ENV TZ=UTC +ENV NPM_CONFIG_PREFIX={{ $beatHome }}/.npm +ENV PATH="$NPM_CONFIG_PREFIX/bin:$PATH" + +RUN echo \ + $NPM_CONFIG_PREFIX \ + {{ $beatHome }}/.config \ + {{ $beatHome }}/.synthetics \ + {{ $beatHome }}/.npm \ + {{ $beatHome }}/.cache \ + | xargs -IDIR sh -c 'mkdir -m 0770 -p DIR && chown -R {{ .user }} DIR' + +RUN (npm i -g --loglevel verbose --production --engine-strict @elastic/synthetics@stack_release || sh -c 'tail -n +1 ${NPM_CONFIG_PREFIX}/_logs/* && exit 1') +{{- end }} -{{- if (and (eq .BeatName "heartbeat") (not (contains .from "ubi-minimal"))) }} +{{- if (and (eq .BeatName "heartbeat") (contains .from "ubuntu")) }} +USER root ENV NODE_PATH={{ $beatHome }}/.node RUN echo \ $NODE_PATH \ @@ -117,13 +172,12 @@ RUN echo \ {{ $beatHome }}/.synthetics \ {{ $beatHome }}/.npm \ {{ $beatHome }}/.cache \ - | xargs -IDIR sh -c 'mkdir -p DIR && chmod 0770 DIR' + | xargs -IDIR sh -c 'mkdir -m 0770 -p DIR && chown -R {{ .user }} DIR' # Setup synthetics env vars ENV ELASTIC_SYNTHETICS_CAPABLE=true ENV TZ=UTC -ENV SUITES_DIR={{ $beatHome }}/suites -ENV NODE_VERSION=18.20.3 +ENV NODE_VERSION=18.20.4 ENV PATH="$NODE_PATH/node/bin:$PATH" # Install the latest version of @elastic/synthetics forcefully ignoring the previously # cached node_modules, heartbeat then calls the global executable to run test suites @@ -150,11 +204,10 @@ RUN cd /usr/share/heartbeat/.node \ # Install synthetics as a regular user, installing npm deps as root odesn't work RUN chown -R {{ .user }} $NODE_PATH -USER {{ .user }} + # If this fails dump the NPM logs RUN npm i -g --loglevel verbose --production --engine-strict @elastic/synthetics@stack_release || sh -c 'tail -n +1 /root/.npm/_logs/* && exit 1' -RUN chmod ug+rwX -R $NODE_PATH -USER root +RUN chmod ug+rwX -R $NODE_PATH # Install the deps as needed by the exact version of playwright elastic synthetics uses # We don't use npx playwright install-deps because that could pull a newer version @@ -169,10 +222,8 @@ RUN for iter in {1..10}; do \ done; \ (exit $exit_code) {{- end }} -USER 1000 -{{- else }} -USER 0 -{{- end }} + +USER {{ .user }} {{- range $i, $port := .ExposePorts }} EXPOSE {{ $port }} diff --git a/filebeat/fileset/factory.go b/filebeat/fileset/factory.go index 9b3808e79137..ca7895a90fd9 100644 --- a/filebeat/fileset/factory.go +++ b/filebeat/fileset/factory.go @@ -21,7 +21,7 @@ import ( "fmt" "sync" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/mitchellh/hashstructure" "github.com/elastic/beats/v7/libbeat/beat" diff --git a/filebeat/harvester/harvester.go b/filebeat/harvester/harvester.go index 39f9b45bb2bd..4d1b7211a7b2 100644 --- a/filebeat/harvester/harvester.go +++ b/filebeat/harvester/harvester.go @@ -18,7 +18,7 @@ package harvester import ( - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" ) // Harvester contains all methods which must be supported by each harvester diff --git a/filebeat/harvester/registry.go b/filebeat/harvester/registry.go index 989557d08bc9..0097bb82a079 100644 --- a/filebeat/harvester/registry.go +++ b/filebeat/harvester/registry.go @@ -21,7 +21,7 @@ import ( "errors" "sync" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/elastic-agent-libs/logp" ) diff --git a/filebeat/input/filestream/internal/input-logfile/harvester_test.go b/filebeat/input/filestream/internal/input-logfile/harvester_test.go index 
91615ae0381b..d8800c85996d 100644 --- a/filebeat/input/filestream/internal/input-logfile/harvester_test.go +++ b/filebeat/input/filestream/internal/input-logfile/harvester_test.go @@ -31,9 +31,9 @@ import ( "github.com/elastic/beats/v7/filebeat/input/filestream/internal/task" input "github.com/elastic/beats/v7/filebeat/input/v2" + "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/common/atomic" "github.com/elastic/beats/v7/libbeat/tests/resources" - "github.com/elastic/beats/v7/x-pack/dockerlogbeat/pipelinemock" "github.com/elastic/elastic-agent-libs/logp" ) @@ -393,7 +393,7 @@ func TestDefaultHarvesterGroup(t *testing.T) { func testDefaultHarvesterGroup(t *testing.T, mockHarvester Harvester) *defaultHarvesterGroup { return &defaultHarvesterGroup{ readers: newReaderGroup(), - pipeline: &pipelinemock.MockPipelineConnector{}, + pipeline: &MockPipeline{}, harvester: mockHarvester, store: testOpenStore(t, "test", nil), identifier: &sourceIdentifier{"filestream::.global::"}, @@ -465,3 +465,71 @@ func (tl *testLogger) Errorf(format string, args ...interface{}) { func (tl *testLogger) String() string { return (*strings.Builder)(tl).String() } + +// MockClient is a mock implementation of the beat.Client interface. +type MockClient struct { + published []beat.Event // Slice to store published events + + closed bool // Flag to indicate if the client is closed + mu sync.Mutex // Mutex to synchronize access to the published events slice +} + +// GetEvents returns all the events published by the mock client. +func (m *MockClient) GetEvents() []beat.Event { + m.mu.Lock() + defer m.mu.Unlock() + + return m.published +} + +// Publish publishes a single event. +func (m *MockClient) Publish(e beat.Event) { + es := make([]beat.Event, 1) + es = append(es, e) + + m.PublishAll(es) +} + +// PublishAll publishes multiple events. +func (m *MockClient) PublishAll(es []beat.Event) { + m.mu.Lock() + defer m.mu.Unlock() + + m.published = append(m.published, es...) +} + +// Close closes the mock client. +func (m *MockClient) Close() error { + m.mu.Lock() + defer m.mu.Unlock() + + if m.closed { + return fmt.Errorf("mock already closed") + } + + m.closed = true + return nil +} + +// MockPipeline is a mock implementation of the beat.Pipeline interface. +type MockPipeline struct { + c beat.Client // Client used by the pipeline + mu sync.Mutex // Mutex to synchronize access to the client +} + +// ConnectWith connects the mock pipeline with a client using the provided configuration. +func (mp *MockPipeline) ConnectWith(config beat.ClientConfig) (beat.Client, error) { + mp.mu.Lock() + defer mp.mu.Unlock() + + c := &MockClient{} + + mp.c = c + + return c, nil +} + +// Connect connects the mock pipeline with a client using the default configuration. +func (mp *MockPipeline) Connect() (beat.Client, error) { + return mp.ConnectWith(beat.ClientConfig{}) +} diff --git a/filebeat/input/journald/pkg/journalfield/conv_expand_test.go b/filebeat/input/journald/pkg/journalfield/conv_expand_test.go index 09daf7c8f5b6..4601a02379c5 100644 --- a/filebeat/input/journald/pkg/journalfield/conv_expand_test.go +++ b/filebeat/input/journald/pkg/journalfield/conv_expand_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. 
-//go:build linux && cgo +//go:build linux package journalfield diff --git a/filebeat/input/journald/pkg/journalfield/conv_test.go b/filebeat/input/journald/pkg/journalfield/conv_test.go index 6f7cde843c50..c92d3f15bcb0 100644 --- a/filebeat/input/journald/pkg/journalfield/conv_test.go +++ b/filebeat/input/journald/pkg/journalfield/conv_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -//go:build linux && cgo +//go:build linux package journalfield diff --git a/filebeat/input/journald/pkg/journalfield/default.go b/filebeat/input/journald/pkg/journalfield/default.go index 1a2c375afce1..d2ec76fcd393 100644 --- a/filebeat/input/journald/pkg/journalfield/default.go +++ b/filebeat/input/journald/pkg/journalfield/default.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -//go:build linux && cgo +//go:build linux package journalfield diff --git a/filebeat/input/journald/pkg/journalfield/matcher_test.go b/filebeat/input/journald/pkg/journalfield/matcher_test.go index f863529c5229..589e6f4bb703 100644 --- a/filebeat/input/journald/pkg/journalfield/matcher_test.go +++ b/filebeat/input/journald/pkg/journalfield/matcher_test.go @@ -15,7 +15,7 @@ // specific language governing permissions and limitations // under the License. -//go:build linux && cgo +//go:build linux package journalfield diff --git a/filebeat/input/log/harvester.go b/filebeat/input/log/harvester.go index ee08a3ff1844..84a62bcdfa42 100644 --- a/filebeat/input/log/harvester.go +++ b/filebeat/input/log/harvester.go @@ -37,7 +37,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "golang.org/x/text/transform" "github.com/elastic/beats/v7/libbeat/beat" @@ -191,7 +191,7 @@ func (h *Harvester) ID() uuid.UUID { func (h *Harvester) Setup() error { err := h.open() if err != nil { - return fmt.Errorf("Harvester setup failed. Unexpected file opening error: %s", err) + return fmt.Errorf("Harvester setup failed. Unexpected file opening error: %w", err) } h.reader, err = h.newLogFileReader() @@ -199,7 +199,7 @@ func (h *Harvester) Setup() error { if h.source != nil { h.source.Close() } - return fmt.Errorf("Harvester setup failed. Unexpected encoding line reader error: %s", err) + return fmt.Errorf("Harvester setup failed. Unexpected encoding line reader error: %w", err) } h.metrics = newHarvesterProgressMetrics(h.id.String()) @@ -325,20 +325,20 @@ func (h *Harvester) Run() error { message, err := h.reader.Next() if err != nil { - switch err { - case ErrFileTruncate: + switch { + case errors.Is(err, ErrFileTruncate): logger.Info("File was truncated. Begin reading file from offset 0.") h.state.Offset = 0 filesTruncated.Add(1) - case ErrRemoved: + case errors.Is(err, ErrRemoved): logger.Info("File was removed. Closing because close_removed is enabled.") - case ErrRenamed: + case errors.Is(err, ErrRenamed): logger.Info("File was renamed. Closing because close_renamed is enabled.") - case ErrClosed: + case errors.Is(err, ErrClosed): logger.Info("Reader was closed. Closing.") - case io.EOF: + case errors.Is(err, io.EOF): logger.Info("End of file reached. Closing because close_eof is enabled.") - case ErrInactive: + case errors.Is(err, ErrInactive): logger.Infof("File is inactive. 
Closing because close_inactive of %v reached.", h.config.CloseInactive) default: logger.Errorf("Read line error: %v", err) @@ -519,7 +519,7 @@ func (h *Harvester) shouldExportLine(line string) bool { func (h *Harvester) openFile() error { fi, err := os.Stat(h.state.Source) if err != nil { - return fmt.Errorf("failed to stat source file %s: %v", h.state.Source, err) + return fmt.Errorf("failed to stat source file %s: %w", h.state.Source, err) } if fi.Mode()&os.ModeNamedPipe != 0 { return fmt.Errorf("failed to open file %s, named pipes are not supported", h.state.Source) @@ -527,7 +527,7 @@ func (h *Harvester) openFile() error { f, err := file_helper.ReadOpen(h.state.Source) if err != nil { - return fmt.Errorf("Failed opening %s: %s", h.state.Source, err) + return fmt.Errorf("Failed opening %s: %w", h.state.Source, err) } harvesterOpenFiles.Add(1) @@ -549,7 +549,7 @@ func (h *Harvester) validateFile(f *os.File) error { info, err := f.Stat() if err != nil { - return fmt.Errorf("Failed getting stats for file %s: %s", h.state.Source, err) + return fmt.Errorf("Failed getting stats for file %s: %w", h.state.Source, err) } if !info.Mode().IsRegular() { @@ -563,8 +563,7 @@ func (h *Harvester) validateFile(f *os.File) error { h.encoding, err = h.encodingFactory(f) if err != nil { - - if err == transform.ErrShortSrc { + if errors.Is(err, transform.ErrShortSrc) { logger.Infof("Initialising encoding for '%v' failed due to file being too short", f) } else { logger.Errorf("Initialising encoding for '%v' failed: %v", f, err) @@ -588,12 +587,12 @@ func (h *Harvester) initFileOffset(file *os.File) (int64, error) { // continue from last known offset if h.state.Offset > 0 { h.logger.Debugf("Set previous offset: %d ", h.state.Offset) - return file.Seek(h.state.Offset, os.SEEK_SET) + return file.Seek(h.state.Offset, io.SeekStart) } // get offset from file in case of encoding factory was required to read some data. 
h.logger.Debug("Setting offset to: 0") - return file.Seek(0, os.SEEK_CUR) + return file.Seek(0, io.SeekCurrent) } // getState returns an updated copy of the harvester state diff --git a/filebeat/input/log/input.go b/filebeat/input/log/input.go index 40c42ddeebdf..ad93632b372c 100644 --- a/filebeat/input/log/input.go +++ b/filebeat/input/log/input.go @@ -27,7 +27,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/filebeat/channel" "github.com/elastic/beats/v7/filebeat/harvester" diff --git a/filebeat/input/redis/harvester.go b/filebeat/input/redis/harvester.go index 79929a4ee4fd..141c5f157738 100644 --- a/filebeat/input/redis/harvester.go +++ b/filebeat/input/redis/harvester.go @@ -22,7 +22,7 @@ import ( "strings" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" rd "github.com/gomodule/redigo/redis" "github.com/elastic/beats/v7/libbeat/beat" @@ -82,8 +82,12 @@ func (h *Harvester) Run() error { default: } // Writes Slowlog get and slowlog reset both to the buffer so they are executed together - h.conn.Send("SLOWLOG", "GET") - h.conn.Send("SLOWLOG", "RESET") + if err := h.conn.Send("SLOWLOG", "GET"); err != nil { + return fmt.Errorf("error sending slowlog get: %w", err) + } + if err := h.conn.Send("SLOWLOG", "RESET"); err != nil { + return fmt.Errorf("error sending slowlog reset: %w", err) + } // Flush the buffer to execute both commands and receive the reply from SLOWLOG GET h.conn.Flush() @@ -91,13 +95,13 @@ func (h *Harvester) Run() error { // Receives first reply from redis which is the one from GET logs, err := rd.Values(h.conn.Receive()) if err != nil { - return fmt.Errorf("error receiving slowlog data: %s", err) + return fmt.Errorf("error receiving slowlog data: %w", err) } // Read reply from RESET _, err = h.conn.Receive() if err != nil { - return fmt.Errorf("error receiving reset data: %s", err) + return fmt.Errorf("error receiving reset data: %w", err) } for _, item := range logs { @@ -115,7 +119,11 @@ func (h *Harvester) Run() error { var log log var args []string - rd.Scan(entry, &log.id, &log.timestamp, &log.duration, &args) + _, err = rd.Scan(entry, &log.id, &log.timestamp, &log.duration, &args) + if err != nil { + logp.Err("Error scanning slowlog entry: %s", err) + continue + } // This splits up the args into cmd, key, args. 
argsLen := len(args) @@ -144,7 +152,7 @@ func (h *Harvester) Run() error { slowlogEntry["args"] = log.args } - h.forwarder.Send(beat.Event{ + err = h.forwarder.Send(beat.Event{ Timestamp: time.Unix(log.timestamp, 0).UTC(), Fields: mapstr.M{ "message": strings.Join(args, " "), @@ -156,6 +164,10 @@ func (h *Harvester) Run() error { }, }, }) + if err != nil { + logp.Err("Error sending beat event: %s", err) + continue + } } return nil } diff --git a/go.mod b/go.mod index a47bf7ec1543..5081c345039d 100644 --- a/go.mod +++ b/go.mod @@ -87,7 +87,6 @@ require ( github.com/godbus/dbus/v5 v5.1.0 github.com/godror/godror v0.33.2 github.com/gofrs/flock v0.8.1 - github.com/gofrs/uuid v4.4.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/golang/mock v1.6.0 github.com/golang/snappy v0.0.4 @@ -95,7 +94,7 @@ require ( github.com/google/flatbuffers v23.5.26+incompatible github.com/google/go-cmp v0.6.0 github.com/google/gopacket v1.1.19 - github.com/google/uuid v1.6.0 + github.com/google/uuid v1.6.0 // indirect github.com/gorhill/cronexpr v0.0.0-20180427100037-88b0669f7d75 github.com/h2non/filetype v1.1.1 github.com/hashicorp/go-retryablehttp v0.7.7 @@ -202,6 +201,7 @@ require ( github.com/elastic/toutoumomoma v0.0.0-20240626215117-76e39db18dfb github.com/foxcpp/go-mockdns v0.0.0-20201212160233-ede2f9158d15 github.com/go-ldap/ldap/v3 v3.4.6 + github.com/gofrs/uuid/v5 v5.2.0 github.com/golang-jwt/jwt/v5 v5.2.1 github.com/google/cel-go v0.19.0 github.com/googleapis/gax-go/v2 v2.13.0 @@ -294,7 +294,6 @@ require ( github.com/gobuffalo/here v0.6.7 // indirect github.com/goccy/go-json v0.10.2 // indirect github.com/godror/knownpb v0.1.0 // indirect - github.com/gofrs/uuid/v5 v5.2.0 // indirect github.com/golang-jwt/jwt/v4 v4.5.0 // indirect github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe // indirect github.com/golang-sql/sqlexp v0.1.0 // indirect diff --git a/go.sum b/go.sum index ec32ee823dde..838744dab133 100644 --- a/go.sum +++ b/go.sum @@ -862,8 +862,6 @@ github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw= github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= -github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA= -github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid/v5 v5.2.0 h1:qw1GMx6/y8vhVsx626ImfKMuS5CvJmhIKKtuyvfajMM= github.com/gofrs/uuid/v5 v5.2.0/go.mod h1:CDOjlDMVAtN56jqyRUZh58JT31Tiw7/oQyEXZV+9bD8= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= diff --git a/heartbeat/monitors/mocks.go b/heartbeat/monitors/mocks.go index f8747a804005..c172d24464c8 100644 --- a/heartbeat/monitors/mocks.go +++ b/heartbeat/monitors/mocks.go @@ -24,7 +24,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/config" diff --git a/heartbeat/monitors/wrappers/monitorstate/esloader_test.go b/heartbeat/monitors/wrappers/monitorstate/esloader_test.go index db70c1cc72e7..88664f562c85 100644 --- a/heartbeat/monitors/wrappers/monitorstate/esloader_test.go +++ b/heartbeat/monitors/wrappers/monitorstate/esloader_test.go @@ -27,7 +27,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + 
"github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" "github.com/elastic/go-elasticsearch/v8" diff --git a/heartbeat/monitors/wrappers/summarizer/plugstatestat.go b/heartbeat/monitors/wrappers/summarizer/plugstatestat.go index cf7e90af5f30..1c3dc8876809 100644 --- a/heartbeat/monitors/wrappers/summarizer/plugstatestat.go +++ b/heartbeat/monitors/wrappers/summarizer/plugstatestat.go @@ -20,7 +20,7 @@ package summarizer import ( "fmt" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/heartbeat/eventext" "github.com/elastic/beats/v7/heartbeat/look" diff --git a/heartbeat/tracer/tracer_test.go b/heartbeat/tracer/tracer_test.go index 45d0a4125e79..654ec53fcf67 100644 --- a/heartbeat/tracer/tracer_test.go +++ b/heartbeat/tracer/tracer_test.go @@ -27,7 +27,7 @@ import ( "testing" "time" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" ) @@ -63,7 +63,7 @@ func TestSockTracer(t *testing.T) { t.Run(tt.name, func(t *testing.T) { t.Parallel() - sockName, err := uuid.NewRandom() + sockName, err := uuid.NewV4() require.NoError(t, err) sockPath := filepath.Join(os.TempDir(), sockName.String()) @@ -94,7 +94,7 @@ func TestSockTracerWaitSuccess(t *testing.T) { waitFor := 5 * time.Second delay := time.Millisecond * 1500 - sockName, err := uuid.NewRandom() + sockName, err := uuid.NewV4() require.NoError(t, err) sockPath := filepath.Join(os.TempDir(), sockName.String()) diff --git a/libbeat/autodiscover/autodiscover_test.go b/libbeat/autodiscover/autodiscover_test.go index 93918180a928..5343c0939415 100644 --- a/libbeat/autodiscover/autodiscover_test.go +++ b/libbeat/autodiscover/autodiscover_test.go @@ -26,7 +26,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -129,7 +129,7 @@ func (m *mockAdapter) Create(_ beat.PipelineConnector, config *conf.C) (cfgfile. 
func (m *mockAdapter) Runners() []*mockRunner { m.mutex.Lock() defer m.mutex.Unlock() - var res []*mockRunner + res := make([]*mockRunner, 0, len(m.runners)) for _, r := range m.runners { res = append(res, r.Clone()) } diff --git a/libbeat/autodiscover/provider.go b/libbeat/autodiscover/provider.go index 8d70e80ee290..74a6d56f23d6 100644 --- a/libbeat/autodiscover/provider.go +++ b/libbeat/autodiscover/provider.go @@ -21,7 +21,7 @@ import ( "fmt" "strings" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/cfgfile" "github.com/elastic/elastic-agent-autodiscover/bus" diff --git a/libbeat/autodiscover/providers/docker/docker.go b/libbeat/autodiscover/providers/docker/docker.go index a659135c7e70..8398762aa27a 100644 --- a/libbeat/autodiscover/providers/docker/docker.go +++ b/libbeat/autodiscover/providers/docker/docker.go @@ -25,7 +25,7 @@ import ( "strconv" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/autodiscover" "github.com/elastic/beats/v7/libbeat/autodiscover/template" diff --git a/libbeat/autodiscover/providers/docker/docker_integration_test.go b/libbeat/autodiscover/providers/docker/docker_integration_test.go index f8a760779782..80106998fc69 100644 --- a/libbeat/autodiscover/providers/docker/docker_integration_test.go +++ b/libbeat/autodiscover/providers/docker/docker_integration_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/autodiscover/template" @@ -73,12 +73,18 @@ func TestDockerStart(t *testing.T) { if err != nil { t.Fatal(err) } - defer d.ContainerRemove(ID) + defer func() { + if err := d.ContainerRemove(ID); err != nil { + t.Log(err) + } + }() checkEvent(t, listener, ID, true) // Kill - d.ContainerKill(ID) + if err := d.ContainerKill(ID); err != nil { + t.Log(err) + } checkEvent(t, listener, ID, false) } diff --git a/libbeat/autodiscover/providers/jolokia/discovery.go b/libbeat/autodiscover/providers/jolokia/discovery.go index cf0f1f5199d3..1834766dd36f 100644 --- a/libbeat/autodiscover/providers/jolokia/discovery.go +++ b/libbeat/autodiscover/providers/jolokia/discovery.go @@ -25,7 +25,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" s "github.com/elastic/beats/v7/libbeat/common/schema" c "github.com/elastic/beats/v7/libbeat/common/schema/mapstriface" @@ -238,7 +238,9 @@ func (d *Discovery) sendProbe(config InterfaceConfig) { if timeout > config.Interval { timeout = config.Interval } - conn.SetDeadline(time.Now().Add(timeout)) + if err := conn.SetDeadline(time.Now().Add(timeout)); err != nil { + log.Error(err.Error()) + } if _, err := conn.WriteTo(queryMessage, &discoveryAddress); err != nil { log.Error(err.Error()) @@ -249,6 +251,7 @@ func (d *Discovery) sendProbe(config InterfaceConfig) { for { n, _, err := conn.ReadFrom(b) if err != nil { + //nolint:errorlint // false positive if netErr, ok := err.(net.Error); !ok || !netErr.Timeout() { log.Error(err.Error()) } diff --git a/libbeat/autodiscover/providers/jolokia/jolokia.go b/libbeat/autodiscover/providers/jolokia/jolokia.go index beb566805c44..0c3cfa62b7e9 100644 --- a/libbeat/autodiscover/providers/jolokia/jolokia.go +++ b/libbeat/autodiscover/providers/jolokia/jolokia.go @@ -20,7 +20,7 @@ package jolokia import ( "fmt" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/autodiscover" 
"github.com/elastic/beats/v7/libbeat/autodiscover/template" @@ -30,6 +30,7 @@ import ( ) func init() { + //nolint:errcheck // init function autodiscover.Registry.AddProvider("jolokia", AutodiscoverBuilder) } @@ -42,7 +43,6 @@ type DiscoveryProber interface { // Provider is the Jolokia Discovery autodiscover provider type Provider struct { - config *Config bus bus.Bus builders autodiscover.Builders appenders autodiscover.Appenders diff --git a/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/libbeat/autodiscover/providers/kubernetes/kubernetes.go index 1ee4d01a06c5..823989787fdb 100644 --- a/libbeat/autodiscover/providers/kubernetes/kubernetes.go +++ b/libbeat/autodiscover/providers/kubernetes/kubernetes.go @@ -29,7 +29,7 @@ import ( "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/autodiscover" "github.com/elastic/beats/v7/libbeat/autodiscover/template" diff --git a/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go b/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go index b767d87be044..12f5c824423d 100644 --- a/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go +++ b/libbeat/autodiscover/providers/kubernetes/kubernetes_test.go @@ -23,7 +23,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" v1 "k8s.io/api/coordination/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/libbeat/autodiscover/providers/kubernetes/node.go b/libbeat/autodiscover/providers/kubernetes/node.go index 4747c751cbfc..27957998575f 100644 --- a/libbeat/autodiscover/providers/kubernetes/node.go +++ b/libbeat/autodiscover/providers/kubernetes/node.go @@ -25,7 +25,7 @@ import ( "github.com/elastic/elastic-agent-autodiscover/utils" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/equality" k8s "k8s.io/client-go/kubernetes" diff --git a/libbeat/autodiscover/providers/kubernetes/node_test.go b/libbeat/autodiscover/providers/kubernetes/node_test.go index 5336ddde758b..bd4aa1fa2111 100644 --- a/libbeat/autodiscover/providers/kubernetes/node_test.go +++ b/libbeat/autodiscover/providers/kubernetes/node_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/libbeat/autodiscover/providers/kubernetes/pod.go b/libbeat/autodiscover/providers/kubernetes/pod.go index c5f9c721eb90..764dab44f462 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod.go +++ b/libbeat/autodiscover/providers/kubernetes/pod.go @@ -24,7 +24,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" k8s "k8s.io/client-go/kubernetes" "github.com/elastic/elastic-agent-autodiscover/bus" diff --git a/libbeat/autodiscover/providers/kubernetes/pod_test.go b/libbeat/autodiscover/providers/kubernetes/pod_test.go index 84712615ec1d..bb8731275b36 100644 --- a/libbeat/autodiscover/providers/kubernetes/pod_test.go +++ b/libbeat/autodiscover/providers/kubernetes/pod_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" diff --git a/libbeat/autodiscover/providers/kubernetes/service.go 
b/libbeat/autodiscover/providers/kubernetes/service.go index e9e71c921bd8..c1604cd0fe45 100644 --- a/libbeat/autodiscover/providers/kubernetes/service.go +++ b/libbeat/autodiscover/providers/kubernetes/service.go @@ -25,7 +25,7 @@ import ( "github.com/elastic/elastic-agent-autodiscover/utils" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" k8s "k8s.io/client-go/kubernetes" "github.com/elastic/elastic-agent-autodiscover/bus" diff --git a/libbeat/autodiscover/providers/kubernetes/service_test.go b/libbeat/autodiscover/providers/kubernetes/service_test.go index 90ff678e11ce..b7e0701fbd6f 100644 --- a/libbeat/autodiscover/providers/kubernetes/service_test.go +++ b/libbeat/autodiscover/providers/kubernetes/service_test.go @@ -21,7 +21,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -511,6 +511,7 @@ func TestServiceEventer_NamespaceWatcher(t *testing.T) { } for _, test := range tests { + test := test t.Run(test.name, func(t *testing.T) { config := conf.MustNewConfigFrom(&test.cfg) diff --git a/libbeat/beat/info.go b/libbeat/beat/info.go index 23dfe9be0be4..57f9a570fbc3 100644 --- a/libbeat/beat/info.go +++ b/libbeat/beat/info.go @@ -20,7 +20,7 @@ package beat import ( "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" ) // Info stores a beats instance meta data. diff --git a/libbeat/cfgfile/cfgfile.go b/libbeat/cfgfile/cfgfile.go index ca19af8cb9f6..21d052d64da2 100644 --- a/libbeat/cfgfile/cfgfile.go +++ b/libbeat/cfgfile/cfgfile.go @@ -18,6 +18,7 @@ package cfgfile import ( + "flag" "fmt" "os" "path/filepath" @@ -97,6 +98,8 @@ func GetDefaultCfgfile() string { } // HandleFlags adapts default config settings based on command line flags. +// This also stores if -E management.enabled=true was set on command line +// to determine if running the Beat under agent. func HandleFlags() error { // default for the home path is the binary location home, err := filepath.Abs(filepath.Dir(os.Args[0])) @@ -114,6 +117,27 @@ func HandleFlags() error { common.PrintConfigDebugf(overwrites, "CLI setting overwrites (-E flag):") } + // Enable check to see if beat is running under Agent + // This is stored in a package so the modules which don't have + // access to the config can check this value. 
+ type management struct { + Enabled bool `config:"management.enabled"` + } + var managementSettings management + cfgFlag := flag.Lookup("E") + if cfgFlag == nil { + fleetmode.SetAgentMode(false) + return nil + } + cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) + cliCfg := cfgObject.Config() + + err = cliCfg.Unpack(&managementSettings) + if err != nil { + fleetmode.SetAgentMode(false) + return nil //nolint:nilerr // unpacking failing isn't an error for this case + } + fleetmode.SetAgentMode(managementSettings.Enabled) return nil } diff --git a/libbeat/cmd/instance/beat.go b/libbeat/cmd/instance/beat.go index 42f377a76e74..137c5577d81d 100644 --- a/libbeat/cmd/instance/beat.go +++ b/libbeat/cmd/instance/beat.go @@ -38,7 +38,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "go.uber.org/zap" "github.com/elastic/beats/v7/libbeat/api" @@ -265,11 +265,6 @@ func NewBeat(name, indexPrefix, v string, elasticLicensed bool, initFuncs []func return nil, err } - eid, err := uuid.FromString(metricreport.EphemeralID().String()) - if err != nil { - return nil, fmt.Errorf("failed to generate EphemeralID from UUID string: %w", err) - } - b := beat.Beat{ Info: beat.Info{ Beat: name, @@ -281,7 +276,7 @@ func NewBeat(name, indexPrefix, v string, elasticLicensed bool, initFuncs []func ID: id, FirstStart: time.Now(), StartTime: time.Now(), - EphemeralID: eid, + EphemeralID: metricreport.EphemeralID(), }, Fields: fields, Registry: reload.NewRegistry(), diff --git a/libbeat/cmd/instance/beat_test.go b/libbeat/cmd/instance/beat_test.go index e3df2a6b5be0..ebfecf191c7b 100644 --- a/libbeat/cmd/instance/beat_test.go +++ b/libbeat/cmd/instance/beat_test.go @@ -35,7 +35,7 @@ import ( "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/go-ucfg/yaml" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) diff --git a/libbeat/common/event_test.go b/libbeat/common/event_test.go index 0d598d021745..d6bd9eab99f2 100644 --- a/libbeat/common/event_test.go +++ b/libbeat/common/event_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/elastic/elastic-agent-libs/logp" @@ -206,7 +206,6 @@ func TestConvertWithNullEmission(t *testing.T) { Output mapstr.M } - type String string type TestStruct struct { A interface{} } diff --git a/libbeat/common/fleetmode/fleet_mode.go b/libbeat/common/fleetmode/fleet_mode.go index af179b887eac..97a17804f647 100644 --- a/libbeat/common/fleetmode/fleet_mode.go +++ b/libbeat/common/fleetmode/fleet_mode.go @@ -17,33 +17,18 @@ package fleetmode -import ( - "flag" - - "github.com/elastic/elastic-agent-libs/config" -) +var managementEnabled bool + +// SetAgentMode stores if the Beat is running under Elastic Agent. +// Normally this is called when the command line flags are parsed. +// This is stored as a package level variable because some components +// (like filebeat/metricbeat modules) don't have access to the +// configuration information to determine this on their own. 
+func SetAgentMode(enabled bool) { + managementEnabled = enabled +} -// Enabled checks to see if filebeat/metricbeat is running under Agent -// The management setting is stored in the main Beat runtime object, but we can't see that from a module -// So instead we check the CLI flags, since Agent starts filebeat/metricbeat with "-E", "management.enabled=true" +// Enabled returns true if the Beat is running under Elastic Agent. func Enabled() bool { - type management struct { - Enabled bool `config:"management.enabled"` - } - var managementSettings management - - cfgFlag := flag.Lookup("E") - if cfgFlag == nil { - return false - } - - cfgObject, _ := cfgFlag.Value.(*config.SettingsFlag) - cliCfg := cfgObject.Config() - - err := cliCfg.Unpack(&managementSettings) - if err != nil { - return false - } - - return managementSettings.Enabled + return managementEnabled } diff --git a/libbeat/docs/release.asciidoc b/libbeat/docs/release.asciidoc index 8dfc6167732b..ae01beb5c4ff 100644 --- a/libbeat/docs/release.asciidoc +++ b/libbeat/docs/release.asciidoc @@ -8,6 +8,7 @@ This section summarizes the changes in each release. Also read <> for more detail about changes that affect upgrade. +* <> * <> * <> * <> diff --git a/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go b/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go index 2df919d55d54..67b9a1cfb06d 100644 --- a/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go +++ b/libbeat/idxmgmt/lifecycle/client_handler_integration_test.go @@ -25,7 +25,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/libbeat/monitoring/inputmon/input.go b/libbeat/monitoring/inputmon/input.go index 7814f79234f9..38876aed8488 100644 --- a/libbeat/monitoring/inputmon/input.go +++ b/libbeat/monitoring/inputmon/input.go @@ -21,7 +21,7 @@ import ( "encoding/json" "strings" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/elastic-agent-libs/logp" "github.com/elastic/elastic-agent-libs/monitoring" @@ -59,7 +59,7 @@ func NewInputRegistry(inputType, id string, optionalParent *monitoring.Registry) // logs during support interactions. log := logp.NewLogger("metric_registry") // Make an orthogonal ID to allow tracking register/deregister pairs. 
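The `fleetmode` rewrite above replaces per-call flag parsing with a package-level switch: `cfgfile.HandleFlags` records `management.enabled` once at startup, and components that have no access to the Beat configuration read it back through `fleetmode.Enabled`. A minimal sketch of that usage follows; it calls `SetAgentMode` directly instead of going through flag parsing, purely for illustration.

[source,go]
----
package main

import (
	"fmt"

	"github.com/elastic/beats/v7/libbeat/common/fleetmode"
)

func main() {
	// Normally cfgfile.HandleFlags records this while parsing the -E flags;
	// here the switch is set directly to keep the example self-contained.
	fleetmode.SetAgentMode(true)

	// Modules without access to the Beat configuration can still ask
	// whether the Beat is running under Elastic Agent.
	if fleetmode.Enabled() {
		fmt.Println("running under Elastic Agent")
	}
}
----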
- uuid := uuid.New().String() + uuid := uuid.Must(uuid.NewV4()).String() log.Infow("registering", "input_type", inputType, "id", id, "key", key, "uuid", uuid) reg = parentRegistry.NewRegistry(key) diff --git a/libbeat/outputs/elasticsearch/callbacks.go b/libbeat/outputs/elasticsearch/callbacks.go index dcbbd971adb9..5196fe13802c 100644 --- a/libbeat/outputs/elasticsearch/callbacks.go +++ b/libbeat/outputs/elasticsearch/callbacks.go @@ -22,7 +22,7 @@ import ( "github.com/elastic/beats/v7/libbeat/esleg/eslegclient" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" ) // ConnectCallback defines the type for the function to be called when the Elasticsearch client successfully connects to the cluster diff --git a/libbeat/processors/add_host_metadata/add_host_metadata.go b/libbeat/processors/add_host_metadata/add_host_metadata.go index 5fe28194b555..3a7e7b40c55f 100644 --- a/libbeat/processors/add_host_metadata/add_host_metadata.go +++ b/libbeat/processors/add_host_metadata/add_host_metadata.go @@ -23,7 +23,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/elastic-agent-libs/monitoring" "github.com/elastic/go-sysinfo" diff --git a/libbeat/processors/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc b/libbeat/processors/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc index e81b3a8419e6..06cfcbf53401 100644 --- a/libbeat/processors/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc +++ b/libbeat/processors/add_kubernetes_metadata/docs/add_kubernetes_metadata.asciidoc @@ -6,7 +6,8 @@ ++++ The `add_kubernetes_metadata` processor annotates each event with relevant -metadata based on which Kubernetes pod the event originated from. +metadata based on which Kubernetes pod the event originated from. This processor only adds metadata to events that do not already have it. + At startup, it detects an `in_cluster` environment and caches the Kubernetes-related metadata. Events are only annotated if a valid configuration is detected. If it's not able to detect a valid Kubernetes configuration, diff --git a/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/libbeat/processors/add_kubernetes_metadata/kubernetes.go index e8600b6d85c5..2dedf99d2362 100644 --- a/libbeat/processors/add_kubernetes_metadata/kubernetes.go +++ b/libbeat/processors/add_kubernetes_metadata/kubernetes.go @@ -116,7 +116,7 @@ func New(cfg *config.C) (beat.Processor, error) { kubernetesAvailable: false, } - // complete processor's initialisation asynchronously so as to re-try on failing k8s client initialisations in case + // complete processor's initialisation asynchronously to re-try on failing k8s client initialisations in case // the k8s node is not yet ready.
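Most of the import churn in this change swaps `github.com/google/uuid` and the unversioned `github.com/gofrs/uuid` for `github.com/gofrs/uuid/v5`, so calls such as `uuid.New()` and `uuid.NewRandom()` become `uuid.NewV4()`, optionally wrapped in `uuid.Must` as in the inputmon hunk above. A small sketch of the v5 API follows; the printed values are illustrative only.

[source,go]
----
package main

import (
	"fmt"
	"log"

	"github.com/gofrs/uuid/v5"
)

func main() {
	// gofrs returns the error explicitly instead of panicking internally.
	id, err := uuid.NewV4()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("explicit error handling:", id.String())

	// Equivalent of the one-liner used in the inputmon change above.
	fmt.Println("with Must:", uuid.Must(uuid.NewV4()).String())

	// Parsing still round-trips through FromString.
	parsed, err := uuid.FromString(id.String())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("parsed:", parsed)
}
----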
go processor.init(config, cfg) @@ -260,17 +260,14 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) { watcher.AddEventHandler(kubernetes.ResourceEventHandlerFuncs{ AddFunc: func(obj interface{}) { pod := obj.(*kubernetes.Pod) - k.log.Debugf("Adding kubernetes pod: %s/%s", pod.GetNamespace(), pod.GetName()) k.addPod(pod) }, UpdateFunc: func(obj interface{}) { pod := obj.(*kubernetes.Pod) - k.log.Debugf("Updating kubernetes pod: %s/%s", pod.GetNamespace(), pod.GetName()) k.updatePod(pod) }, DeleteFunc: func(obj interface{}) { pod := obj.(*kubernetes.Pod) - k.log.Debugf("Removing pod: %s/%s", pod.GetNamespace(), pod.GetName()) k.removePod(pod) }, }) @@ -316,19 +313,17 @@ func (k *kubernetesAnnotator) Run(event *beat.Event) (*beat.Event, error) { return event, nil } if kubernetesMetadataExist(event) { - k.log.Debug("Skipping add_kubernetes_metadata processor as kubernetes metadata already exist") return event, nil } + index := k.matchers.MetadataIndex(event.Fields) if index == "" { k.log.Debug("No container match string, not adding kubernetes data") return event, nil } - k.log.Debugf("Using the following index key %s", index) metadata := k.cache.get(index) if metadata == nil { - k.log.Debugf("Index key %s did not match any of the cached resources", index) return event, nil } @@ -381,7 +376,6 @@ func (k *kubernetesAnnotator) Close() error { func (k *kubernetesAnnotator) addPod(pod *kubernetes.Pod) { metadata := k.indexers.GetMetadata(pod) for _, m := range metadata { - k.log.Debugf("Created index %s for pod %s/%s", m.Index, pod.GetNamespace(), pod.GetName()) k.cache.set(m.Index, m.Data) } } @@ -391,7 +385,6 @@ func (k *kubernetesAnnotator) updatePod(pod *kubernetes.Pod) { // Add it again only if it is not being deleted if pod.GetObjectMeta().GetDeletionTimestamp() != nil { - k.log.Debugf("Removing kubernetes pod being terminated: %s/%s", pod.GetNamespace(), pod.GetName()) return } diff --git a/libbeat/processors/script/javascript/config.go b/libbeat/processors/script/javascript/config.go index 03415a96d1b2..4074ab3f974c 100644 --- a/libbeat/processors/script/javascript/config.go +++ b/libbeat/processors/script/javascript/config.go @@ -24,14 +24,15 @@ import ( // Config defines the Javascript source files to use for the processor. type Config struct { - Tag string `config:"tag"` // Processor ID for debug and metrics. - Source string `config:"source"` // Inline script to execute. - File string `config:"file"` // Source file. - Files []string `config:"files"` // Multiple source files. - Params map[string]interface{} `config:"params"` // Parameters to pass to script. - Timeout time.Duration `config:"timeout" validate:"min=0"` // Execution timeout. - TagOnException string `config:"tag_on_exception"` // Tag to add to events when an exception happens. - MaxCachedSessions int `config:"max_cached_sessions" validate:"min=0"` // Max. number of cached VM sessions. + Tag string `config:"tag"` // Processor ID for debug and metrics. + Source string `config:"source"` // Inline script to execute. + File string `config:"file"` // Source file. + Files []string `config:"files"` // Multiple source files. + Params map[string]interface{} `config:"params"` // Parameters to pass to script. + Timeout time.Duration `config:"timeout" validate:"min=0"` // Execution timeout. + TagOnException string `config:"tag_on_exception"` // Tag to add to events when an exception happens. + MaxCachedSessions int `config:"max_cached_sessions" validate:"min=0"` // Max. number of cached VM sessions. 
+ OnlyCachedSessions bool `config:"only_cached_sessions"` // Only use cached VM sessions. } // Validate returns an error if one (and only one) option is not set. @@ -57,7 +58,8 @@ func (c Config) Validate() error { func defaultConfig() Config { return Config{ - TagOnException: "_js_exception", - MaxCachedSessions: 4, + TagOnException: "_js_exception", + MaxCachedSessions: 4, + OnlyCachedSessions: false, } } diff --git a/libbeat/processors/script/javascript/session.go b/libbeat/processors/script/javascript/session.go index 5b08e7d6052f..94e344d4e22a 100644 --- a/libbeat/processors/script/javascript/session.go +++ b/libbeat/processors/script/javascript/session.go @@ -91,7 +91,7 @@ func newSession(p *goja.Program, conf Config, test bool) (*session, error) { // Measure load times start := time.Now() defer func() { - took := time.Now().Sub(start) + took := time.Since(start) logger.Debugf("Load of javascript pipeline took %v", took) }() // Setup JS runtime. @@ -217,9 +217,9 @@ func (s *session) runProcessFunc(b *beat.Event) (out *beat.Event, err error) { } err = fmt.Errorf("unexpected panic in javascript processor: %v", r) if s.tagOnException != "" { - mapstr.AddTags(b.Fields, []string{s.tagOnException}) + _ = mapstr.AddTags(b.Fields, []string{s.tagOnException}) } - appendString(b.Fields, "error.message", err.Error(), false) + _ = appendString(b.Fields, "error.message", err.Error(), false) } }() @@ -238,9 +238,9 @@ func (s *session) runProcessFunc(b *beat.Event) (out *beat.Event, err error) { if _, err = s.processFunc(goja.Undefined(), s.evt.JSObject()); err != nil { if s.tagOnException != "" { - mapstr.AddTags(b.Fields, []string{s.tagOnException}) + _ = mapstr.AddTags(b.Fields, []string{s.tagOnException}) } - appendString(b.Fields, "error.message", err.Error(), false) + _ = appendString(b.Fields, "error.message", err.Error(), false) return b, fmt.Errorf("failed in process function: %w", err) } @@ -273,8 +273,9 @@ func init() { } type sessionPool struct { - New func() *session - C chan *session + New func() *session + C chan *session + NewSessionsAllowed bool } func newSessionPool(p *goja.Program, c Config) (*sessionPool, error) { @@ -288,14 +289,28 @@ func newSessionPool(p *goja.Program, c Config) (*sessionPool, error) { s, _ := newSession(p, c, false) return s }, - C: make(chan *session, c.MaxCachedSessions), + C: make(chan *session, c.MaxCachedSessions), + NewSessionsAllowed: !c.OnlyCachedSessions, } pool.Put(s) + // If we are not allowed to create new sessions, pre-cache requested sessions + if !pool.NewSessionsAllowed { + for i := 0; i < c.MaxCachedSessions-1; i++ { + pool.Put(pool.New()) + } + } + return &pool, nil } func (p *sessionPool) Get() *session { + + if !p.NewSessionsAllowed { + return <-p.C + } + + // Try to get a session from the pool, if none is available, create a new one select { case s := <-p.C: return s diff --git a/libbeat/publisher/processing/default_test.go b/libbeat/publisher/processing/default_test.go index 6fe057850373..81bdb25dcbab 100644 --- a/libbeat/publisher/processing/default_test.go +++ b/libbeat/publisher/processing/default_test.go @@ -22,7 +22,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/libbeat/tests/integration/framework.go b/libbeat/tests/integration/framework.go index bb7e0f6c447b..1daf8948bc14 100644 --- a/libbeat/tests/integration/framework.go +++ b/libbeat/tests/integration/framework.go @@ -38,7 +38,7 @@ import ( "testing" 
"time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/libbeat/common/atomic" @@ -319,7 +319,7 @@ func (b *BeatProc) LogMatch(match string) bool { logFile := b.openLogFile() defer logFile.Close() - found := false + var found bool found, b.logFileOffset = b.logRegExpMatch(re, logFile, b.logFileOffset) if found { return found @@ -382,7 +382,7 @@ func (b *BeatProc) LogContains(s string) bool { logFile := b.openLogFile() defer logFile.Close() - found := false + var found bool found, b.logFileOffset = b.searchStrInLogs(logFile, s, b.logFileOffset) if found { return found diff --git a/metricbeat/docs/fields.asciidoc b/metricbeat/docs/fields.asciidoc index 0340311dff35..8f3ccce6fff1 100644 --- a/metricbeat/docs/fields.asciidoc +++ b/metricbeat/docs/fields.asciidoc @@ -67130,6 +67130,67 @@ format: bytes Average amount of time for a write operation from the datastore in milliseconds. +type: long + +-- + +[float] +=== datastorecluster + +Datastore Cluster + + + +*`vsphere.datastorecluster.name`*:: ++ +-- +The Datastore Cluster name. + + +type: keyword + +-- + +*`vsphere.datastorecluster.capacity.bytes`*:: ++ +-- +Total capacity of this storage pod, in bytes. + + +type: long + +format: bytes + +-- + +*`vsphere.datastorecluster.free_space.bytes`*:: ++ +-- +Total free space on this storage pod, in bytes. + + +type: long + +format: bytes + +-- + +*`vsphere.datastorecluster.datastore.names`*:: ++ +-- +List of all the Datastore names associated with the Datastore Cluster. + + +type: keyword + +-- + +*`vsphere.datastorecluster.datastore.count`*:: ++ +-- +Number of datastores in the Datastore Cluster. + + type: long -- @@ -67906,7 +67967,7 @@ type: long *`vsphere.virtualmachine.cpu.total.mhz`*:: + -- -Total CPU in Mhz. +Total Reserved CPU in Mhz. type: long @@ -68049,6 +68110,25 @@ type: keyword The uptime of the VM in seconds. +type: long + +-- + + +*`vsphere.virtualmachine.snapshot.info`*:: ++ +-- +Deatils of the snapshots of this virtualmachine. + +type: object + +-- + +*`vsphere.virtualmachine.snapshot.count`*:: ++ +-- +The number of snapshots of this virtualmachine. + type: long -- diff --git a/metricbeat/docs/modules/system.asciidoc b/metricbeat/docs/modules/system.asciidoc index a5da08c4fff6..2fc3930d8444 100644 --- a/metricbeat/docs/modules/system.asciidoc +++ b/metricbeat/docs/modules/system.asciidoc @@ -29,6 +29,21 @@ https://gitlab.com/apparmor/apparmor/wikis/TechnicalDoc_Proc_and_ptrace[AppArmor and other LSM software], even though the System module doesn't use `ptrace` directly. +[TIP] +.How and when metrics are collected +==== +Certain metrics monitored by the System module require multiple values to be +collected. +For example, the `system.process.cpu.total.norm.pct` field reports the percentage +of CPU time spent by the process since the last event. For this percentage to be +determined, the process needs to appear at least twice so that a performance delta +can be calculated. + +Note that in some cases a field like this may be missing from the System module +metricset if the process has not been available long enough to be included in +two periods of metric collection. +==== + [float] === Dashboard diff --git a/metricbeat/docs/modules/vsphere.asciidoc b/metricbeat/docs/modules/vsphere.asciidoc index 63a109879050..762f7ba255e5 100644 --- a/metricbeat/docs/modules/vsphere.asciidoc +++ b/metricbeat/docs/modules/vsphere.asciidoc @@ -11,7 +11,7 @@ This file is generated! 
See scripts/mage/docs_collector.go The vSphere module uses the https://github.com/vmware/govmomi[Govmomi] library to collect metrics from any Vmware SDK URL (ESXi/VCenter). This library is built for and tested against ESXi and vCenter 5.5, 6.0 and 6.5. -By default it enables the metricsets `cluster`, `network`, `resourcepool`, `datastore`, `host` and `virtualmachine`. +By default it enables the metricsets `cluster`, `datastore`, `datastorecluster`, `host`, `network`, `resourcepool` and `virtualmachine`. [float] === Dashboard @@ -35,7 +35,7 @@ in <>. Here is an example configuration: metricbeat.modules: - module: vsphere enabled: true - metricsets: ["cluster", "datastore", "host", "virtualmachine", "network", "resourcepool"] + metricsets: ["cluster", "datastore", "datastorecluster", "host", "network", "resourcepool", "virtualmachine"] # Real-time data collection – An ESXi Server collects data for each performance counter every 20 seconds. period: 20s hosts: ["https://localhost/sdk"] @@ -57,6 +57,8 @@ The following metricsets are available: * <> +* <> + * <> * <> @@ -69,6 +71,8 @@ include::vsphere/cluster.asciidoc[] include::vsphere/datastore.asciidoc[] +include::vsphere/datastorecluster.asciidoc[] + include::vsphere/host.asciidoc[] include::vsphere/network.asciidoc[] diff --git a/metricbeat/docs/modules/vsphere/datastorecluster.asciidoc b/metricbeat/docs/modules/vsphere/datastorecluster.asciidoc new file mode 100644 index 000000000000..07614187be1f --- /dev/null +++ b/metricbeat/docs/modules/vsphere/datastorecluster.asciidoc @@ -0,0 +1,29 @@ +//// +This file is generated! See scripts/mage/docs_collector.go +//// +:edit_url: https://github.com/elastic/beats/edit/main/metricbeat/module/vsphere/datastorecluster/_meta/docs.asciidoc + + +[[metricbeat-metricset-vsphere-datastorecluster]] +=== vSphere datastorecluster metricset + +beta[] + +include::../../../module/vsphere/datastorecluster/_meta/docs.asciidoc[] + +This is a default metricset. If the host module is unconfigured, this metricset is enabled by default. + +:edit_url: + +==== Fields + +For a description of each field in the metricset, see the +<> section. + +Here is an example document generated by this metricset: + +[source,json] +---- +include::../../../module/vsphere/datastorecluster/_meta/data.json[] +---- +:edit_url!: \ No newline at end of file diff --git a/metricbeat/docs/modules_list.asciidoc b/metricbeat/docs/modules_list.asciidoc index c50e63f2d3f0..c7a53f5b9093 100644 --- a/metricbeat/docs/modules_list.asciidoc +++ b/metricbeat/docs/modules_list.asciidoc @@ -311,8 +311,9 @@ This file is generated! 
See scripts/mage/docs_collector.go |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | .1+| .1+| |<> |<> |image:./images/icon-yes.png[Prebuilt dashboards are available] | -.6+| .6+| |<> beta[] +.7+| .7+| |<> beta[] |<> +|<> beta[] |<> |<> beta[] |<> beta[] diff --git a/metricbeat/include/list_common.go b/metricbeat/include/list_common.go index 89bc495bd8da..3cbb38284730 100644 --- a/metricbeat/include/list_common.go +++ b/metricbeat/include/list_common.go @@ -183,6 +183,7 @@ import ( _ "github.com/elastic/beats/v7/metricbeat/module/vsphere" _ "github.com/elastic/beats/v7/metricbeat/module/vsphere/cluster" _ "github.com/elastic/beats/v7/metricbeat/module/vsphere/datastore" + _ "github.com/elastic/beats/v7/metricbeat/module/vsphere/datastorecluster" _ "github.com/elastic/beats/v7/metricbeat/module/vsphere/host" _ "github.com/elastic/beats/v7/metricbeat/module/vsphere/network" _ "github.com/elastic/beats/v7/metricbeat/module/vsphere/resourcepool" diff --git a/metricbeat/mb/builders.go b/metricbeat/mb/builders.go index c9b1ace587d9..4c55e3e39442 100644 --- a/metricbeat/mb/builders.go +++ b/metricbeat/mb/builders.go @@ -22,7 +22,7 @@ import ( "fmt" "strings" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/joeshaw/multierror" conf "github.com/elastic/elastic-agent-libs/config" @@ -108,8 +108,7 @@ func createModule(r *Register, bm BaseModule) (Module, error) { func initMetricSets(r *Register, m Module) ([]MetricSet, error) { var ( - errs multierror.Errors - metricsets []MetricSet + errs multierror.Errors ) bms, err := newBaseMetricSets(r, m) @@ -117,6 +116,7 @@ func initMetricSets(r *Register, m Module) ([]MetricSet, error) { return nil, err } + metricsets := make([]MetricSet, 0, len(bms)) for _, bm := range bms { registration, err := r.metricSetRegistration(bm.Module().Name(), bm.Name()) if err != nil { diff --git a/metricbeat/metricbeat.reference.yml b/metricbeat/metricbeat.reference.yml index 816e2fcc8046..890031ddb4ca 100644 --- a/metricbeat/metricbeat.reference.yml +++ b/metricbeat/metricbeat.reference.yml @@ -1007,7 +1007,7 @@ metricbeat.modules: #------------------------------- VSphere Module ------------------------------- - module: vsphere enabled: true - metricsets: ["cluster", "datastore", "host", "virtualmachine", "network", "resourcepool"] + metricsets: ["cluster", "datastore", "datastorecluster", "host", "network", "resourcepool", "virtualmachine"] # Real-time data collection – An ESXi Server collects data for each performance counter every 20 seconds. period: 20s hosts: ["https://localhost/sdk"] diff --git a/metricbeat/module/system/_meta/docs.asciidoc b/metricbeat/module/system/_meta/docs.asciidoc index 8a106240d784..3b3dc461ebe4 100644 --- a/metricbeat/module/system/_meta/docs.asciidoc +++ b/metricbeat/module/system/_meta/docs.asciidoc @@ -18,6 +18,21 @@ https://gitlab.com/apparmor/apparmor/wikis/TechnicalDoc_Proc_and_ptrace[AppArmor and other LSM software], even though the System module doesn't use `ptrace` directly. +[TIP] +.How and when metrics are collected +==== +Certain metrics monitored by the System module require multiple values to be +collected. +For example, the `system.process.cpu.total.norm.pct` field reports the percentage +of CPU time spent by the process since the last event. For this percentage to be +determined, the process needs to appear at least twice so that a performance delta +can be calculated. 
+ +Note that in some cases a field like this may be missing from the System module +metricset if the process has not been available long enough to be included in +two periods of metric collection. +==== + [float] === Dashboard diff --git a/metricbeat/module/vsphere/_meta/README.md b/metricbeat/module/vsphere/_meta/README.md index 06373b27e634..5e40dd26ba7a 100644 --- a/metricbeat/module/vsphere/_meta/README.md +++ b/metricbeat/module/vsphere/_meta/README.md @@ -1,7 +1,7 @@ # Testing using GOVCSIM. -To test the vsphere module without a real Vmware SDK URL you can use the following setup. Govcsim is a vCenter Server and ESXi API based simulator written using govmomi. It creates a vCenter Server model with a datacenter, hosts, cluster, resource pools, networks and a datastore. +To test the vsphere module without a real Vmware SDK URL you can use the following setup. Govcsim is a vCenter Server and ESXi API based simulator written using govmomi. It creates a vCenter Server model with a datacenter, datastore cluster, hosts, cluster, resource pools, networks and a datastore. Requirements: @@ -30,10 +30,11 @@ Now setup your metricbeat config to connect to Govcsim: metricsets: - cluster - datastore + - datastorecluster - host - - virtualmachine - network - resourcepool + - virtualmachine enabled: true period: 5s hosts: ["https://127.0.0.1:8989/sdk"] diff --git a/metricbeat/module/vsphere/_meta/config.reference.yml b/metricbeat/module/vsphere/_meta/config.reference.yml index 5e6bfdb488ca..91a32da76776 100644 --- a/metricbeat/module/vsphere/_meta/config.reference.yml +++ b/metricbeat/module/vsphere/_meta/config.reference.yml @@ -1,6 +1,6 @@ - module: vsphere enabled: true - metricsets: ["cluster", "datastore", "host", "virtualmachine", "network", "resourcepool"] + metricsets: ["cluster", "datastore", "datastorecluster", "host", "network", "resourcepool", "virtualmachine"] # Real-time data collection – An ESXi Server collects data for each performance counter every 20 seconds. period: 20s hosts: ["https://localhost/sdk"] diff --git a/metricbeat/module/vsphere/_meta/config.yml b/metricbeat/module/vsphere/_meta/config.yml index 09927fb32376..173be03fc4fb 100644 --- a/metricbeat/module/vsphere/_meta/config.yml +++ b/metricbeat/module/vsphere/_meta/config.yml @@ -1,11 +1,12 @@ - module: vsphere - #metricsets: + # metricsets: # - cluster # - datastore + # - datastorecluster # - host - # - virtualmachine # - network # - resourcepool + # - virtualmachine # Real-time data collection – An ESXi Server collects data for each performance counter every 20 seconds. period: 20s hosts: ["https://localhost/sdk"] diff --git a/metricbeat/module/vsphere/_meta/docs.asciidoc b/metricbeat/module/vsphere/_meta/docs.asciidoc index e1329634c212..353cfda1e7b6 100644 --- a/metricbeat/module/vsphere/_meta/docs.asciidoc +++ b/metricbeat/module/vsphere/_meta/docs.asciidoc @@ -1,6 +1,6 @@ The vSphere module uses the https://github.com/vmware/govmomi[Govmomi] library to collect metrics from any Vmware SDK URL (ESXi/VCenter). This library is built for and tested against ESXi and vCenter 5.5, 6.0 and 6.5. -By default it enables the metricsets `cluster`, `network`, `resourcepool`, `datastore`, `host` and `virtualmachine`. +By default it enables the metricsets `cluster`, `datastore`, `datastorecluster`, `host`, `network`, `resourcepool` and `virtualmachine`. 
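The System module tip added above notes that `system.process.cpu.total.norm.pct` can only be reported once a process has been observed in two collection periods, because the value is a delta between samples. The standalone sketch below illustrates that calculation; the `cpuSample` type and the numbers are made up for illustration and are not the System module's implementation.

[source,go]
----
package main

import (
	"fmt"
	"time"
)

// cpuSample is a hypothetical snapshot of the CPU time a process has
// consumed, taken at a given wall-clock instant.
type cpuSample struct {
	at      time.Time
	cpuTime time.Duration // user+system time consumed so far
}

// normalizedPct shows why two samples are needed: the percentage is the CPU
// time delta divided by the wall-clock delta, normalized by the core count.
func normalizedPct(prev, curr cpuSample, numCores int) float64 {
	wall := curr.at.Sub(prev.at)
	if wall <= 0 || numCores <= 0 {
		return 0
	}
	used := curr.cpuTime - prev.cpuTime
	return used.Seconds() / wall.Seconds() / float64(numCores)
}

func main() {
	start := time.Now()
	prev := cpuSample{at: start, cpuTime: 1200 * time.Millisecond}
	curr := cpuSample{at: start.Add(10 * time.Second), cpuTime: 3200 * time.Millisecond}

	// 2s of CPU over 10s of wall clock on 4 cores -> 0.05 (5%).
	fmt.Printf("norm.pct ~ %.2f\n", normalizedPct(prev, curr, 4))
}
----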
[float] === Dashboard diff --git a/metricbeat/module/vsphere/datastorecluster/_meta/data.json b/metricbeat/module/vsphere/datastorecluster/_meta/data.json new file mode 100644 index 000000000000..10a4d2f98de8 --- /dev/null +++ b/metricbeat/module/vsphere/datastorecluster/_meta/data.json @@ -0,0 +1,33 @@ +{ + "@timestamp": "2016-05-23T08:05:34.853Z", + "service": { + "address": "https://localhost:8980/sdk", + "type": "vsphere" + }, + "event": { + "dataset": "vsphere.datastorecluster", + "module": "vsphere", + "duration": 15443161 + }, + "metricset": { + "period": 20000, + "name": "datastorecluster" + }, + "vsphere": { + "datastorecluster": { + "name": "datastore_cluster1", + "capacity": { + "bytes": 8795019280384 + }, + "free_space": { + "bytes": 8788836876288 + }, + "datastore": { + "count": 1, + "names": [ + "LocalDS_0" + ] + } + } + } +} \ No newline at end of file diff --git a/metricbeat/module/vsphere/datastorecluster/_meta/docs.asciidoc b/metricbeat/module/vsphere/datastorecluster/_meta/docs.asciidoc new file mode 100644 index 000000000000..858a9fa3d50b --- /dev/null +++ b/metricbeat/module/vsphere/datastorecluster/_meta/docs.asciidoc @@ -0,0 +1 @@ +This is the Datastore Cluster metricset of the module vsphere. diff --git a/metricbeat/module/vsphere/datastorecluster/_meta/fields.yml b/metricbeat/module/vsphere/datastorecluster/_meta/fields.yml new file mode 100644 index 000000000000..50ce1ca9d448 --- /dev/null +++ b/metricbeat/module/vsphere/datastorecluster/_meta/fields.yml @@ -0,0 +1,28 @@ +- name: datastorecluster + type: group + release: beta + description: > + Datastore Cluster + fields: + - name: name + type: keyword + description: > + The Datastore Cluster name. + - name: capacity.bytes + type: long + description: > + Total capacity of this storage pod, in bytes. + format: bytes + - name: free_space.bytes + type: long + description: > + Total free space on this storage pod, in bytes. + format: bytes + - name: datastore.names + type: keyword + description: > + List of all the Datastore names associated with the Datastore Cluster. + - name: datastore.count + type: long + description: > + Number of datastores in the Datastore Cluster. \ No newline at end of file diff --git a/metricbeat/module/vsphere/datastorecluster/data.go b/metricbeat/module/vsphere/datastorecluster/data.go new file mode 100644 index 000000000000..d7c7903c62c5 --- /dev/null +++ b/metricbeat/module/vsphere/datastorecluster/data.go @@ -0,0 +1,40 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
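The `datastorecluster` mapping and test code that follows builds nested `mapstr.M` documents and reads them back with dotted keys such as `capacity.bytes`. A tiny sketch of that accessor pattern, using values shaped like the sample document above (the literal values are illustrative):

[source,go]
----
package main

import (
	"fmt"
	"log"

	"github.com/elastic/elastic-agent-libs/mapstr"
)

func main() {
	// A made-up event shaped like the datastorecluster sample document.
	event := mapstr.M{
		"name": "datastore_cluster1",
		"capacity": mapstr.M{
			"bytes": int64(8795019280384),
		},
		"datastore": mapstr.M{
			"names": []string{"LocalDS_0"},
			"count": 1,
		},
	}

	// GetValue resolves dotted keys through nested maps, which is what the
	// metricset tests rely on when asserting on "capacity.bytes" and friends.
	capacity, err := event.GetValue("capacity.bytes")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("capacity.bytes:", capacity)

	names, err := event.GetValue("datastore.names")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("datastore.names:", names)
}
----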
+ +package datastorecluster + +import ( + "github.com/vmware/govmomi/vim25/mo" + + "github.com/elastic/elastic-agent-libs/mapstr" +) + +func (m *DatastoreClusterMetricSet) mapEvent(datastoreCluster mo.StoragePod, data *metricData) mapstr.M { + return mapstr.M{ + "name": datastoreCluster.Name, + "capacity": mapstr.M{ + "bytes": datastoreCluster.Summary.Capacity, + }, + "free_space": mapstr.M{ + "bytes": datastoreCluster.Summary.FreeSpace, + }, + "datastore": mapstr.M{ + "names": data.assetNames.outputDsNames, + "count": len(data.assetNames.outputDsNames), + }, + } +} diff --git a/metricbeat/module/vsphere/datastorecluster/data_test.go b/metricbeat/module/vsphere/datastorecluster/data_test.go new file mode 100644 index 000000000000..41d1736777ef --- /dev/null +++ b/metricbeat/module/vsphere/datastorecluster/data_test.go @@ -0,0 +1,57 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package datastorecluster + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/vmware/govmomi/vim25/mo" + "github.com/vmware/govmomi/vim25/types" +) + +func TestEventMapping(t *testing.T) { + datastoreClusterTest := mo.StoragePod{ + Summary: &types.StoragePodSummary{ + Capacity: 100, + FreeSpace: 50, + }, + Folder: mo.Folder{ + ManagedEntity: mo.ManagedEntity{ + Name: "Folder1", + }, + }, + } + + event := (&DatastoreClusterMetricSet{}).mapEvent(datastoreClusterTest, &metricData{assetNames: assetNames{outputDsNames: []string{"DCS_0"}}}) + + name, _ := event.GetValue("name") + assert.Equal(t, "Folder1", name) + + capacity, _ := event.GetValue("capacity.bytes") + assert.Equal(t, int64(100), capacity) + + freeSpace, _ := event.GetValue("free_space.bytes") + assert.Equal(t, int64(50), freeSpace) + + datastoreNames, _ := event.GetValue("datastore.names") + assert.Equal(t, []string{"DCS_0"}, datastoreNames) + + datastoreCount, _ := event.GetValue("datastore.count") + assert.Equal(t, 1, datastoreCount) +} diff --git a/metricbeat/module/vsphere/datastorecluster/datastorecluster.go b/metricbeat/module/vsphere/datastorecluster/datastorecluster.go new file mode 100644 index 000000000000..dd26fa7ba046 --- /dev/null +++ b/metricbeat/module/vsphere/datastorecluster/datastorecluster.go @@ -0,0 +1,138 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package datastorecluster + +import ( + "context" + "fmt" + "strings" + + "github.com/vmware/govmomi" + "github.com/vmware/govmomi/property" + "github.com/vmware/govmomi/view" + "github.com/vmware/govmomi/vim25/mo" + + "github.com/elastic/beats/v7/metricbeat/mb" + "github.com/elastic/beats/v7/metricbeat/module/vsphere" +) + +// init registers the MetricSet with the central registry as soon as the program +// starts. The New function will be called later to instantiate an instance of +// the MetricSet for each network is defined in the module's configuration. After the +// MetricSet has been created then Fetch will begin to be called periodically. + +func init() { + mb.Registry.MustAddMetricSet("vsphere", "datastorecluster", New, + mb.WithHostParser(vsphere.HostParser), + mb.DefaultMetricSet(), + ) +} + +// MetricSet type defines all fields of the MetricSet. +type DatastoreClusterMetricSet struct { + *vsphere.MetricSet +} + +// New creates a new instance of the MetricSet. +func New(base mb.BaseMetricSet) (mb.MetricSet, error) { + ms, err := vsphere.NewMetricSet(base) + if err != nil { + return nil, fmt.Errorf("failed to create vSphere metricset: %w", err) + } + return &DatastoreClusterMetricSet{ms}, nil +} + +type metricData struct { + assetNames assetNames +} + +type assetNames struct { + outputDsNames []string +} + +func (m *DatastoreClusterMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { + ctx, cancel := context.WithCancel(ctx) + defer cancel() + + client, err := govmomi.NewClient(ctx, m.HostURL, m.Insecure) + if err != nil { + return fmt.Errorf("error in NewClient: %w", err) + } + + defer func() { + if err := client.Logout(ctx); err != nil { + m.Logger().Errorf("error trying to logout from vSphere: %w", err) + } + }() + + c := client.Client + + v, err := view.NewManager(c).CreateContainerView(ctx, c.ServiceContent.RootFolder, []string{"StoragePod"}, true) + if err != nil { + return fmt.Errorf("error in creating container view: %w", err) + } + + defer func() { + if err := v.Destroy(ctx); err != nil { + m.Logger().Errorf("error trying to destroy view from vSphere: %w", err) + } + }() + + var datastoreCluster []mo.StoragePod + err = v.Retrieve(ctx, []string{"StoragePod"}, []string{"name", "summary", "childEntity"}, &datastoreCluster) + if err != nil { + return fmt.Errorf("error in retrieve from vsphere: %w", err) + } + + pc := property.DefaultCollector(c) + for i := range datastoreCluster { + if ctx.Err() != nil { + return ctx.Err() + } + + assetNames, err := getAssetNames(ctx, pc, &datastoreCluster[i]) + if err != nil { + m.Logger().Errorf("Failed to retrieve object from host %s: %w", datastoreCluster[i].Name, err) + } + + reporter.Event(mb.Event{MetricSetFields: m.mapEvent(datastoreCluster[i], &metricData{assetNames: assetNames})}) + } + + return nil +} + +func getAssetNames(ctx context.Context, pc *property.Collector, dsc *mo.StoragePod) (assetNames, error) { + var objects []mo.ManagedEntity + if len(dsc.ChildEntity) > 0 { + if err := pc.Retrieve(ctx, dsc.ChildEntity, []string{"name"}, &objects); err != nil { + return assetNames{}, err + } + } 
+ + outputDsNames := make([]string, 0) + for _, ob := range objects { + if ob.Reference().Type == "Datastore" { + name := strings.ReplaceAll(ob.Name, ".", "_") + outputDsNames = append(outputDsNames, name) + } + } + + return assetNames{ + outputDsNames: outputDsNames, + }, nil +} diff --git a/metricbeat/module/vsphere/datastorecluster/datastorecluster_test.go b/metricbeat/module/vsphere/datastorecluster/datastorecluster_test.go new file mode 100644 index 000000000000..216484212917 --- /dev/null +++ b/metricbeat/module/vsphere/datastorecluster/datastorecluster_test.go @@ -0,0 +1,88 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +package datastorecluster + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/simulator" + + mbtest "github.com/elastic/beats/v7/metricbeat/mb/testing" + "github.com/elastic/elastic-agent-libs/mapstr" +) + +func TestFetchEventContents(t *testing.T) { + model := simulator.VPX() + model.Pod = 1 + err := model.Create() + require.NoError(t, err, "failed to create model") + t.Cleanup(model.Remove) + + ts := model.Service.NewServer() + t.Cleanup(ts.Close) + + f := mbtest.NewReportingMetricSetV2WithContext(t, getConfig(ts)) + events, errs := mbtest.ReportingFetchV2WithContext(f) + require.Empty(t, errs, "Expected no errors during fetch") + require.NotEmpty(t, events, "Expected to receive at least one event") + + event := events[0].MetricSetFields + + t.Logf("Fetched event from %s/%s event: %+v", f.Module().Name(), f.Name(), event) + + name, ok := event["name"].(string) + require.True(t, ok, "Expected 'name' field to be of type mapstr.M") + assert.NotNil(t, name, "Expected 'name' field to be non-nil") + + capacity, ok := event["capacity"].(mapstr.M) + require.True(t, ok, "Expected 'capacity' field to be of type mapstr.M") + assert.GreaterOrEqual(t, capacity["bytes"], int64(0), "Expected 'capacity.bytes' to be non-negative") + + freeSpace, ok := event["free_space"].(mapstr.M) + require.True(t, ok, "Expected 'free_space' field to be of type mapstr.M") + assert.GreaterOrEqual(t, freeSpace["bytes"], int64(0), "Expected 'free_space.bytes' to be non-negative") +} + +func TestDatastoreCluster(t *testing.T) { + model := simulator.VPX() + model.Pod = 1 + err := model.Create() + require.NoError(t, err, "failed to create model") + t.Cleanup(model.Remove) + + ts := model.Service.NewServer() + t.Cleanup(ts.Close) + + f := mbtest.NewReportingMetricSetV2WithContext(t, getConfig(ts)) + + err = mbtest.WriteEventsReporterV2WithContext(f, t, "") + assert.NoError(t, err, "failed to write events with reporter") +} + +func getConfig(ts *simulator.Server) map[string]interface{} { + return map[string]interface{}{ + "module": "vsphere", + "metricsets": 
[]string{"datastorecluster"}, + "hosts": []string{ts.URL.String()}, + "username": "user", + "password": "pass", + "insecure": true, + } +} diff --git a/metricbeat/module/vsphere/fields.go b/metricbeat/module/vsphere/fields.go index c662eb7e840f..729c11e62645 100644 --- a/metricbeat/module/vsphere/fields.go +++ b/metricbeat/module/vsphere/fields.go @@ -32,5 +32,5 @@ func init() { // AssetVsphere returns asset data. // This is the base64 encoded zlib format compressed contents of module/vsphere. func AssetVsphere() string { - return "eJzUXEtvGzkSvvtXFOayCeBo7j4skE0wkwDrZBB5fDUodknNNZvsJaslyL9+QLJfUj/0Ysu2Dj7IEuurd7GqWp/gGbd3sLZ5igZvAEiQxDv4bT337/x2A5Cg5UbkJLS6g3/fAACU/4VMJ4V0XzMokVm8gxW7AVgKlIm98x/9BIpl2CbhXrTN3YeNLvLynR4quwe1D+OysISmfr/vQPeqYS2QWOv9XmLh9SUcDUIttcmY+8is9YF9RG1UCSNmSddMjmMbOq19ovtrO/+tTnzG7UabpOf/I/xVr/8KS6CXwKQEShG+VuADUWDWai4YYQIbQan/TCn22SBergtFg3ilVqvzwP4osgUaB7eGeQLCRj/2iWu1FPsozlcQSzJhrbMRrhUZLWeo2EJin1IClYXWEpk6Tw7fVSI4I7SwSZFSNGDJCE4NDihxgLBQQhlW15WxVlHj22dYIqPC4CDKCmGq7b49na+s6a3zm7Z0putc0dUdylO9vA2zVyH9CA+gq4KtO3WAHtJGm+d3ZAM/AuK3bwYl0FMsYSzT9etkBFffIbt1RPUaS7qc5YwL2s6WBnG22FJHfIOqPSCzPwwi+AOd1Jw8asT7+gulwh10yXdgkiYmo+J8cCfGB1pYTKLi/NtiMhHMnPdnCcuZxORpKTXb/8ABsH+h4aiIrdCBrYHWdMHTHYCdhy/3Al9aBy1eCP1DSLRbS5j5U4bT6Kwv9J2ryybcuaPtCNW+gHYBu/tBLK1zWT8GofNY9jsnbZw9fP/9J3wpqyy2Whlc+aj5XeUF/f6zoLwg+Jmj8XcH6ywJ5si1Gihx4mbU3TK+n6JBFtezfzHybuIOFmrl3QWWRmeXerlHKhmh4lXkzGKB/rxGr06WOb/wIUlk6EAB84RBV0rs4QWEgkxIKaxX7YD1WWJURLT+uT/vQPisb9zZRC7/eD/A7jqb2t0f78ecfWMExa0BKst2J9eWTfpSuw5AX8OwPeUzLbtTBu7dzk6uAP1FpNVoCWiqayKqtTBaZahodlmJmBehTMjSl5i1zJe//nbCuv/20m+Ojm6p22iEQ7F3DGVfD8cj7KvhQ3Qbl5g4DiS73apDaKaJg0nTi9KqrkYGsAj7PGsVrWwVN1A9pG2Ht2WpUlervDAGFcktLNDFMa6VLTJfj4M2FXxU5NCdHM48cwmuBccqprEQhCaPaoKA2DNaF5S5znKJhMAUzL/Mv7s3MqaSEFbydGsFZxIC0OMSuOdsojj9TaxStATl8bBmskBg3GhrvaE74tZfNJya6mr3eODRC71KCar2gXCf8+VSk0qEfQZkPAXbU/cebVDxs/kQfEeJUNWJPQ7++Df9eZH5uOPweZE74w4Z3fhOawu0MxNP+3T0GWbaTNVQufeHX45uqj5KLHzTtE8uRhf3tll3kkebt0+Ri4GdxuUo5anLEHU8kmlKEFU1mw8WIBWOBVPJRiSUzsgwZTMX+aZJEi4kASPYpIKn4ea0YRZaZCEpjKtGHHKhCM2ayRk8pMJllNygRUXW/7dGXd19K3ZOd4COHAxyFOvrCqGi+TYkMFmmqvyjrXOXs2r2Q9qqwlmvNM7mL2f8Gcnu2Pk0XlhSarM57oQVtNr0psVVkzkKFBqjzTXF5udNgerpIizRXkmSbajnSdW7WkyQoWxRY1CPQ5gVkgRnlq6g+prW+b7TwJ1Y912sp2m+JdfJld/BehzExOg8x+QKim9ngkqcFfWToE6s9DpDXQTyet4+Ci/2GOIhRdCuwJESUmSS0pKEr43rLoVL4weauQ3GIicxcDU5rxvmZV8eu4urbJz4CHkqzNcecUw21FkLQwWTkDGeCjXU0hxey4m2gFne7j4Z30tMzlnEZJyjtWIh++2pf63tgKS662yMwDFHoFXVmbMQ9gsLgwmQhtzotUgQyBX0vQV7s5fkvjeL7ald1E6n5b5AyiwkSMh9WV5DD6MYYW0xsk3QC/GNrbj+8ItN5XXJ7yk4HlXg1/f6hrQCV90YOwnaNI0cJ6rRi+VUqeRPVC6PVGmkhPBjDELc/ZmHbb7POXzA2Wp2W6H4MCemEmaSj7fwVVgyYlEQJo8hYOba0MehWP3uvKSTBd6kw5yDslkpsbowHHOt5RTp61d5PjgCJw+3D4yxo9lTGEB2p8MQQU//YVZwPyrO0Xj2FUcfOIQlwe1t7wi5jS7BjKm+cf3bgOfHpBKz/RW/Nr4+pcCIYtrn9/N9kPcj+YfOtNjJonKLivtboJRR8KXH+7Cn77l2LnYLzAJnkhehRFts4euveVdgsJc4BB/QaWzeArEdxtpaK3dh9sJIfxAP45y4jjeJ1awKtNTbxN2lcbmE/3SUSsFAQUKKlxDh9pyod0RVszTYx91nzN9ArsGXn2ZNyta1Isg1pLUbQ4LYGvlEix+nWEoZY67FfRllSos5PtDEsqLciDUbWJE4yOsRfDoeXXnrfGBYxcLCyjBVFl8dZkPZo7T6ZFPmrsVp42V97B/HejjrPXDeYrf+VimJBdIGUXX3aU+SxIb5xt87EEV1Df+X9ajB5ox3lmeP53zBpNRaTcy7FS/1/bCkCIkRa/+I8Km+fRxjeo0mRTb8XOZ7TA1NfPxQSehjsJ2NkBIWWC+6da2HLfQaQZANRU5z4v8LYXygjVBmVCuRV0sfjYQqhfd41+DiJmkwhTPAx/vLsgjXWW7QDq0rHeT6SD9quG0IVopsmGzvpT7en+tM0/TIHBeq1Scz7Zv++x28ROxSvULnp5xSjOgCrtxGOxFirYbwtfJbBztTI4gGTzr9yQl/9RL7Erl0YVCMPOw/c3/iLym2/XZkRy4q3bJBDPdlFhvek9QRQ8Q+1fJJRLWCeZj6DMOY+jmZ9NWekxmjHPk5mc9rJiRbSDxIvL0oPNy8ibEurJdlzybOVvNgQyYS1vKxsCgL4vEFu7MmHk+y3gzjo23v3EcAywtLOnsKOaMXol78DztP44c3ny75pRBPuExWb23b/F398NPOvK95jq71+x/VDG23kHjNgV/S+6tPx+Cc6
ndkrqUg1fNrMm9PP+einOqe9HP3jsRUsnNPOmrwE30hrVlFK9e8mlW0nmesb/4JAAD//68uyFk=" + return "eJzUXM1v2zgWv/eveJjLtkDqueewQDfFTAts2kGdyTWgqWeLG4rUkpQN568fkBQl2fqwbFNK4kNRODbf733yfcmf4Rn3t7DVeYoKPwAYZjjewm/bpXvntw8ACWqqWG6YFLfw7w8AAOVfIZNJwe3XFHIkGm9hQz4ArBnyRN+6j34GQTJskrAvs8/th5Us8vKdDiqHBzUPo7zQBlX1fteB9lXBWqEhjfc7ifnXnT8amFhLlRH7kUXjA8eImqgSYog2smJyGFvfac0T7b+69ddw4jPud1IlHX8f4C+8/su0AbkGwjmYFOFrAO+JAtFaUkYMJrBjJnWfKcW+6MVLZSFML14uxeYysD+KbIXKwq1gnoGw1o9+olKs2TGKyxVEkoxpbW2ESmGU5AsUZMWxSymeykpKjkRcJofvImGUGNSwS9GkqEAbxaipcUCJA5iGEkq/umbGGqLGty+wRmIKhb0oA8JU6mN7ulxZ01vnN6nNha4zo6tblOd6eRNmp0K6EZ5AF4KtPbWHHpqdVM/vyAZ+eMRv3wxKoOdYwtBN162TAVxdhxzmEeE1dOlSkhPKzH6xVoiL1d60xNer2hMy+0MhgjvQSs3Ko0J8rD+fKtxCm3wLppGG8Kg4H+yJ8YEWGpOoOP/WmEwEM6fdt4SmhGPytOaSHH/gBNi/UFEUhmzQgq2AVnTB0e2BnfsvdwJfawstXgj9g3HUe20wc6f0X6OLrtB3qS7rcGeP1gNUuwLaFeweB7G0usu6MTCZx7LfpZHK2sP333/CXZllkc1G4cZFze8iL8zvPwuTFwZ+5qhc7aCtJcESqRQ9KU7cG/Uwje+mqJDE9exfxDg3sQczsXHuAmsls2u93CHlxKCgIXJmsUB/2aJTJ8msX7iQxDK0oIA4wiCDEjt4ASYgY5wz7VTbY33aEFNEtP6lO+9E+Kwq7mwil3+872F3m03t7o/3Q86+U8zEzQGCZduTK8s28lq79kBfw7Ad5Qstuz8NnLAfUwe0uxaRobwwblh9OOiQnC5bqvQkfqJXZSAuCjANuryXcpncWOU5kufbpM2fn3ROaFwP8qDt4eAOBymiwq69cOLQM6ZB1jKRvkZUwDxNgE7qJhkTp4D1tFrOLudcV6HRNfWhJfR8UGyZkiJDYRbX1Xt54XP+LH2JWZjc/fW3Fdb9t5cej86LEKijEfa+MYayK27jEXal7Sm6s3lWcuhZr+4zUlSlRQ8Wpp8XjQqUbCLHzLR5e4dAWQV+WiiFwvA9rNAmJVQKXWSuuAapAnwUxqI7P6Ba5hLcMoohQSE+o5g8RWEGDHlGbTMsKrOco0EgApZ3y+/2jYyIxIeVPN1rRgkHD3RcNu44myjp+sY2KWoD5fGwJbxAIFRJrZ2hW+LadQ2smqrSdTzw6FVbUIKofMA3Z1ztU+eFTD8DEpqC7ihiRxtU/NS8D76lZFBUWXoc/PHbdssic3HH4nMit8bt03PlxiYN0FfkRxlmUk3VHb13h1+PbqqmaCx80/RCr0YXt8apxkKDk5inyMnAwRRikPLUaYgYj2SaFESEydHJBCTgWBGR7Fhi0oVRROjMRr5pLgkbkoAY2KWMpr4NsiMaGmQhKZTNRixyJgyqLeELeLClnsJcoUZhtPtrhTo0sgI75ztASw4KKbLtvEIINN+GBCa7qYJ/NHVu76yKfX9thXDWKY2L+csJfUajD+x8Gi8sKTXZHHbCAK0yvWlxVWRGgUKlpJpTbK4X4qmeL8IS7UySbEK9TKrO1WKC9GmLGII6DmFWcMMo0WYG1Ve0LvedGu7Eum9jPU/zDblOrvwW1nEQEyXzHJMZFN+8CYI4A/WzoE6s9OqGugrkfN4+CC/2TPEhRZA2weEcUiTcpCUJlxtXXQp7jZ9o5tYYi9ywntLksm6Yk3157CGusnHiIuS5MF97XjnZhHbLlCkIh4zQlIm+lmb/jl206V1Z3X1WrpeYXLJVTShFrdmKd9tT947qCUm1d1OJAcucASlCZ06DXxYuFCZgJORKblmCfnrVmbDXS4b2e4vYntpGbXVaLv+kREOCBqlLyyvofhTDtC4GVoM6Ib6xffUfbuhWlktu6cjyKDy/rtfXpxWYdf3zLGjTNHKsqAYLy6mukj9R2HskXCMlhB9DEOIuwz3s82PO4SMuNoubgOLj0hCREJV8uoGvTBvFVoXB5NEHzFwq86kvVr87L2ndAm/SYS5BWe+HaVkoirmUfIrr61d5PlgCZw+3T4yxo9mTH0C2p8MQQU//IZpRNyrOUTn2BUUXOJg2jOqbzhFyE12CGRFd4/q3Ac+NSTlmx/u6TXxdSoEBxTTP7+b7JO8j+YfWtNjKIrhF4P4GTEqM96XHe//QjePautgNEA2UcFr4FG21h6+/lm2BwdHFwWiPTmPz5okdMNbUWrnYdhRGuoO4H+fEdbxJrGZToDadTdxDGtdL+E9LqRQMFIZx9uIj3JETdY6oKpZ6+7jHjLkKZA6+3DRrUrbmiiBzSOswhnix1fKJFj/OsZQyxszFfRllSosZH2hiWVGu2Jb0rEic5HUEn5ZHm95aH+hXMdOwUUSUyVeLWZ/2CCk+65TYsjitvayL/XGs+7PeA+cNdqtvlZJYodkhivZy/FmS2BHX+HsHoghl+L+0Q+2Xey/nfEU4l1JMzLtmL1V9WFKERLGte97/XN8ex5jcokqR9D9k/R6vhjo+fgwS+uRtZ8c4hxVWi25t6yEruUVgRvskpz7x/wVTLtBGSDPCSuRs10ctoaDwDu/qXdw0ElRhDfDx/rpbhMosV6j71pVOcj3Sj2pua4JBkTWTzb3Ux/tLnWm6BzpEo0+mmpX++x28ROxSvULnp5xSDOgCZm6jnQmxUoP/Wvmtk52pAUS9J53/5IQrvdixRK5dGGQDv9yxsP/EX1Js+u3AjlxUumWDGO7LW6x/T1JGDBHHVMvHisUGln7qM/AE2sTPyaSzPyfzCzWq7UgIkR+Y+bIljJMVx5PEmxvD/V2cGHvDcl02b+KsN/d2ZiJhLZ8Pi7IpHl+wB/vi8STrzDA+2ubyfQSwtNBGZk/+8uiEKFf/w9ZvbPg3n675/R9HuLy13tra+bv6ObeDwV/9QF3jIdUwTDvMKF5z8pd0/pbbGJxT/TrUXAoSHb8R9fb0cynKqQqmn4fFEhHJQcE0agIUfTOt3kkr973qnbQeqQiS61TGW7phYi177aAzcLdY+4rEMF6ZZ4Coq98YmM8oXWFemeB4JPV//gkAAP//jtwuLA==" } diff --git a/metricbeat/module/vsphere/virtualmachine/_meta/data.json b/metricbeat/module/vsphere/virtualmachine/_meta/data.json index d04a8efb42db..d2917e5e6a9a 100644 --- 
a/metricbeat/module/vsphere/virtualmachine/_meta/data.json +++ b/metricbeat/module/vsphere/virtualmachine/_meta/data.json @@ -66,7 +66,21 @@ "datastore.names": [ "VxRailtec-Virtual-SAN-Datastore-247df-bc1d-5aad2" ], - "host.id": "host-20" + "host.id": "host-20", + "snapshot.info": [ + { + "Name": "Snapshot_1", + "Description": "Test snapshot 1", + "CreateTime": "2024-09-01T12:34:56Z" + + }, + { + "Name": "Snapshot_2", + "Description": "Test snapshot 2", + "CreateTime": "2024-09-03T02:34:56Z" + } + ], + "snapshot.count": 2 } } } diff --git a/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml b/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml index 6720da187652..fcfe2b9bbb30 100644 --- a/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml +++ b/metricbeat/module/vsphere/virtualmachine/_meta/fields.yml @@ -27,7 +27,7 @@ - name: cpu.total.mhz type: long description: > - Total CPU in Mhz. + Total Reserved CPU in Mhz. - name: cpu.free.mhz type: long description: > @@ -91,5 +91,14 @@ type: long description: > The uptime of the VM in seconds. + - name: snapshot + type: group + fields: + - name: info + type: object + description: Details of the snapshots of this virtual machine. + - name: count + type: long + description: The number of snapshots of this virtual machine. diff --git a/metricbeat/module/vsphere/virtualmachine/data.go b/metricbeat/module/vsphere/virtualmachine/data.go index 5c814105f90b..69d0bd1df541 100644 --- a/metricbeat/module/vsphere/virtualmachine/data.go +++ b/metricbeat/module/vsphere/virtualmachine/data.go @@ -72,5 +72,10 @@ func (m *MetricSet) mapEvent(data VMData) mapstr.M { if len(data.DatastoreNames) > 0 { event["datastore.names"] = data.DatastoreNames } + if len(data.Snapshots) > 0 { + event["snapshot.info"] = data.Snapshots + event["snapshot.count"] = len(data.Snapshots) + } + return event } diff --git a/metricbeat/module/vsphere/virtualmachine/data_test.go b/metricbeat/module/vsphere/virtualmachine/data_test.go index 351daea7c118..e11d39a81973 100644 --- a/metricbeat/module/vsphere/virtualmachine/data_test.go +++ b/metricbeat/module/vsphere/virtualmachine/data_test.go @@ -19,6 +19,7 @@ package virtualmachine import ( "testing" + "time" "github.com/stretchr/testify/assert" "github.com/vmware/govmomi/vim25/mo" @@ -58,6 +59,22 @@ func TestEventMapping(t *testing.T) { "customField1": "value1", "customField2": "value2", }, + Snapshots: []VMSnapshotData{ + { + ID: 123, + Name: "Snapshot_1", + Description: "Test snapshot 1", + CreateTime: time.Time{}, + State: types.VirtualMachinePowerStatePoweredOff, + }, + { + ID: 745, + Name: "Snapshot_2", + Description: "Test snapshot 2", + CreateTime: time.Time{}, + State: types.VirtualMachinePowerStatePoweredOn, + }, + }, } event := m.mapEvent(data) @@ -108,6 +125,23 @@ func TestEventMapping(t *testing.T) { "network.names": []string{"network-1", "network-2"}, "network_names": []string{"network-1", "network-2"}, "datastore.names": []string{"ds1", "ds2"}, + "snapshot.info": []VMSnapshotData{ + { + ID: 123, + Name: "Snapshot_1", + Description: "Test snapshot 1", + CreateTime: time.Time{}, + State: types.VirtualMachinePowerStatePoweredOff, + }, + { + ID: 745, + Name: "Snapshot_2", + Description: "Test snapshot 2", + CreateTime: time.Time{}, + State: types.VirtualMachinePowerStatePoweredOn, + }, + }, + "snapshot.count": 2, } // Assert that the output event matches the expected event diff --git a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go index
107aabbc4bd3..82c928004102 100644 --- a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go +++ b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go @@ -22,6 +22,7 @@ import ( "errors" "fmt" "strings" + "time" "github.com/elastic/beats/v7/metricbeat/mb" "github.com/elastic/beats/v7/metricbeat/module/vsphere" @@ -56,6 +57,15 @@ type VMData struct { NetworkNames []string DatastoreNames []string CustomFields mapstr.M + Snapshots []VMSnapshotData +} + +type VMSnapshotData struct { + ID int32 + Name string + Description string + CreateTime time.Time + State types.VirtualMachinePowerState } // New creates a new instance of the MetricSet. @@ -136,6 +146,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { var hostID, hostName string var networkNames, datastoreNames []string var customFields mapstr.M + var snapshots []VMSnapshotData if host := vm.Summary.Runtime.Host; host != nil { hostID = host.Value @@ -179,6 +190,10 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { } } + if vm.Snapshot != nil { + snapshots = fetchSnapshots(vm.Snapshot.RootSnapshotList) + } + data := VMData{ VM: vm, HostID: hostID, @@ -186,6 +201,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error { NetworkNames: networkNames, DatastoreNames: datastoreNames, CustomFields: customFields, + Snapshots: snapshots, } reporter.Event(mb.Event{ @@ -270,3 +286,22 @@ func getHostSystem(ctx context.Context, c *vim25.Client, ref types.ManagedObject } return &hs, nil } + +func fetchSnapshots(snapshotTree []types.VirtualMachineSnapshotTree) []VMSnapshotData { + snapshots := make([]VMSnapshotData, 0, len(snapshotTree)) + for _, snapshot := range snapshotTree { + snapshots = append(snapshots, VMSnapshotData{ + ID: snapshot.Id, + Name: snapshot.Name, + Description: snapshot.Description, + CreateTime: snapshot.CreateTime, + State: snapshot.State, + }) + + // Recursively add child snapshots + if len(snapshot.ChildSnapshotList) > 0 { + snapshots = append(snapshots, fetchSnapshots(snapshot.ChildSnapshotList)...) + } + } + return snapshots +} diff --git a/metricbeat/modules.d/vsphere.yml.disabled b/metricbeat/modules.d/vsphere.yml.disabled index 87c916cc6e54..717ce1326d50 100644 --- a/metricbeat/modules.d/vsphere.yml.disabled +++ b/metricbeat/modules.d/vsphere.yml.disabled @@ -2,13 +2,14 @@ # Docs: https://www.elastic.co/guide/en/beats/metricbeat/main/metricbeat-module-vsphere.html - module: vsphere - #metricsets: + # metricsets: # - cluster # - datastore + # - datastorecluster # - host - # - virtualmachine # - network # - resourcepool + # - virtualmachine # Real-time data collection – An ESXi Server collects data for each performance counter every 20 seconds. 
period: 20s hosts: ["https://localhost/sdk"] diff --git a/packetbeat/protos/mongodb/mongodb.go b/packetbeat/protos/mongodb/mongodb.go index e65824e5de25..749342cf3d7d 100644 --- a/packetbeat/protos/mongodb/mongodb.go +++ b/packetbeat/protos/mongodb/mongodb.go @@ -32,6 +32,8 @@ import ( "github.com/elastic/beats/v7/packetbeat/procs" "github.com/elastic/beats/v7/packetbeat/protos" "github.com/elastic/beats/v7/packetbeat/protos/tcp" + + "go.mongodb.org/mongo-driver/bson/primitive" ) var debugf = logp.MakeDebug("mongodb") @@ -54,7 +56,7 @@ type mongodbPlugin struct { type transactionKey struct { tcp common.HashableTCPTuple - id int + id int32 } var unmatchedRequests = monitoring.NewInt(nil, "mongodb.unmatched_requests") @@ -232,7 +234,7 @@ func (mongodb *mongodbPlugin) handleMongodb( func (mongodb *mongodbPlugin) onRequest(conn *mongodbConnectionData, msg *mongodbMessage) { // publish request only transaction - if !awaitsReply(msg.opCode) { + if !awaitsReply(msg) { mongodb.onTransComplete(msg, nil) return } @@ -273,7 +275,6 @@ func (mongodb *mongodbPlugin) onResponse(conn *mongodbConnectionData, msg *mongo func (mongodb *mongodbPlugin) onTransComplete(requ, resp *mongodbMessage) { trans := newTransaction(requ, resp) debugf("Mongodb transaction completed: %s", trans.mongodb) - mongodb.publishTransaction(trans) } @@ -294,8 +295,9 @@ func newTransaction(requ, resp *mongodbMessage) *transaction { } trans.params = requ.params trans.resource = requ.resource - trans.bytesIn = requ.messageLength + trans.bytesIn = int(requ.messageLength) trans.documents = requ.documents + trans.requestDocuments = requ.documents // preserving request documents that contains mongodb query for the new OP_MSG based protocol } // fill response @@ -308,7 +310,7 @@ func newTransaction(requ, resp *mongodbMessage) *transaction { trans.documents = resp.documents trans.endTime = resp.ts - trans.bytesOut = resp.messageLength + trans.bytesOut = int(resp.messageLength) } @@ -325,10 +327,17 @@ func (mongodb *mongodbPlugin) ReceivedFin(tcptuple *common.TCPTuple, dir uint8, return private } -func copyMapWithoutKey(d map[string]interface{}, key string) map[string]interface{} { +func copyMapWithoutKey(d map[string]interface{}, keys ...string) map[string]interface{} { res := map[string]interface{}{} for k, v := range d { - if k != key { + found := false + for _, excludeKey := range keys { + if k == excludeKey { + found = true + break + } + } + if !found { res[k] = v } } @@ -337,29 +346,40 @@ func copyMapWithoutKey(d map[string]interface{}, key string) map[string]interfac func reconstructQuery(t *transaction, full bool) (query string) { query = t.resource + "." + t.method + "(" + var doc interface{} + if len(t.params) > 0 { - var err error - var params string if !full { // remove the actual data. 
// TODO: review if we need to add other commands here switch t.method { case "insert": - params, err = doc2str(copyMapWithoutKey(t.params, "documents")) + doc = copyMapWithoutKey(t.params, "documents") case "update": - params, err = doc2str(copyMapWithoutKey(t.params, "updates")) + doc = copyMapWithoutKey(t.params, "updates") case "findandmodify": - params, err = doc2str(copyMapWithoutKey(t.params, "update")) + doc = copyMapWithoutKey(t.params, "update") } } else { - params, err = doc2str(t.params) + doc = t.params } - if err != nil { - debugf("Error marshaling params: %v", err) - } else { - query += params + } else if len(t.requestDocuments) > 0 { // This recovers the query document from OP_MSG + if m, ok := t.requestDocuments[0].(primitive.M); ok { + excludeKeys := []string{"lsid"} + if !full { + excludeKeys = append(excludeKeys, "documents") + } + doc = copyMapWithoutKey(m, excludeKeys...) } } + + queryString, err := doc2str(doc) + if err != nil { + debugf("Error marshaling query document: %v", err) + } else { + query += queryString + } + query += ")" skip, _ := t.event["numberToSkip"].(int) if skip > 0 { @@ -370,7 +390,7 @@ func reconstructQuery(t *transaction, full bool) (query string) { if limit > 0 && limit < 0x7fffffff { query += fmt.Sprintf(".limit(%d)", limit) } - return + return query } func (mongodb *mongodbPlugin) publishTransaction(t *transaction) { diff --git a/packetbeat/protos/mongodb/mongodb_parser.go b/packetbeat/protos/mongodb/mongodb_parser.go index 8a7be126902b..acb743ee517e 100644 --- a/packetbeat/protos/mongodb/mongodb_parser.go +++ b/packetbeat/protos/mongodb/mongodb_parser.go @@ -43,13 +43,13 @@ func mongodbMessageParser(s *stream) (bool, bool) { return true, false } - if length > len(s.data) { + if int(length) > len(s.data) { // Not yet reached the end of message return true, false } // Tell decoder to only consider current message - d.truncate(length) + d.truncate(int(length)) // fill up the header common to all messages // see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#standard-message-header @@ -72,8 +72,7 @@ func mongodbMessageParser(s *stream) (bool, bool) { } s.message.opCode = opCode - s.message.isResponse = false // default is that the message is a request. If not opReplyParse will set this to false - s.message.expectsResponse = false + s.message.isResponse = false // default is that the message is a request. 
If not opReplyParse will set this to true debugf("opCode = %d (%v)", s.message.opCode, s.message.opCode) // then split depending on operation type @@ -93,11 +92,9 @@ func mongodbMessageParser(s *stream) (bool, bool) { s.message.method = "insert" return opInsertParse(d, s.message) case opQuery: - s.message.expectsResponse = true return opQueryParse(d, s.message) case opGetMore: s.message.method = "getMore" - s.message.expectsResponse = true return opGetMoreParse(d, s.message) case opDelete: s.message.method = "delete" @@ -107,6 +104,11 @@ func mongodbMessageParser(s *stream) (bool, bool) { return opKillCursorsParse(d, s.message) case opMsg: s.message.method = "msg" + // The assumption is that the message with responseTo == 0 is the request + // TODO: handle the cases where moreToCome flag is set (multiple responses chained by responseTo) + if s.message.responseTo > 0 { + s.message.isResponse = true + } return opMsgParse(d, s.message) } @@ -141,7 +143,7 @@ func opReplyParse(d *decoder, m *mongodbMessage) (bool, bool) { debugf("Prepare to read %d document from reply", m.event["numberReturned"]) documents := make([]interface{}, numberReturned) - for i := 0; i < numberReturned; i++ { + for i := int32(0); i < numberReturned; i++ { var document bson.M document, err = d.readDocument() if err != nil { @@ -235,19 +237,6 @@ func opInsertParse(d *decoder, m *mongodbMessage) (bool, bool) { return true, true } -func extractDocuments(query map[string]interface{}) []interface{} { - docsVi, present := query["documents"] - if !present { - return []interface{}{} - } - - docs, ok := docsVi.([]interface{}) - if !ok { - return []interface{}{} - } - return docs -} - // Try to guess whether this key:value pair found in // the query represents a command. func isDatabaseCommand(key string, val interface{}) bool { @@ -387,12 +376,14 @@ func opKillCursorsParse(d *decoder, m *mongodbMessage) (bool, bool) { func opMsgParse(d *decoder, m *mongodbMessage) (bool, bool) { // ignore flagbits - _, err := d.readInt32() + flagBits, err := d.readInt32() if err != nil { logp.Err("An error occurred while parsing OP_MSG message: %s", err) return false, false } + m.SetFlagBits(flagBits) + // read sections kind, err := d.readByte() if err != nil { @@ -423,7 +414,7 @@ func opMsgParse(d *decoder, m *mongodbMessage) (bool, bool) { } m.event["message"] = cstring var documents []interface{} - for d.i < start+size { + for d.i < start+int(size) { document, err := d.readDocument() if err != nil { logp.Err("An error occurred while parsing OP_MSG message: %s", err) @@ -432,7 +423,8 @@ func opMsgParse(d *decoder, m *mongodbMessage) (bool, bool) { documents = append(documents, document) } m.documents = documents - + case msgKindInternal: + // Ignore the internal purposes section default: logp.Err("Unknown message kind: %v", kind) return false, false @@ -482,25 +474,25 @@ func (d *decoder) readByte() (byte, error) { return d.in[i], nil } -func (d *decoder) readInt32() (int, error) { +func (d *decoder) readInt32() (int32, error) { b, err := d.readBytes(4) if err != nil { return 0, err } - return int((uint32(b[0]) << 0) | + return int32((uint32(b[0]) << 0) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)), nil } -func (d *decoder) readInt64() (int, error) { +func (d *decoder) readInt64() (int64, error) { b, err := d.readBytes(8) if err != nil { return 0, err } - return int((uint64(b[0]) << 0) | + return int64((uint64(b[0]) << 0) | (uint64(b[1]) << 8) | (uint64(b[2]) << 16) | (uint64(b[3]) << 24) | @@ -516,7 +508,7 @@ func (d 
*decoder) readDocument() (bson.M, error) { if err != nil { return nil, err } - d.i = start + documentLength + d.i = start + int(documentLength) if len(d.in) < d.i { return nil, errors.New("document out of bounds") } diff --git a/packetbeat/protos/mongodb/mongodb_parser_test.go b/packetbeat/protos/mongodb/mongodb_parser_test.go index af4a647dd11c..6071d25ca31f 100644 --- a/packetbeat/protos/mongodb/mongodb_parser_test.go +++ b/packetbeat/protos/mongodb/mongodb_parser_test.go @@ -20,6 +20,9 @@ package mongodb import ( + "encoding/json" + "os" + "path/filepath" "testing" "github.com/stretchr/testify/assert" @@ -77,6 +80,39 @@ func TestMongodbParser_simpleRequest(t *testing.T) { } } +func TestMongodbParser_OpMsg(t *testing.T) { + files := []string{ + "1req.bin", + "1res.bin", + "2req.bin", + "2res.bin", + "3req.bin", + "3res.bin", + } + + for _, fn := range files { + data, err := os.ReadFile(filepath.Join("testdata", fn)) + if err != nil { + t.Fatal(err) + } + + st := &stream{data: data, message: new(mongodbMessage)} + + ok, complete := mongodbMessageParser(st) + + if !ok { + t.Errorf("Parsing returned error") + } + if !complete { + t.Errorf("Expecting a complete message") + } + _, err = json.Marshal(st.message.documents) + if err != nil { + t.Fatal(err) + } + } +} + func TestMongodbParser_unknownOpCode(t *testing.T) { var data []byte data = addInt32(data, 16) // length = 16 @@ -107,39 +143,6 @@ func addInt32(in []byte, v int32) []byte { return append(in, byte(u), byte(u>>8), byte(u>>16), byte(u>>24)) } -func Test_extract_documents(t *testing.T) { - type io struct { - Input map[string]interface{} - Output []interface{} - } - tests := []io{ - { - Input: map[string]interface{}{ - "a": 1, - "documents": []interface{}{"a", "b", "c"}, - }, - Output: []interface{}{"a", "b", "c"}, - }, - { - Input: map[string]interface{}{ - "a": 1, - }, - Output: []interface{}{}, - }, - { - Input: map[string]interface{}{ - "a": 1, - "documents": 1, - }, - Output: []interface{}{}, - }, - } - - for _, test := range tests { - assert.Equal(t, test.Output, extractDocuments(test.Input)) - } -} - func Test_isDatabaseCommand(t *testing.T) { type io struct { Key string diff --git a/packetbeat/protos/mongodb/mongodb_structs.go b/packetbeat/protos/mongodb/mongodb_structs.go index 4870e1516ede..67a9e26de8d2 100644 --- a/packetbeat/protos/mongodb/mongodb_structs.go +++ b/packetbeat/protos/mongodb/mongodb_structs.go @@ -33,16 +33,20 @@ type mongodbMessage struct { cmdlineTuple *common.ProcessTuple direction uint8 - isResponse bool - expectsResponse bool + isResponse bool // Standard message header fields from mongodb wire protocol // see http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#standard-message-header - messageLength int - requestID int - responseTo int + messageLength int32 + requestID int32 + responseTo int32 opCode opCode + // decoded flagBits + checkSumPresent bool + moreToCome bool + exhaustAllowed bool + // deduced from content.
Either an operation from the original wire protocol or the name of a command (passed through a query) // List of commands: http://docs.mongodb.org/manual/reference/command/ // List of original protocol operations: http://docs.mongodb.org/meta-driver/latest/legacy/mongodb-wire-protocol/#request-opcodes @@ -57,6 +61,12 @@ type mongodbMessage struct { event mapstr.M } +func (m *mongodbMessage) SetFlagBits(flagBits int32) { + m.checkSumPresent = flagBits&0x1 != 0 // 0 bit + m.moreToCome = flagBits&0x2 != 0 // 1 bit + m.exhaustAllowed = flagBits&0x10000 != 0 // 16 bit +} + // Represent a stream being parsed that contains a mongodb message type stream struct { tcptuple *common.TCPTuple @@ -90,12 +100,13 @@ type transaction struct { mongodb mapstr.M - event mapstr.M - method string - resource string - error string - params map[string]interface{} - documents []interface{} + event mapstr.M + method string + resource string + error string + params map[string]interface{} + requestDocuments []interface{} + documents []interface{} } type msgKind byte @@ -103,6 +114,7 @@ type msgKind byte const ( msgKindBody msgKind = 0 msgKindDocumentSequence msgKind = 1 + msgKindInternal msgKind = 2 ) type opCode int32 @@ -147,8 +159,15 @@ func (o opCode) String() string { return fmt.Sprintf("(value=%d)", int32(o)) } -func awaitsReply(c opCode) bool { - return c == opQuery || c == opGetMore +func awaitsReply(msg *mongodbMessage) bool { + opCode := msg.opCode + // The request of opMsg type doesn't get response if moreToCome is set + // From documentation: https://www.mongodb.com/docs/manual/reference/mongodb-wire-protocol + // "Requests with the moreToCome bit set will not receive a reply" + if !msg.isResponse && opCode == opMsg && !msg.moreToCome { + return true + } + return opCode == opQuery || opCode == opGetMore } // List of mongodb user commands (send through a query of the legacy protocol) diff --git a/packetbeat/protos/mongodb/mongodb_structs_test.go b/packetbeat/protos/mongodb/mongodb_structs_test.go new file mode 100644 index 000000000000..8e5ec0fddcac --- /dev/null +++ b/packetbeat/protos/mongodb/mongodb_structs_test.go @@ -0,0 +1,86 @@ +// Licensed to Elasticsearch B.V. under one or more contributor +// license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright +// ownership. Elasticsearch B.V. licenses this file to you under +// the Apache License, Version 2.0 (the "License"); you may +// not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ +package mongodb + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestSetFlagBits(t *testing.T) { + tests := []struct { + name string + flagBits int32 + wantMsg mongodbMessage + }{ + { + name: "none", + flagBits: 0b0000, + wantMsg: mongodbMessage{}, + }, + { + name: "checksumpresent", + flagBits: 0b0001, + wantMsg: mongodbMessage{checkSumPresent: true}, + }, + { + name: "moreToCome", + flagBits: 0b00010, + wantMsg: mongodbMessage{moreToCome: true}, + }, + { + name: "checksumpresent_moreToCome", + flagBits: 0b00011, + wantMsg: mongodbMessage{checkSumPresent: true, moreToCome: true}, + }, + { + name: "exhaustallowed", + flagBits: 0x10000, + wantMsg: mongodbMessage{exhaustAllowed: true}, + }, + { + name: "checksumpresent_exhaustallowed", + flagBits: 0x10001, + wantMsg: mongodbMessage{checkSumPresent: true, exhaustAllowed: true}, + }, + { + name: "checksumpresent_moreToCome_exhaustallowed", + flagBits: 0x10003, + wantMsg: mongodbMessage{checkSumPresent: true, moreToCome: true, exhaustAllowed: true}, + }, + } + + flagBitsComparer := cmp.Comparer(func(m1, m2 mongodbMessage) bool { + return m1.checkSumPresent == m2.checkSumPresent && + m1.moreToCome == m2.moreToCome && + m1.exhaustAllowed == m2.exhaustAllowed + }) + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + var gotMsg mongodbMessage + gotMsg.SetFlagBits(tc.flagBits) + + diff := cmp.Diff(tc.wantMsg, gotMsg, flagBitsComparer) + if diff != "" { + t.Fatal(diff) + } + }) + } +} diff --git a/packetbeat/protos/mongodb/testdata/1req.bin b/packetbeat/protos/mongodb/testdata/1req.bin new file mode 100644 index 000000000000..d29c6d1cc260 Binary files /dev/null and b/packetbeat/protos/mongodb/testdata/1req.bin differ diff --git a/packetbeat/protos/mongodb/testdata/1res.bin b/packetbeat/protos/mongodb/testdata/1res.bin new file mode 100644 index 000000000000..495fe160e8a8 Binary files /dev/null and b/packetbeat/protos/mongodb/testdata/1res.bin differ diff --git a/packetbeat/protos/mongodb/testdata/2req.bin b/packetbeat/protos/mongodb/testdata/2req.bin new file mode 100644 index 000000000000..3464cde7bd4d Binary files /dev/null and b/packetbeat/protos/mongodb/testdata/2req.bin differ diff --git a/packetbeat/protos/mongodb/testdata/2res.bin b/packetbeat/protos/mongodb/testdata/2res.bin new file mode 100644 index 000000000000..d5ccd0bf6c8a Binary files /dev/null and b/packetbeat/protos/mongodb/testdata/2res.bin differ diff --git a/packetbeat/protos/mongodb/testdata/3req.bin b/packetbeat/protos/mongodb/testdata/3req.bin new file mode 100644 index 000000000000..22461fa0a61d Binary files /dev/null and b/packetbeat/protos/mongodb/testdata/3req.bin differ diff --git a/packetbeat/protos/mongodb/testdata/3res.bin b/packetbeat/protos/mongodb/testdata/3res.bin new file mode 100644 index 000000000000..6950e63146fd Binary files /dev/null and b/packetbeat/protos/mongodb/testdata/3res.bin differ diff --git a/winlogbeat/eventlog/wineventlog.go b/winlogbeat/eventlog/wineventlog.go index d558a477845e..e418f22bf061 100644 --- a/winlogbeat/eventlog/wineventlog.go +++ b/winlogbeat/eventlog/wineventlog.go @@ -317,7 +317,9 @@ func (l *winEventLog) Open(state checkpoint.EventLogState) error { var err error // we need to defer metrics initialization since when the event log // is used from winlog input it would register it twice due to CheckConfig calls - l.metrics = newInputMetrics(l.channelName, l.id) + if l.metrics == nil { + l.metrics = newInputMetrics(l.channelName, l.id) + } if len(state.Bookmark) > 0 { bookmark, err = 
win.CreateBookmarkFromXML(state.Bookmark) } else if state.RecordNumber > 0 && l.channelName != "" { diff --git a/winlogbeat/eventlog/wineventlog_experimental.go b/winlogbeat/eventlog/wineventlog_experimental.go index 3a6a9b2fe92b..126e92fdacef 100644 --- a/winlogbeat/eventlog/wineventlog_experimental.go +++ b/winlogbeat/eventlog/wineventlog_experimental.go @@ -158,7 +158,9 @@ func (l *winEventLogExp) Open(state checkpoint.EventLogState) error { l.lastRead = state // we need to defer metrics initialization since when the event log // is used from winlog input it would register it twice due to CheckConfig calls - l.metrics = newInputMetrics(l.channelName, l.id) + if l.metrics == nil { + l.metrics = newInputMetrics(l.channelName, l.id) + } var err error l.iterator, err = win.NewEventIterator( diff --git a/x-pack/auditbeat/module/system/package/package.go b/x-pack/auditbeat/module/system/package/package.go index 944bc083b332..14aaadfdc563 100644 --- a/x-pack/auditbeat/module/system/package/package.go +++ b/x-pack/auditbeat/module/system/package/package.go @@ -24,7 +24,7 @@ import ( "time" "github.com/cespare/xxhash/v2" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/joeshaw/multierror" "go.etcd.io/bbolt" diff --git a/x-pack/auditbeat/module/system/process/process.go b/x-pack/auditbeat/module/system/process/process.go index b835a03bfb96..793bb70a4fb0 100644 --- a/x-pack/auditbeat/module/system/process/process.go +++ b/x-pack/auditbeat/module/system/process/process.go @@ -14,7 +14,7 @@ import ( "time" "github.com/cespare/xxhash/v2" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/auditbeat/ab" "github.com/elastic/beats/v7/auditbeat/datastore" @@ -115,7 +115,9 @@ type Process struct { // Hash creates a hash for Process. 
func (p Process) Hash() uint64 { h := xxhash.New() + //nolint:errcheck // always return nil err h.WriteString(strconv.Itoa(p.Info.PID)) + //nolint:errcheck // always return nil err h.WriteString(p.Info.StartTime.String()) return h.Sum64() } @@ -139,7 +141,9 @@ func (p Process) toMapStr() mapstr.M { func (p Process) entityID(hostID string) string { h := system.NewEntityHash() h.Write([]byte(hostID)) + //nolint:errcheck // no error handling binary.Write(h, binary.LittleEndian, int64(p.Info.PID)) + //nolint:errcheck // no error handling binary.Write(h, binary.LittleEndian, int64(p.Info.StartTime.Nanosecond())) return h.Sum() } @@ -445,13 +449,12 @@ func convertToCacheable(processes []*Process) []cache.Cacheable { } func (ms *MetricSet) getProcesses() ([]*Process, error) { - var processes []*Process - sysinfoProcs, err := sysinfo.Processes() if err != nil { return nil, fmt.Errorf("failed to fetch processes: %w", err) } + processes := make([]*Process, 0, len(sysinfoProcs)) for _, sysinfoProc := range sysinfoProcs { var process *Process diff --git a/x-pack/auditbeat/module/system/user/user.go b/x-pack/auditbeat/module/system/user/user.go index 3220c7ffeb41..f48019af5720 100644 --- a/x-pack/auditbeat/module/system/user/user.go +++ b/x-pack/auditbeat/module/system/user/user.go @@ -10,6 +10,7 @@ import ( "bytes" "encoding/binary" "encoding/gob" + "errors" "fmt" "io" "os/user" @@ -19,7 +20,7 @@ import ( "time" "github.com/cespare/xxhash/v2" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/joeshaw/multierror" "github.com/elastic/beats/v7/auditbeat/ab" @@ -136,17 +137,27 @@ type User struct { func (user User) Hash() uint64 { h := xxhash.New() // Use everything except userInfo + //nolint:errcheck // err always nil h.WriteString(user.Name) + //nolint:errcheck // err always nil binary.Write(h, binary.BigEndian, uint8(user.PasswordType)) + //nolint:errcheck // err always nil h.WriteString(user.PasswordChanged.String()) + //nolint:errcheck // err always nil h.Write(user.PasswordHashHash) + //nolint:errcheck // err always nil h.WriteString(user.UID) + //nolint:errcheck // err always nil h.WriteString(user.GID) + //nolint:errcheck // err always nil h.WriteString(user.Dir) + //nolint:errcheck // err always nil h.WriteString(user.Shell) for _, group := range user.Groups { + //nolint:errcheck // err always nil h.WriteString(group.Name) + //nolint:errcheck // err always nil h.WriteString(group.Gid) } @@ -199,11 +210,11 @@ func (user User) PrimaryGroup() *user.Group { } // entityID creates an ID that uniquely identifies this user across machines. 
-func (u User) entityID(hostID string) string { +func (user User) entityID(hostID string) string { h := system.NewEntityHash() h.Write([]byte(hostID)) - h.Write([]byte(u.Name)) - h.Write([]byte(u.UID)) + h.Write([]byte(user.Name)) + h.Write([]byte(user.UID)) return h.Sum() } @@ -555,7 +566,7 @@ func (ms *MetricSet) restoreUsersFromDisk() (users []*User, err error) { err = decoder.Decode(user) if err == nil { users = append(users, user) - } else if err == io.EOF { + } else if errors.Is(err, io.EOF) { // Read all users break } else { @@ -594,6 +605,7 @@ func (ms *MetricSet) haveFilesChanged() (bool, error) { return true, fmt.Errorf("failed to stat %v: %w", path, err) } + //nolint:unconvert // false positive ctime := time.Unix(int64(stats.Ctim.Sec), int64(stats.Ctim.Nsec)) if ms.lastRead.Before(ctime) { ms.log.Debugf("File changed: %v (lastRead=%v, ctime=%v)", path, ms.lastRead, ctime) diff --git a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go index de3436156b42..b4820f734088 100644 --- a/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go +++ b/x-pack/dockerlogbeat/pipelinemanager/libbeattools.go @@ -11,7 +11,7 @@ import ( "os" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/cloudid" diff --git a/x-pack/filebeat/input/awss3/sqs_test.go b/x-pack/filebeat/input/awss3/sqs_test.go index cf82f03c6dec..8ad01a032dc5 100644 --- a/x-pack/filebeat/input/awss3/sqs_test.go +++ b/x-pack/filebeat/input/awss3/sqs_test.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/sqs/types" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/golang/mock/gomock" "github.com/stretchr/testify/assert" diff --git a/x-pack/filebeat/input/cel/input.go b/x-pack/filebeat/input/cel/input.go index 99d5cfad7889..2096383de392 100644 --- a/x-pack/filebeat/input/cel/input.go +++ b/x-pack/filebeat/input/cel/input.go @@ -366,9 +366,7 @@ func (i input) run(env v2.Context, src *source, cursor map[string]interface{}, p e, ok := state["events"] if !ok { - log.Error("unexpected missing events array from evaluation") - env.UpdateStatus(status.Degraded, "unexpected missing events array from evaluation") - isDegraded = true + return errors.New("unexpected missing events array from evaluation") } var events []interface{} switch e := e.(type) { diff --git a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set.go b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set.go index 1a36edec554b..46f6422c3038 100644 --- a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set.go +++ b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set.go @@ -9,7 +9,7 @@ import ( "encoding/json" "sort" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" ) type UUIDSet struct { diff --git a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set_test.go b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set_test.go index cc9cdba83b96..f3d05077b027 100644 --- a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set_test.go +++ b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_set_test.go @@ -9,7 +9,7 @@ import ( "fmt" "testing" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" ) @@ -22,11 +22,11 @@ const ( ) var ( - testUUID1 = uuid.MustParse(testUUID1Str) - testUUID2 = 
uuid.MustParse(testUUID2Str) - testUUID3 = uuid.MustParse(testUUID3Str) - testUUID4 = uuid.MustParse(testUUID4Str) - testUUID5 = uuid.MustParse(testUUID5Str) + testUUID1 = uuid.Must(uuid.FromString(testUUID1Str)) + testUUID2 = uuid.Must(uuid.FromString(testUUID2Str)) + testUUID3 = uuid.Must(uuid.FromString(testUUID3Str)) + testUUID4 = uuid.Must(uuid.FromString(testUUID4Str)) + testUUID5 = uuid.Must(uuid.FromString(testUUID5Str)) ) func TestNewUUIDSet(t *testing.T) { diff --git a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree.go b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree.go index 07b35284301a..1ebda5ebf41e 100644 --- a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree.go +++ b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree.go @@ -7,7 +7,7 @@ package collections import ( "encoding/json" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" ) type UUIDTree struct { diff --git a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree_test.go b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree_test.go index f82e7e74267c..01b1e8b6e744 100644 --- a/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree_test.go +++ b/x-pack/filebeat/input/entityanalytics/internal/collections/uuid_tree_test.go @@ -9,7 +9,7 @@ import ( "fmt" "testing" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" ) @@ -36,7 +36,7 @@ func TestUUIDTree_UnmarshalJSON(t *testing.T) { }, "err-bad-uuid-key": { In: []byte(fmt.Sprintf(`{"1":["%s"]}`, testUUID1)), - WantErr: "invalid UUID length: 1", + WantErr: "uuid: incorrect UUID length 1 in string \"1\"", }, "err-bad-uuid-set": { In: []byte(fmt.Sprintf(`{"%s":[1]}`, testUUID1)), diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/azure.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/azure.go index 42104ebd8e9e..e6faa4667722 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/azure.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/azure.go @@ -11,7 +11,7 @@ import ( "fmt" "time" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" v2 "github.com/elastic/beats/v7/filebeat/input/v2" "github.com/elastic/beats/v7/libbeat/beat" diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device.go index 46139d278208..2a8bc0e2561d 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device.go @@ -5,7 +5,7 @@ package fetcher import ( - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/internal/collections" "github.com/elastic/elastic-agent-libs/mapstr" diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device_test.go index 37e6563eb3aa..5f9e1bd11cee 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/device_test.go @@ -7,7 +7,7 @@ package fetcher import ( "testing" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/internal/collections" @@ -20,59 +20,59 @@ func 
TestDevice_Merge(t *testing.T) { Want *Device }{ "id-mismatch": { - In: &Device{ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd")}, - InOther: &Device{ID: uuid.MustParse("80c3f9af-75ae-45f5-b22b-53f005d5880d")}, - Want: &Device{ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd")}, + In: &Device{ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd"))}, + InOther: &Device{ID: uuid.Must(uuid.FromString("80c3f9af-75ae-45f5-b22b-53f005d5880d"))}, + Want: &Device{ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd"))}, }, "ok": { In: &Device{ - ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd"), + ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd")), Fields: map[string]interface{}{ "a": "alpha", }, - MemberOf: collections.NewUUIDSet(uuid.MustParse("fcda226a-c920-4d99-81bc-d2d691a6c212")), - TransitiveMemberOf: collections.NewUUIDSet(uuid.MustParse("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28")), - RegisteredOwners: collections.NewUUIDSet(uuid.MustParse("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a")), + MemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("fcda226a-c920-4d99-81bc-d2d691a6c212"))), + TransitiveMemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28"))), + RegisteredOwners: collections.NewUUIDSet(uuid.Must(uuid.FromString("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a"))), RegisteredUsers: collections.NewUUIDSet( - uuid.MustParse("27cea005-7377-4175-b2ef-e9d64c977f4d"), - uuid.MustParse("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a"), + uuid.Must(uuid.FromString("27cea005-7377-4175-b2ef-e9d64c977f4d")), + uuid.Must(uuid.FromString("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a")), ), }, InOther: &Device{ - ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd"), + ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd")), Fields: map[string]interface{}{ "b": "beta", }, - MemberOf: collections.NewUUIDSet(uuid.MustParse("a77e8cbb-27a5-49d3-9d5e-801997621f87")), - TransitiveMemberOf: collections.NewUUIDSet(uuid.MustParse("c550d32c-09b2-4851-b0f2-1bc431e26d01")), - RegisteredOwners: collections.NewUUIDSet(uuid.MustParse("81d1b5cd-7cd6-469d-9fe8-0a5c6cf2a7b6")), + MemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("a77e8cbb-27a5-49d3-9d5e-801997621f87"))), + TransitiveMemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("c550d32c-09b2-4851-b0f2-1bc431e26d01"))), + RegisteredOwners: collections.NewUUIDSet(uuid.Must(uuid.FromString("81d1b5cd-7cd6-469d-9fe8-0a5c6cf2a7b6"))), RegisteredUsers: collections.NewUUIDSet( - uuid.MustParse("5e6d279a-ce2b-43b8-a38f-3110907e1974"), - uuid.MustParse("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a"), + uuid.Must(uuid.FromString("5e6d279a-ce2b-43b8-a38f-3110907e1974")), + uuid.Must(uuid.FromString("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a")), ), }, Want: &Device{ - ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd"), + ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd")), Fields: map[string]interface{}{ "a": "alpha", "b": "beta", }, MemberOf: collections.NewUUIDSet( - uuid.MustParse("fcda226a-c920-4d99-81bc-d2d691a6c212"), - uuid.MustParse("a77e8cbb-27a5-49d3-9d5e-801997621f87"), + uuid.Must(uuid.FromString("fcda226a-c920-4d99-81bc-d2d691a6c212")), + uuid.Must(uuid.FromString("a77e8cbb-27a5-49d3-9d5e-801997621f87")), ), TransitiveMemberOf: collections.NewUUIDSet( - uuid.MustParse("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28"), - uuid.MustParse("c550d32c-09b2-4851-b0f2-1bc431e26d01"), + 
uuid.Must(uuid.FromString("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28")), + uuid.Must(uuid.FromString("c550d32c-09b2-4851-b0f2-1bc431e26d01")), ), RegisteredOwners: collections.NewUUIDSet( - uuid.MustParse("81d1b5cd-7cd6-469d-9fe8-0a5c6cf2a7b6"), - uuid.MustParse("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a"), + uuid.Must(uuid.FromString("81d1b5cd-7cd6-469d-9fe8-0a5c6cf2a7b6")), + uuid.Must(uuid.FromString("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a")), ), RegisteredUsers: collections.NewUUIDSet( - uuid.MustParse("27cea005-7377-4175-b2ef-e9d64c977f4d"), - uuid.MustParse("5e6d279a-ce2b-43b8-a38f-3110907e1974"), - uuid.MustParse("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a"), + uuid.Must(uuid.FromString("27cea005-7377-4175-b2ef-e9d64c977f4d")), + uuid.Must(uuid.FromString("5e6d279a-ce2b-43b8-a38f-3110907e1974")), + uuid.Must(uuid.FromString("c59fbdb8-e442-46b1-8d72-c8ac0b78ec0a")), ), }, }, diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go index 1f526fb31bb7..c360c590913d 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph.go @@ -20,7 +20,7 @@ import ( "path/filepath" "strings" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "go.elastic.co/ecszap" "go.uber.org/zap" "go.uber.org/zap/zapcore" @@ -488,7 +488,7 @@ func newUserFromAPI(u userAPI) (*fetcher.User, error) { if idRaw, ok := newUser.Fields["id"]; ok { idStr, _ := idRaw.(string) - if newUser.ID, err = uuid.Parse(idStr); err != nil { + if newUser.ID, err = uuid.FromString(idStr); err != nil { return nil, fmt.Errorf("unable to unmarshal user, invalid ID: %w", err) } delete(newUser.Fields, "id") @@ -542,7 +542,7 @@ func newDeviceFromAPI(d deviceAPI) (*fetcher.Device, error) { if idRaw, ok := newDevice.Fields["id"]; ok { idStr, _ := idRaw.(string) - if newDevice.ID, err = uuid.Parse(idStr); err != nil { + if newDevice.ID, err = uuid.FromString(idStr); err != nil { return nil, fmt.Errorf("unable to unmarshal device, invalid ID: %w", err) } delete(newDevice.Fields, "id") diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go index bafddc20a00a..64f31be104de 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/graph/graph_test.go @@ -17,8 +17,8 @@ import ( "testing" "time" + "github.com/gofrs/uuid/v5" "github.com/google/go-cmp/cmp" - "github.com/google/uuid" "github.com/stretchr/testify/require" "gopkg.in/natefinch/lumberjack.v2" @@ -143,11 +143,11 @@ var deviceUserResponses = map[string]apiUserResponse{ var groupsResponse1 = apiGroupResponse{ Groups: []groupAPI{ { - ID: uuid.MustParse("331676df-b8fd-4492-82ed-02b927f8dd80"), + ID: uuid.Must(uuid.FromString("331676df-b8fd-4492-82ed-02b927f8dd80")), DisplayName: "group1", MembersDelta: []memberAPI{ { - ID: uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + ID: uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), Type: apiUserType, }, }, @@ -158,15 +158,15 @@ var groupsResponse1 = apiGroupResponse{ var groupsResponse2 = apiGroupResponse{ Groups: []groupAPI{ { - ID: uuid.MustParse("d140978f-d641-4f01-802f-4ecc1acf8935"), + ID: uuid.Must(uuid.FromString("d140978f-d641-4f01-802f-4ecc1acf8935")), DisplayName: "group2", 
MembersDelta: []memberAPI{ { - ID: uuid.MustParse("331676df-b8fd-4492-82ed-02b927f8dd80"), + ID: uuid.Must(uuid.FromString("331676df-b8fd-4492-82ed-02b927f8dd80")), Type: apiGroupType, }, { - ID: uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + ID: uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), Type: apiGroupType, Removed: &removed{Reason: "changed"}, }, @@ -288,25 +288,25 @@ func TestGraph_Groups(t *testing.T) { wantDeltaLink := "http://" + testSrv.addr + "/groups/delta?$deltatoken=test" wantGroups := []*fetcher.Group{ { - ID: uuid.MustParse("331676df-b8fd-4492-82ed-02b927f8dd80"), + ID: uuid.Must(uuid.FromString("331676df-b8fd-4492-82ed-02b927f8dd80")), Name: "group1", Members: []fetcher.Member{ { - ID: uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + ID: uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), Type: fetcher.MemberUser, }, }, }, { - ID: uuid.MustParse("d140978f-d641-4f01-802f-4ecc1acf8935"), + ID: uuid.Must(uuid.FromString("d140978f-d641-4f01-802f-4ecc1acf8935")), Name: "group2", Members: []fetcher.Member{ { - ID: uuid.MustParse("331676df-b8fd-4492-82ed-02b927f8dd80"), + ID: uuid.Must(uuid.FromString("331676df-b8fd-4492-82ed-02b927f8dd80")), Type: fetcher.MemberGroup, }, { - ID: uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + ID: uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), Type: fetcher.MemberGroup, Deleted: true, }, @@ -347,7 +347,7 @@ func TestGraph_Users(t *testing.T) { wantDeltaLink := "http://" + testSrv.addr + "/users/delta?$deltatoken=test" wantUsers := []*fetcher.User{ { - ID: uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + ID: uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), Fields: map[string]interface{}{ "userPrincipalName": "user.one@example.com", "mail": "user.one@example.com", @@ -362,7 +362,7 @@ func TestGraph_Users(t *testing.T) { }, }, { - ID: uuid.MustParse("d897d560-3d17-4dae-81b3-c898fe82bf84"), + ID: uuid.Must(uuid.FromString("d897d560-3d17-4dae-81b3-c898fe82bf84")), Fields: map[string]interface{}{ "userPrincipalName": "user.two@example.com", "mail": "user.two@example.com", @@ -411,7 +411,7 @@ func TestGraph_Devices(t *testing.T) { wantDeltaLink := "http://" + testSrv.addr + "/devices/delta?$deltatoken=test" wantDevices := []*fetcher.Device{ { - ID: uuid.MustParse("6a59ea83-02bd-468f-a40b-f2c3d1821983"), + ID: uuid.Must(uuid.FromString("6a59ea83-02bd-468f-a40b-f2c3d1821983")), Fields: map[string]interface{}{ "accountEnabled": true, "deviceId": "eab73519-780d-4d43-be6d-a4a89af2a348", @@ -434,15 +434,15 @@ func TestGraph_Devices(t *testing.T) { }, }, RegisteredOwners: collections.NewUUIDSet( - uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), ), RegisteredUsers: collections.NewUUIDSet( - uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), - uuid.MustParse("d897d560-3d17-4dae-81b3-c898fe82bf84"), + uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), + uuid.Must(uuid.FromString("d897d560-3d17-4dae-81b3-c898fe82bf84")), ), }, { - ID: uuid.MustParse("adbbe40a-0627-4328-89f1-88cac84dbc7f"), + ID: uuid.Must(uuid.FromString("adbbe40a-0627-4328-89f1-88cac84dbc7f")), Fields: map[string]interface{}{ "accountEnabled": true, "deviceId": "2fbbb8f9-ff67-4a21-b867-a344d18a4198", @@ -465,10 +465,10 @@ func TestGraph_Devices(t *testing.T) { }, }, RegisteredOwners: collections.NewUUIDSet( - uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + 
uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), ), RegisteredUsers: collections.NewUUIDSet( - uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), ), }, } diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group.go index 77a7c24deebb..b3dc6809382c 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group.go @@ -4,7 +4,7 @@ package fetcher -import "github.com/google/uuid" +import "github.com/gofrs/uuid/v5" // MemberType indicates the type of member in a Group. type MemberType int diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group_test.go index a9958ef22450..7c6a1ff5ba33 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/group_test.go @@ -7,13 +7,13 @@ package fetcher import ( "testing" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" ) func TestGroup_ToECS(t *testing.T) { in := Group{ - ID: uuid.MustParse("88ecb4e8-5a1a-461e-a062-f1d3c5aa4ca4"), + ID: uuid.Must(uuid.FromString("88ecb4e8-5a1a-461e-a062-f1d3c5aa4ca4")), Name: "group1", } want := GroupECS{ diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/mock/mock.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/mock/mock.go index 11d77871f3bc..4385385367d1 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/mock/mock.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/mock/mock.go @@ -8,7 +8,7 @@ package mock import ( "context" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher" "github.com/elastic/elastic-agent-libs/logp" @@ -22,43 +22,43 @@ var ( var GroupResponse = []*fetcher.Group{ { - ID: uuid.MustParse("331676df-b8fd-4492-82ed-02b927f8dd80"), + ID: uuid.Must(uuid.FromString("331676df-b8fd-4492-82ed-02b927f8dd80")), Name: "group1", Members: []fetcher.Member{ { - ID: uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + ID: uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), Type: fetcher.MemberUser, }, { - ID: uuid.MustParse("6a59ea83-02bd-468f-a40b-f2c3d1821983"), + ID: uuid.Must(uuid.FromString("6a59ea83-02bd-468f-a40b-f2c3d1821983")), Type: fetcher.MemberDevice, }, }, }, { - ID: uuid.MustParse("d140978f-d641-4f01-802f-4ecc1acf8935"), + ID: uuid.Must(uuid.FromString("d140978f-d641-4f01-802f-4ecc1acf8935")), Name: "group2", Members: []fetcher.Member{ { - ID: uuid.MustParse("331676df-b8fd-4492-82ed-02b927f8dd80"), + ID: uuid.Must(uuid.FromString("331676df-b8fd-4492-82ed-02b927f8dd80")), Type: fetcher.MemberGroup, }, { - ID: uuid.MustParse("d897d560-3d17-4dae-81b3-c898fe82bf84"), + ID: uuid.Must(uuid.FromString("d897d560-3d17-4dae-81b3-c898fe82bf84")), Type: fetcher.MemberUser, }, { - ID: uuid.MustParse("adbbe40a-0627-4328-89f1-88cac84dbc7f"), + ID: uuid.Must(uuid.FromString("adbbe40a-0627-4328-89f1-88cac84dbc7f")), Type: fetcher.MemberDevice, }, }, }, { - ID: uuid.MustParse("10db9800-3908-40cc-81c5-511fa8ccf7fd"), + ID: uuid.Must(uuid.FromString("10db9800-3908-40cc-81c5-511fa8ccf7fd")), 
Name: "group3", Members: []fetcher.Member{ { - ID: uuid.MustParse("d140978f-d641-4f01-802f-4ecc1acf8935"), + ID: uuid.Must(uuid.FromString("d140978f-d641-4f01-802f-4ecc1acf8935")), Type: fetcher.MemberGroup, }, }, @@ -67,7 +67,7 @@ var GroupResponse = []*fetcher.Group{ var UserResponse = []*fetcher.User{ { - ID: uuid.MustParse("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc"), + ID: uuid.Must(uuid.FromString("5ebc6a0f-05b7-4f42-9c8a-682bbc75d0fc")), Fields: map[string]interface{}{ "userPrincipalName": "user.one@example.com", "mail": "user.one@example.com", @@ -80,7 +80,7 @@ var UserResponse = []*fetcher.User{ }, }, { - ID: uuid.MustParse("d897d560-3d17-4dae-81b3-c898fe82bf84"), + ID: uuid.Must(uuid.FromString("d897d560-3d17-4dae-81b3-c898fe82bf84")), Fields: map[string]interface{}{ "userPrincipalName": "user.two@example.com", "mail": "user.two@example.com", @@ -96,7 +96,7 @@ var UserResponse = []*fetcher.User{ var DeviceResponse = []*fetcher.Device{ { - ID: uuid.MustParse("6a59ea83-02bd-468f-a40b-f2c3d1821983"), + ID: uuid.Must(uuid.FromString("6a59ea83-02bd-468f-a40b-f2c3d1821983")), Fields: map[string]interface{}{ "accountEnabled": true, "deviceId": "eab73519-780d-4d43-be6d-a4a89af2a348", @@ -120,7 +120,7 @@ var DeviceResponse = []*fetcher.Device{ }, }, { - ID: uuid.MustParse("adbbe40a-0627-4328-89f1-88cac84dbc7f"), + ID: uuid.Must(uuid.FromString("adbbe40a-0627-4328-89f1-88cac84dbc7f")), Fields: map[string]interface{}{ "accountEnabled": true, "deviceId": "2fbbb8f9-ff67-4a21-b867-a344d18a4198", diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user.go index d629e3ef7bfd..1a99e4ea889a 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user.go @@ -5,7 +5,7 @@ package fetcher import ( - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/internal/collections" "github.com/elastic/elastic-agent-libs/mapstr" diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user_test.go index af5488b2424d..afb361f982fe 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/fetcher/user_test.go @@ -7,7 +7,7 @@ package fetcher import ( "testing" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/internal/collections" @@ -20,40 +20,40 @@ func TestUser_Merge(t *testing.T) { Want *User }{ "id-mismatch": { - In: &User{ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd")}, - InOther: &User{ID: uuid.MustParse("80c3f9af-75ae-45f5-b22b-53f005d5880d")}, - Want: &User{ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd")}, + In: &User{ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd"))}, + InOther: &User{ID: uuid.Must(uuid.FromString("80c3f9af-75ae-45f5-b22b-53f005d5880d"))}, + Want: &User{ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd"))}, }, "ok": { In: &User{ - ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd"), + ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd")), Fields: map[string]interface{}{ "a": "alpha", }, - MemberOf: 
collections.NewUUIDSet(uuid.MustParse("fcda226a-c920-4d99-81bc-d2d691a6c212")), - TransitiveMemberOf: collections.NewUUIDSet(uuid.MustParse("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28")), + MemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("fcda226a-c920-4d99-81bc-d2d691a6c212"))), + TransitiveMemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28"))), }, InOther: &User{ - ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd"), + ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd")), Fields: map[string]interface{}{ "b": "beta", }, - MemberOf: collections.NewUUIDSet(uuid.MustParse("a77e8cbb-27a5-49d3-9d5e-801997621f87")), - TransitiveMemberOf: collections.NewUUIDSet(uuid.MustParse("c550d32c-09b2-4851-b0f2-1bc431e26d01")), + MemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("a77e8cbb-27a5-49d3-9d5e-801997621f87"))), + TransitiveMemberOf: collections.NewUUIDSet(uuid.Must(uuid.FromString("c550d32c-09b2-4851-b0f2-1bc431e26d01"))), }, Want: &User{ - ID: uuid.MustParse("187f924c-e867-477e-8d74-dd762d6379dd"), + ID: uuid.Must(uuid.FromString("187f924c-e867-477e-8d74-dd762d6379dd")), Fields: map[string]interface{}{ "a": "alpha", "b": "beta", }, MemberOf: collections.NewUUIDSet( - uuid.MustParse("fcda226a-c920-4d99-81bc-d2d691a6c212"), - uuid.MustParse("a77e8cbb-27a5-49d3-9d5e-801997621f87"), + uuid.Must(uuid.FromString("fcda226a-c920-4d99-81bc-d2d691a6c212")), + uuid.Must(uuid.FromString("a77e8cbb-27a5-49d3-9d5e-801997621f87")), ), TransitiveMemberOf: collections.NewUUIDSet( - uuid.MustParse("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28"), - uuid.MustParse("c550d32c-09b2-4851-b0f2-1bc431e26d01"), + uuid.Must(uuid.FromString("ca777ad5-9abf-4c9b-be1f-c38c6ec28f28")), + uuid.Must(uuid.FromString("c550d32c-09b2-4851-b0f2-1bc431e26d01")), ), }, }, diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore.go index 392ce5f04605..dfa425c6c6ef 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/internal/collections" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/internal/kvstore" diff --git a/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore_test.go b/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore_test.go index ef1ba2b240a6..66f08d018d5e 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/azuread/statestore_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/internal/collections" @@ -116,10 +116,10 @@ func TestStateStore_Close(t *testing.T) { ss.devicesLink = "devices-link" ss.groupsLink = "groups-link" - user1ID := uuid.MustParse("a77e8cbb-27a5-49d3-9d5e-801997621f87") - device1ID := uuid.MustParse("adbbe40a-0627-4328-89f1-88cac84dbc7f") - group1ID := uuid.MustParse("331676df-b8fd-4492-82ed-02b927f8dd80") - group2ID := uuid.MustParse("ec8b17ae-ce9d-4099-97ee-4a959638bc29") + user1ID := uuid.Must(uuid.FromString("a77e8cbb-27a5-49d3-9d5e-801997621f87")) + device1ID := 
uuid.Must(uuid.FromString("adbbe40a-0627-4328-89f1-88cac84dbc7f")) + group1ID := uuid.Must(uuid.FromString("331676df-b8fd-4492-82ed-02b927f8dd80")) + group2ID := uuid.Must(uuid.FromString("ec8b17ae-ce9d-4099-97ee-4a959638bc29")) ss.users = map[uuid.UUID]*fetcher.User{ user1ID: { diff --git a/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf_test.go b/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf_test.go index e11269878cf2..47e6c27f6d30 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf/jamf_test.go @@ -6,7 +6,6 @@ package jamf import ( "context" - "crypto/tls" "encoding/json" "flag" "fmt" @@ -19,8 +18,8 @@ import ( _ "embed" + "github.com/gofrs/uuid/v5" "github.com/google/go-cmp/cmp" - "github.com/google/uuid" ) var logResponses = flag.Bool("log_response", false, "use to log users/devices returned from the API") @@ -70,16 +69,18 @@ var jamfTests = []struct { if !ok || user != username || pass != password { w.WriteHeader(http.StatusUnauthorized) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // no error handling w.Write([]byte("{\n \"httpStatus\" : 401,\n \"errors\" : [ ]\n}")) return } if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // no error handling w.Write([]byte("{\n \"httpStatus\" : 405,\n \"errors\" : [ ]\n}")) return } - tok.Token = uuid.New().String() + tok.Token = uuid.Must(uuid.NewV4()).String() tok.Expires = time.Now().In(time.UTC).Add(time.Hour) fmt.Fprintf(w, "{\n \"token\" : \"%s\",\n \"expires\" : \"%s\"\n}", tok.Token, tok.Expires.Format(time.RFC3339)) })) @@ -87,12 +88,14 @@ var jamfTests = []struct { if r.Header.Get("Authorization") != "Bearer "+tok.Token || !tok.IsValidFor(0) { w.WriteHeader(http.StatusUnauthorized) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // no error handling w.Write([]byte("{\n \"httpStatus\" : 401,\n \"errors\" : [ {\n \"code\" : \"INVALID_TOKEN\",\n \"description\" : \"Unauthorized\",\n \"id\" : \"0\",\n \"field\" : null\n } ]\n}")) return false } if r.Method != http.MethodGet { w.WriteHeader(http.StatusMethodNotAllowed) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // no error handling w.Write([]byte("{\n \"httpStatus\" : 405,\n \"errors\" : [ ]\n}")) return false } @@ -100,11 +103,13 @@ var jamfTests = []struct { } mux.Handle("/api/preview/computers", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if isValidRequest(w, r) { + //nolint:errcheck // no error handling w.Write(computers) } })) mux.Handle("/JSSResource/users", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { if isValidRequest(w, r) { + //nolint:errcheck // no error handling w.Write(users) } })) @@ -117,13 +122,7 @@ var jamfTests = []struct { } tenant = u.Host - cli := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - }, - } + cli := srv.Client() return tenant, username, password, cli, srv.Close, nil }, @@ -147,6 +146,7 @@ func TestJamf(t *testing.T) { for _, test := range jamfTests { t.Run(test.name, func(t *testing.T) { tenant, username, password, client, cleanup, err := test.context() + //nolint:errorlint // false positive switch err := err.(type) { case nil: case skipError: diff --git 
a/x-pack/filebeat/input/entityanalytics/provider/jamf/jamf_test.go b/x-pack/filebeat/input/entityanalytics/provider/jamf/jamf_test.go index dc759166d7b6..9ca7128d11c3 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/jamf/jamf_test.go +++ b/x-pack/filebeat/input/entityanalytics/provider/jamf/jamf_test.go @@ -6,7 +6,6 @@ package jamf import ( "context" - "crypto/tls" "encoding/json" "flag" "fmt" @@ -18,8 +17,8 @@ import ( _ "embed" + "github.com/gofrs/uuid/v5" "github.com/google/go-cmp/cmp" - "github.com/google/uuid" "gopkg.in/natefinch/lumberjack.v2" "github.com/elastic/beats/v7/x-pack/filebeat/input/entityanalytics/provider/jamf/internal/jamf" @@ -38,14 +37,12 @@ func TestJamfDoFetch(t *testing.T) { testCleanupStore(store, dbFilename) }) - var ( - wantComputers []*Computer - rawComputers jamf.Computers - ) + var rawComputers jamf.Computers err := json.Unmarshal(computers, &rawComputers) if err != nil { t.Fatalf("failed to unmarshal device data: %v", err) } + wantComputers := make([]*Computer, 0, len(rawComputers.Results)) for _, c := range rawComputers.Results { wantComputers = append(wantComputers, &Computer{ Computer: c, @@ -109,16 +106,18 @@ func testContext() (tenant string, username string, password string, client *htt if !ok || user != username || pass != password { w.WriteHeader(http.StatusUnauthorized) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // ignore w.Write([]byte("{\n \"httpStatus\" : 401,\n \"errors\" : [ ]\n}")) return } if r.Method != http.MethodPost { w.WriteHeader(http.StatusMethodNotAllowed) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // ignore w.Write([]byte("{\n \"httpStatus\" : 405,\n \"errors\" : [ ]\n}")) return } - tok.Token = uuid.New().String() + tok.Token = uuid.Must(uuid.NewV4()).String() tok.Expires = time.Now().In(time.UTC).Add(time.Hour) fmt.Fprintf(w, "{\n \"token\" : \"%s\",\n \"expires\" : \"%s\"\n}", tok.Token, tok.Expires.Format(time.RFC3339)) })) @@ -126,15 +125,18 @@ func testContext() (tenant string, username string, password string, client *htt if r.Header.Get("Authorization") != "Bearer "+tok.Token || !tok.IsValidFor(0) { w.WriteHeader(http.StatusUnauthorized) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // ignore w.Write([]byte("{\n \"httpStatus\" : 401,\n \"errors\" : [ {\n \"code\" : \"INVALID_TOKEN\",\n \"description\" : \"Unauthorized\",\n \"id\" : \"0\",\n \"field\" : null\n } ]\n}")) return } if r.Method != http.MethodGet { w.WriteHeader(http.StatusMethodNotAllowed) w.Header().Set("content-type", "application/json;charset=UTF-8") + //nolint:errcheck // ignore w.Write([]byte("{\n \"httpStatus\" : 405,\n \"errors\" : [ ]\n}")) return } + //nolint:errcheck // ignore w.Write(computers) })) @@ -146,13 +148,7 @@ func testContext() (tenant string, username string, password string, client *htt } tenant = u.Host - cli := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: true, - }, - }, - } + cli := srv.Client() return tenant, username, password, cli, srv.Close, nil } diff --git a/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go b/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go index d3a313c10329..70f9a8f3a555 100644 --- a/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go +++ b/x-pack/filebeat/input/entityanalytics/provider/okta/okta.go @@ -695,6 +695,7 @@ func (p *oktaInput) publishUser(u *User, state *stateStore, inputID string, clie _, 
_ = userDoc.Put("okta", u.User) _, _ = userDoc.Put("labels.identity_source", inputID) _, _ = userDoc.Put("user.id", u.ID) + _, _ = userDoc.Put("groups", u.Groups) switch u.State { case Deleted: diff --git a/x-pack/filebeat/input/httpjson/value_tpl.go b/x-pack/filebeat/input/httpjson/value_tpl.go index cf7e43cf8e4c..b7258680dea4 100644 --- a/x-pack/filebeat/input/httpjson/value_tpl.go +++ b/x-pack/filebeat/input/httpjson/value_tpl.go @@ -24,7 +24,7 @@ import ( "text/template" "time" - "github.com/google/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/version" "github.com/elastic/elastic-agent-libs/logp" @@ -483,7 +483,7 @@ func hexDecode(enc string) string { } func uuidString() string { - uuid, err := uuid.NewRandom() + uuid, err := uuid.NewV4() if err != nil { return "" } diff --git a/x-pack/functionbeat/manager/aws/op_cloudformation.go b/x-pack/functionbeat/manager/aws/op_cloudformation.go index 3298d8660805..028c78cd4a4e 100644 --- a/x-pack/functionbeat/manager/aws/op_cloudformation.go +++ b/x-pack/functionbeat/manager/aws/op_cloudformation.go @@ -14,7 +14,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cloudformation" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/x-pack/functionbeat/manager/executor" "github.com/elastic/elastic-agent-libs/logp" diff --git a/x-pack/functionbeat/manager/aws/op_delete_cloudformation.go b/x-pack/functionbeat/manager/aws/op_delete_cloudformation.go index 0f9fb60c5936..4954e70b2666 100644 --- a/x-pack/functionbeat/manager/aws/op_delete_cloudformation.go +++ b/x-pack/functionbeat/manager/aws/op_delete_cloudformation.go @@ -9,7 +9,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cloudformation" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/x-pack/functionbeat/manager/executor" "github.com/elastic/elastic-agent-libs/logp" diff --git a/x-pack/functionbeat/manager/aws/op_update_cloudformation.go b/x-pack/functionbeat/manager/aws/op_update_cloudformation.go index 8441c89ca2cb..9a75909f2479 100644 --- a/x-pack/functionbeat/manager/aws/op_update_cloudformation.go +++ b/x-pack/functionbeat/manager/aws/op_update_cloudformation.go @@ -11,7 +11,7 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/cloudformation" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/x-pack/functionbeat/manager/executor" "github.com/elastic/elastic-agent-libs/logp" diff --git a/x-pack/heartbeat/include/list.go b/x-pack/heartbeat/include/list.go index 20ea9d71688b..d2fac57099d9 100644 --- a/x-pack/heartbeat/include/list.go +++ b/x-pack/heartbeat/include/list.go @@ -4,6 +4,8 @@ // Code generated by beats/dev-tools/cmd/module_include_list/module_include_list.go - DO NOT EDIT. 
+//go:build linux || darwin || synthetics + package include import ( diff --git a/x-pack/heartbeat/magefile.go b/x-pack/heartbeat/magefile.go index 8e7cd7f664f3..c10004340319 100644 --- a/x-pack/heartbeat/magefile.go +++ b/x-pack/heartbeat/magefile.go @@ -77,6 +77,7 @@ func TestPackages() error { func GenerateModuleIncludeListGo() error { opts := devtools.DefaultIncludeListOptions() opts.ImportDirs = append(opts.ImportDirs, "monitors/*") + opts.BuildTags = "\n//go:build linux || darwin || synthetics\n" return devtools.GenerateIncludeListGo(opts) } diff --git a/x-pack/heartbeat/monitors/browser/synthexec/enrich.go b/x-pack/heartbeat/monitors/browser/synthexec/enrich.go index 05d726d6398a..9201c11d1e58 100644 --- a/x-pack/heartbeat/monitors/browser/synthexec/enrich.go +++ b/x-pack/heartbeat/monitors/browser/synthexec/enrich.go @@ -13,7 +13,7 @@ import ( "github.com/elastic/beats/v7/libbeat/processors/add_data_stream" "github.com/elastic/elastic-agent-libs/mapstr" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/heartbeat/eventext" "github.com/elastic/beats/v7/heartbeat/monitors/stdfields" diff --git a/x-pack/heartbeat/scenarios/basics_test.go b/x-pack/heartbeat/scenarios/basics_test.go index 08794c12146b..4d2d7f741322 100644 --- a/x-pack/heartbeat/scenarios/basics_test.go +++ b/x-pack/heartbeat/scenarios/basics_test.go @@ -14,7 +14,6 @@ import ( "github.com/elastic/go-lookslike/testslike" "github.com/elastic/go-lookslike/validator" - "github.com/elastic/beats/v7/heartbeat/hbtest" "github.com/elastic/beats/v7/heartbeat/hbtestllext" _ "github.com/elastic/beats/v7/heartbeat/monitors/active/http" _ "github.com/elastic/beats/v7/heartbeat/monitors/active/icmp" @@ -111,29 +110,6 @@ func TestLightweightSummaries(t *testing.T) { }) } -func TestBrowserSummaries(t *testing.T) { - t.Parallel() - scenarioDB.RunTagWithSeparateTwists(t, "browser", StdAttemptTwists, func(t *testing.T, mtr *framework.MonitorTestRun, err error) { - all := mtr.Events() - lastEvent := all[len(all)-1] - - testslike.Test(t, - lookslike.Compose( - SummaryValidatorForStatus(mtr.Meta.Status), - hbtest.URLChecks(t, mtr.Meta.URL), - ), - lastEvent.Fields) - - monStatus, _ := lastEvent.GetValue("monitor.status") - summaryIface, _ := lastEvent.GetValue("summary") - summary := summaryIface.(*jobsummary.JobSummary) - require.Equal(t, string(summary.Status), monStatus, "expected summary status and mon status to be equal in event: %v", lastEvent.Fields) - - requireOneSummaryPerAttempt(t, all) - - }) -} - func requireOneSummaryPerAttempt(t *testing.T, events []*beat.Event) { attemptCounter := uint16(1) // ensure we only have one summary per attempt diff --git a/x-pack/heartbeat/scenarios/browserbasics_test.go b/x-pack/heartbeat/scenarios/browserbasics_test.go new file mode 100644 index 000000000000..2543982fb9b0 --- /dev/null +++ b/x-pack/heartbeat/scenarios/browserbasics_test.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build linux || darwin || synthetics + +package scenarios + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/elastic/go-lookslike" + "github.com/elastic/go-lookslike/testslike" + + "github.com/elastic/beats/v7/heartbeat/hbtest" + _ "github.com/elastic/beats/v7/heartbeat/monitors/active/http" + _ "github.com/elastic/beats/v7/heartbeat/monitors/active/icmp" + _ "github.com/elastic/beats/v7/heartbeat/monitors/active/tcp" + "github.com/elastic/beats/v7/heartbeat/monitors/wrappers/summarizer/jobsummary" + "github.com/elastic/beats/v7/x-pack/heartbeat/scenarios/framework" +) + +func TestBrowserSummaries(t *testing.T) { + t.Parallel() + scenarioDB.RunTagWithSeparateTwists(t, "browser", StdAttemptTwists, func(t *testing.T, mtr *framework.MonitorTestRun, err error) { + all := mtr.Events() + lastEvent := all[len(all)-1] + + testslike.Test(t, + lookslike.Compose( + SummaryValidatorForStatus(mtr.Meta.Status), + hbtest.URLChecks(t, mtr.Meta.URL), + ), + lastEvent.Fields) + + monStatus, _ := lastEvent.GetValue("monitor.status") + summaryIface, _ := lastEvent.GetValue("summary") + summary := summaryIface.(*jobsummary.JobSummary) + require.Equal(t, string(summary.Status), monStatus, "expected summary status and mon status to be equal in event: %v", lastEvent.Fields) + + requireOneSummaryPerAttempt(t, all) + + }) +} diff --git a/x-pack/heartbeat/scenarios/framework/framework.go b/x-pack/heartbeat/scenarios/framework/framework.go index c6dc0ae9ad6d..a2fb77e63070 100644 --- a/x-pack/heartbeat/scenarios/framework/framework.go +++ b/x-pack/heartbeat/scenarios/framework/framework.go @@ -16,7 +16,7 @@ import ( "github.com/elastic/beats/v7/heartbeat/monitors/stdfields" "github.com/elastic/beats/v7/heartbeat/monitors/wrappers/monitorstate" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/require" "github.com/elastic/elastic-agent-libs/config" diff --git a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go index 0c4d77209749..c58967c37c9c 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go +++ b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider.go @@ -10,7 +10,7 @@ import ( awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/autodiscover" "github.com/elastic/beats/v7/libbeat/autodiscover/template" @@ -79,7 +79,7 @@ func AutodiscoverBuilder( config.Regions = completeRegionsList } - var clients []ec2.DescribeInstancesAPIClient + clients := make([]ec2.DescribeInstancesAPIClient, 0, len(config.Regions)) for _, region := range config.Regions { if err != nil { logp.Error(fmt.Errorf("error loading AWS config for aws_ec2 autodiscover provider: %w", err)) diff --git a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go index a157d2878826..335acc3080ff 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go +++ b/x-pack/libbeat/autodiscover/providers/aws/ec2/provider_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -59,8 +59,8 @@ func Test_internalBuilder(t *testing.T) { // Let run twice to ensure that duplicates don't create two start events // Since we're turning a list of assets into a list of changes the second 
once() call should be a noop - provider.watcher.once() - provider.watcher.once() + require.NoError(t, provider.watcher.once()) + require.NoError(t, provider.watcher.once()) events.WaitForNumEvents(t, 1, time.Second) assert.Equal(t, 1, events.Len()) @@ -86,8 +86,8 @@ func Test_internalBuilder(t *testing.T) { fetcher.setEC2s([]*ec2Instance{}) // Let run twice to ensure that duplicates don't cause an issue - provider.watcher.once() - provider.watcher.once() + require.NoError(t, provider.watcher.once()) + require.NoError(t, provider.watcher.once()) events.WaitForNumEvents(t, 2, time.Second) require.Equal(t, 2, events.Len()) @@ -105,7 +105,9 @@ func Test_internalBuilder(t *testing.T) { fetcher.setError(errors.New("oops")) // Let run twice to ensure that duplicates don't cause an issue + //nolint:errcheck // ignore provider.watcher.once() + //nolint:errcheck // ignore provider.watcher.once() assert.Equal(t, preErrorEventCount, events.Len()) diff --git a/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go b/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go index ea44596e52af..2440f2c6f83f 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go +++ b/x-pack/libbeat/autodiscover/providers/aws/elb/provider.go @@ -8,7 +8,7 @@ import ( awssdk "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/service/ec2" "github.com/aws/aws-sdk-go-v2/service/elasticloadbalancingv2" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/autodiscover" "github.com/elastic/beats/v7/libbeat/autodiscover/template" diff --git a/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go b/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go index 688323e26f22..a6b08c8afe86 100644 --- a/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go +++ b/x-pack/libbeat/autodiscover/providers/aws/elb/provider_test.go @@ -10,7 +10,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -52,7 +52,7 @@ func (tea *testEventAccumulator) get() []bus.Event { func (tea *testEventAccumulator) waitForNumEvents(t *testing.T, targetLen int, timeout time.Duration) { start := time.Now() - for time.Now().Sub(start) < timeout { + for time.Since(start) < timeout { if tea.len() >= targetLen { return } @@ -100,8 +100,8 @@ func Test_internalBuilder(t *testing.T) { // Let run twice to ensure that duplicates don't create two start events // Since we're turning a list of assets into a list of changes the second once() call should be a noop - provider.watcher.once() - provider.watcher.once() + require.NoError(t, provider.watcher.once()) + require.NoError(t, provider.watcher.once()) events.waitForNumEvents(t, 1, time.Second) assert.Equal(t, 1, events.len()) @@ -129,8 +129,8 @@ func Test_internalBuilder(t *testing.T) { fetcher.setLbls([]*lbListener{}) // Let run twice to ensure that duplicates don't cause an issue - provider.watcher.once() - provider.watcher.once() + require.NoError(t, provider.watcher.once()) + require.NoError(t, provider.watcher.once()) events.waitForNumEvents(t, 2, time.Second) require.Equal(t, 2, events.len()) @@ -148,7 +148,9 @@ func Test_internalBuilder(t *testing.T) { fetcher.setError(errors.New("oops")) // Let run twice to ensure that duplicates don't cause an issue + //nolint:errcheck // ignore provider.watcher.once() + //nolint:errcheck // ignore provider.watcher.once() assert.Equal(t, preErrorEventCount, events.len()) diff 
--git a/x-pack/libbeat/autodiscover/providers/nomad/nomad.go b/x-pack/libbeat/autodiscover/providers/nomad/nomad.go index 3cfd4ce5feec..4d1bba4bc258 100644 --- a/x-pack/libbeat/autodiscover/providers/nomad/nomad.go +++ b/x-pack/libbeat/autodiscover/providers/nomad/nomad.go @@ -9,7 +9,7 @@ import ( "strings" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/elastic-agent-autodiscover/utils" diff --git a/x-pack/libbeat/autodiscover/providers/nomad/nomad_test.go b/x-pack/libbeat/autodiscover/providers/nomad/nomad_test.go index 6f88261a1f04..95d24f8bad88 100644 --- a/x-pack/libbeat/autodiscover/providers/nomad/nomad_test.go +++ b/x-pack/libbeat/autodiscover/providers/nomad/nomad_test.go @@ -12,7 +12,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/hashicorp/nomad/api" "github.com/stretchr/testify/assert" diff --git a/x-pack/libbeat/common/cloudfoundry/cache_test.go b/x-pack/libbeat/common/cloudfoundry/cache_test.go index d77ba9633d55..8d4cef2c2fb3 100644 --- a/x-pack/libbeat/common/cloudfoundry/cache_test.go +++ b/x-pack/libbeat/common/cloudfoundry/cache_test.go @@ -11,7 +11,7 @@ import ( "time" "github.com/cloudfoundry-community/go-cfclient" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" diff --git a/x-pack/libbeat/common/cloudfoundry/test/config.go b/x-pack/libbeat/common/cloudfoundry/test/config.go index 45059fc559c6..c9f2293eb308 100644 --- a/x-pack/libbeat/common/cloudfoundry/test/config.go +++ b/x-pack/libbeat/common/cloudfoundry/test/config.go @@ -8,7 +8,7 @@ import ( "os" "testing" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" ) func GetConfigFromEnv(t *testing.T) map[string]interface{} { diff --git a/x-pack/libbeat/common/nomad/metadata_test.go b/x-pack/libbeat/common/nomad/metadata_test.go index 16855fa9c414..08616be9f83f 100644 --- a/x-pack/libbeat/common/nomad/metadata_test.go +++ b/x-pack/libbeat/common/nomad/metadata_test.go @@ -7,9 +7,10 @@ package nomad import ( "testing" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/hashicorp/nomad/api" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" conf "github.com/elastic/elastic-agent-libs/config" "github.com/elastic/elastic-agent-libs/mapstr" @@ -75,6 +76,7 @@ func TestAllocationMetadata(t *testing.T) { "annotations.dedot": false, "include_annotations": []string{"b", "b.key"}, }) + require.NoError(t, err) metaGen, err := NewMetaGenerator(config, nil) if err != nil { @@ -105,6 +107,7 @@ func TestExcludeMetadata(t *testing.T) { config, err := conf.NewConfigFrom(map[string]interface{}{ "exclude_labels": []string{"key1", "canary_tags"}, }) + require.NoError(t, err) metaGen, err := NewMetaGenerator(config, nil) if err != nil { @@ -158,6 +161,7 @@ func TestCronJob(t *testing.T) { } config, err := conf.NewConfigFrom(map[string]interface{}{}) + require.NoError(t, err) metaGen, err := NewMetaGenerator(config, nil) if err != nil { diff --git a/x-pack/libbeat/common/nomad/watcher_test.go b/x-pack/libbeat/common/nomad/watcher_test.go index a8cd6555acc8..488d9ef8bf84 100644 --- a/x-pack/libbeat/common/nomad/watcher_test.go +++ b/x-pack/libbeat/common/nomad/watcher_test.go @@ -12,9 +12,10 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" api "github.com/hashicorp/nomad/api" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" 
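A recurring theme in the test changes is error hygiene: return values that used to be dropped are now asserted with require.NoError, deliberately ignored calls are annotated with //nolint:errcheck, and manual temp-dir bookkeeping gives way to t.TempDir(). A small self-contained test illustrating those conventions (the file name and contents here are invented for illustration):

```go
package example

import (
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/require"
)

func TestErrorHygiene(t *testing.T) {
	// t.TempDir() creates a per-test directory and removes it automatically,
	// replacing ioutil.TempDir plus a manual cleanup of the directory.
	dir := t.TempDir()

	path := filepath.Join(dir, "state.json")
	require.NoError(t, os.WriteFile(path, []byte(`{}`), 0o600))

	// Errors that matter to the test are asserted instead of silently dropped.
	data, err := os.ReadFile(path)
	require.NoError(t, err)
	require.Equal(t, "{}", string(data))

	// Errors that are deliberately irrelevant are marked so errcheck stays quiet.
	//nolint:errcheck // best-effort cleanup only
	os.Remove(path)
}
```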
"github.com/elastic/beats/v7/libbeat/tests/resources" ) @@ -31,10 +32,12 @@ func nomadRoutes(node api.Node, allocs []api.Allocation, waitIndex uint64) *http payload, err := json.Marshal([]interface{}{node}) if err != nil { w.WriteHeader(http.StatusInternalServerError) + //nolint:errcheck // ignore w.Write([]byte(err.Error())) } w.Header().Add(NomadIndexHeader, fmt.Sprint(time.Now().Unix())) + //nolint:errcheck // ignore w.Write(payload) }) @@ -42,10 +45,12 @@ func nomadRoutes(node api.Node, allocs []api.Allocation, waitIndex uint64) *http payload, err := json.Marshal(allocs) if err != nil { w.WriteHeader(http.StatusInternalServerError) + //nolint:errcheck // ignore w.Write([]byte(err.Error())) } w.Header().Add(NomadIndexHeader, fmt.Sprint(waitIndex)) + //nolint:errcheck // ignore w.Write(payload) }) @@ -300,7 +305,8 @@ func TestAllocationWatcher(t *testing.T) { goroutines := resources.NewGoroutinesChecker() defer goroutines.Check(t) - watcher.Start() + err = watcher.Start() + require.NoError(t, err) defer watcher.Stop() assert.Equal(t, tt.expected, events) diff --git a/x-pack/libbeat/persistentcache/persistentcache_test.go b/x-pack/libbeat/persistentcache/persistentcache_test.go index 416905d2e40b..7a3b61b8cb3a 100644 --- a/x-pack/libbeat/persistentcache/persistentcache_test.go +++ b/x-pack/libbeat/persistentcache/persistentcache_test.go @@ -6,15 +6,13 @@ package persistentcache import ( "fmt" - "io/ioutil" "math/rand" - "os" "path/filepath" "strconv" "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -277,6 +275,7 @@ func BenchmarkPut(b *testing.B) { for i := 0; i < b.N; i++ { cache := c.factory(b, b.Name()) for _, object := range objects { + //nolint:errcheck // benchmarks cache.Put(object.ID, object) } cache.Close() @@ -392,6 +391,7 @@ func BenchmarkGet(b *testing.B) { b.ResetTimer() for i := 0; i < b.N; i++ { expected := objects[rand.Intn(size)] + //nolint:errcheck // benchmarks cache.Get(expected.ID, &result) if expected.ID != result.ID { b.Fatalf("%s != %s", expected.ID, result.ID) @@ -408,29 +408,7 @@ func BenchmarkGet(b *testing.B) { func testOptions(t testing.TB) Options { t.Helper() - tempDir, err := ioutil.TempDir("", "beat-data-dir-") - require.NoError(t, err) - - t.Cleanup(func() { os.RemoveAll(tempDir) }) - return Options{ - RootPath: filepath.Join(tempDir, cacheFile), + RootPath: filepath.Join(t.TempDir(), cacheFile), } } - -func dirSize(tb testing.TB, path string) int64 { - var size int64 - - err := filepath.Walk(path, func(_ string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - size += info.Size() - } - return nil - }) - require.NoError(tb, err) - - return size -} diff --git a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go index f20d713c7b6a..26afdbf925b2 100644 --- a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go +++ b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata.go @@ -9,7 +9,7 @@ package add_cloudfoundry_metadata import ( "fmt" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/elastic/beats/v7/libbeat/beat" "github.com/elastic/beats/v7/libbeat/processors" diff --git a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go 
b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go index 002418c98254..6154d6f0b90a 100644 --- a/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go +++ b/x-pack/libbeat/processors/add_cloudfoundry_metadata/add_cloudfoundry_metadata_test.go @@ -10,7 +10,7 @@ import ( "testing" "github.com/cloudfoundry-community/go-cfclient" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/stretchr/testify/assert" "github.com/elastic/beats/v7/libbeat/beat" diff --git a/x-pack/libbeat/reader/parquet/parquet.go b/x-pack/libbeat/reader/parquet/parquet.go index cc8956155961..3fbe357b2126 100644 --- a/x-pack/libbeat/reader/parquet/parquet.go +++ b/x-pack/libbeat/reader/parquet/parquet.go @@ -14,6 +14,8 @@ import ( "github.com/apache/arrow/go/v14/parquet" "github.com/apache/arrow/go/v14/parquet/file" "github.com/apache/arrow/go/v14/parquet/pqarrow" + + "github.com/elastic/elastic-agent-libs/logp" ) // BufferedReader parses parquet inputs from io streams. @@ -21,6 +23,7 @@ type BufferedReader struct { cfg *Config fileReader *file.Reader recordReader pqarrow.RecordReader + log *logp.Logger } // NewBufferedReader creates a new reader that can decode parquet data from an io.Reader. @@ -28,51 +31,62 @@ type BufferedReader struct { // Note: As io.ReadAll is used, the entire data stream would be read into memory, so very large data streams // may cause memory bottleneck issues. func NewBufferedReader(r io.Reader, cfg *Config) (*BufferedReader, error) { - batchSize := 1 - if cfg.BatchSize > 1 { - batchSize = cfg.BatchSize + log := logp.L().Named("reader.parquet") + + if cfg.BatchSize == 0 { + cfg.BatchSize = 1 } + log.Debugw("creating parquet reader", "batch_size", cfg.BatchSize) // reads the contents of the reader object into a byte slice data, err := io.ReadAll(r) if err != nil { return nil, fmt.Errorf("failed to read data from stream reader: %w", err) } + log.Debugw("read data from stream reader", "size", len(data)) // defines a memory allocator for allocating memory for Arrow objects pool := memory.NewCheckedAllocator(&memory.GoAllocator{}) - + // constructs a parquet file reader object from the byte slice data pf, err := file.NewParquetReader(bytes.NewReader(data), file.WithReadProps(parquet.NewReaderProperties(pool))) if err != nil { return nil, fmt.Errorf("failed to create parquet reader: %w", err) } + log.Debugw("created parquet reader") // constructs a reader for converting to Arrow objects from an existing parquet file reader object reader, err := pqarrow.NewFileReader(pf, pqarrow.ArrowReadProperties{ Parallel: cfg.ProcessParallel, - BatchSize: int64(batchSize), + BatchSize: int64(cfg.BatchSize), }, pool) if err != nil { return nil, fmt.Errorf("failed to create pqarrow parquet reader: %w", err) } + log.Debugw("created pqarrow parquet reader") // constructs a record reader that is capable of reding entire sets of arrow records rr, err := reader.GetRecordReader(context.Background(), nil, nil) if err != nil { return nil, fmt.Errorf("failed to create parquet record reader: %w", err) } + log.Debugw("initialization process completed") return &BufferedReader{ cfg: cfg, recordReader: rr, fileReader: pf, + log: log, }, nil } // Next advances the pointer to point to the next record and returns true if the next record exists. // It will return false if there are no more records to read. 
func (sr *BufferedReader) Next() bool { - return sr.recordReader.Next() + next := sr.recordReader.Next() + if !next { + sr.log.Debugw("no more records to read", "next", next) + } + return next } // Record reads the current record from the parquet file and returns it as a JSON marshaled byte slice. @@ -81,6 +95,7 @@ func (sr *BufferedReader) Next() bool { func (sr *BufferedReader) Record() ([]byte, error) { rec := sr.recordReader.Record() if rec == nil { + sr.log.Debugw("reached the end of the record reader", "record_reader", rec) return nil, io.EOF } defer rec.Release() @@ -88,6 +103,8 @@ func (sr *BufferedReader) Record() ([]byte, error) { if err != nil { return nil, fmt.Errorf("failed to marshal JSON for parquet value: %w", err) } + sr.log.Debugw("records successfully read", "batch_size", sr.cfg.BatchSize) + return val, nil } diff --git a/x-pack/libbeat/reader/parquet/parquet_test.go b/x-pack/libbeat/reader/parquet/parquet_test.go index a4ba04426183..61f4936d1f87 100644 --- a/x-pack/libbeat/reader/parquet/parquet_test.go +++ b/x-pack/libbeat/reader/parquet/parquet_test.go @@ -19,6 +19,8 @@ import ( "github.com/apache/arrow/go/v14/arrow/memory" "github.com/apache/arrow/go/v14/parquet/pqarrow" "github.com/stretchr/testify/assert" + + "github.com/elastic/elastic-agent-libs/logp" ) // all test files are read from/stored within the "testdata" directory @@ -55,6 +57,7 @@ func TestParquetWithRandomData(t *testing.T) { }, } + logp.TestingSetup() for i, tc := range testCases { name := fmt.Sprintf("Test parquet files with rows=%d, and columns=%d", tc.rows, tc.columns) t.Run(name, func(t *testing.T) { @@ -189,6 +192,7 @@ func TestParquetWithFiles(t *testing.T) { }, } + logp.TestingSetup() for _, tc := range testCases { name := fmt.Sprintf("Test parquet files with source file=%s, and target comparison file=%s", tc.parquetFile, tc.jsonFile) t.Run(name, func(t *testing.T) { diff --git a/x-pack/metricbeat/metricbeat.reference.yml b/x-pack/metricbeat/metricbeat.reference.yml index 71541d491fdc..738d7ef2830b 100644 --- a/x-pack/metricbeat/metricbeat.reference.yml +++ b/x-pack/metricbeat/metricbeat.reference.yml @@ -1568,7 +1568,7 @@ metricbeat.modules: #------------------------------- VSphere Module ------------------------------- - module: vsphere enabled: true - metricsets: ["cluster", "datastore", "host", "virtualmachine", "network", "resourcepool"] + metricsets: ["cluster", "datastore", "datastorecluster", "host", "network", "resourcepool", "virtualmachine"] # Real-time data collection – An ESXi Server collects data for each performance counter every 20 seconds. 
period: 20s hosts: ["https://localhost/sdk"] diff --git a/x-pack/metricbeat/module/azure/app_insights/client.go b/x-pack/metricbeat/module/azure/app_insights/client.go index d2bed8fbf0ee..75e931b71321 100644 --- a/x-pack/metricbeat/module/azure/app_insights/client.go +++ b/x-pack/metricbeat/module/azure/app_insights/client.go @@ -8,7 +8,7 @@ import ( "fmt" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/Azure/azure-sdk-for-go/services/preview/appinsights/v1/insights" @@ -40,6 +40,7 @@ func (client *Client) GetMetricValues() (insights.ListMetricsResultsItem, error) var bodyMetrics []insights.MetricsPostBodySchema var result insights.ListMetricsResultsItem for _, metrics := range client.Config.Metrics { + metrics := metrics var aggregations []insights.MetricsAggregation var segments []insights.MetricsSegment for _, agg := range metrics.Aggregation { diff --git a/x-pack/metricbeat/module/gcp/metrics/compute/metadata.go b/x-pack/metricbeat/module/gcp/metrics/compute/metadata.go index c215e471b454..eb9f59713035 100644 --- a/x-pack/metricbeat/module/gcp/metrics/compute/metadata.go +++ b/x-pack/metricbeat/module/gcp/metrics/compute/metadata.go @@ -10,6 +10,7 @@ import ( "fmt" "strconv" "strings" + "time" compute "cloud.google.com/go/compute/apiv1" "cloud.google.com/go/compute/apiv1/computepb" @@ -188,6 +189,11 @@ func (s *metadataCollector) getComputeInstances(ctx context.Context) { defer instancesClient.Close() + start := time.Now() + defer func() { + s.logger.Debugf("Total time taken for compute AggregatedList request: %s", time.Since(start)) + }() + it := instancesClient.AggregatedList(ctx, &computepb.AggregatedListInstancesRequest{ Project: s.projectID, }) diff --git a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go index e7e7f081e526..d33d710f77a8 100644 --- a/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go +++ b/x-pack/metricbeat/module/gcp/metrics/metrics_requester.go @@ -124,6 +124,11 @@ func (r *metricsRequester) Metrics(ctx context.Context, serviceName string, alig } } + start := time.Now() + defer func() { + r.logger.Debugf("Total time taken for all ListTimeSeries requests: %s and config period: %s", time.Since(start), r.config.period.AsDuration()) + }() + for mt, meta := range metricsToCollect { wg.Add(1) diff --git a/x-pack/osquerybeat/beater/action_handler_test.go b/x-pack/osquerybeat/beater/action_handler_test.go index f3008e4ee769..d446b883175a 100644 --- a/x-pack/osquerybeat/beater/action_handler_test.go +++ b/x-pack/osquerybeat/beater/action_handler_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/google/go-cmp/cmp" "github.com/elastic/beats/v7/x-pack/osquerybeat/internal/ecs" diff --git a/x-pack/osquerybeat/beater/osquerybeat.go b/x-pack/osquerybeat/beater/osquerybeat.go index 119d72cdd5af..7ed65de2dc69 100644 --- a/x-pack/osquerybeat/beater/osquerybeat.go +++ b/x-pack/osquerybeat/beater/osquerybeat.go @@ -11,7 +11,7 @@ import ( "sync" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" lru "github.com/hashicorp/golang-lru" "github.com/osquery/osquery-go" kconfig "github.com/osquery/osquery-go/plugin/config" diff --git a/x-pack/osquerybeat/internal/fetch/fetch_test.go b/x-pack/osquerybeat/internal/fetch/fetch_test.go index f234a78f2569..3e89ada6f5a4 100644 --- a/x-pack/osquerybeat/internal/fetch/fetch_test.go +++ b/x-pack/osquerybeat/internal/fetch/fetch_test.go @@ -11,7 +11,7 @@ import ( "os" 
"testing" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/google/go-cmp/cmp" ) diff --git a/x-pack/osquerybeat/internal/osqd/osqueryd_test.go b/x-pack/osquerybeat/internal/osqd/osqueryd_test.go index 8a07cfd9d738..12b20b4e09cd 100644 --- a/x-pack/osquerybeat/internal/osqd/osqueryd_test.go +++ b/x-pack/osquerybeat/internal/osqd/osqueryd_test.go @@ -15,7 +15,7 @@ import ( "github.com/elastic/beats/v7/x-pack/osquerybeat/internal/fileutil" "github.com/elastic/elastic-agent-libs/logp" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/google/go-cmp/cmp" ) diff --git a/x-pack/osquerybeat/internal/osqd/osqueryd_windows.go b/x-pack/osquerybeat/internal/osqd/osqueryd_windows.go index 0d14c417cbf3..65550e4332ae 100644 --- a/x-pack/osquerybeat/internal/osqd/osqueryd_windows.go +++ b/x-pack/osquerybeat/internal/osqd/osqueryd_windows.go @@ -11,7 +11,7 @@ import ( "os/exec" "syscall" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" ) const ( diff --git a/x-pack/osquerybeat/internal/pub/publisher_test.go b/x-pack/osquerybeat/internal/pub/publisher_test.go index ad592bf7ec67..7b7d2120e847 100644 --- a/x-pack/osquerybeat/internal/pub/publisher_test.go +++ b/x-pack/osquerybeat/internal/pub/publisher_test.go @@ -9,7 +9,7 @@ import ( "testing" "time" - "github.com/gofrs/uuid" + "github.com/gofrs/uuid/v5" "github.com/google/go-cmp/cmp" "github.com/elastic/beats/v7/libbeat/beat/events"