diff --git a/AGENTS.md b/AGENTS.md index dd46c59ecd..966b4c0526 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -67,5 +67,5 @@ elastic-package uses the objects-based Fleet API (`PackagePolicy`) — not the d - **Input key**: `"{policyTemplate.Name}-{input.Type}"` (e.g. `"apache-logfile"`). - **Stream key**: built by `datasetKey(pkgName, ds)` — uses `ds.Dataset` when set, otherwise `"{pkgName}.{ds.Name}"`. - **Sibling stream disabling**: Fleet auto-enables all streams for an enabled input unless they are explicitly listed with `enabled: false`. Always send `{enabled: false}` for every sibling data stream sharing the same input type within the same policy template. -- **Policy template scoping**: When a policy template declares a `data_streams` list, only include data streams from that list as siblings. Use `packages.DataStreamsForInput(packageRoot, policyTemplate, streamInput)` to get the correct set. -- **Variable format**: the objects-based API expects raw values, not `{"type": ..., "value": ...}` wrappers. `Vars.ToMap()` extracts raw values via `val.Value.Value()`. +- **Policy template scoping**: When a policy template declares a `data_streams` list, only include data streams from that list as siblings. Use `packages.FilterDatastreamsForPolicyTemplate(datastreams, policyTemplate)` on the result of `packages.ReadAllDataStreamManifests(packageRoot)` to get the correct set. +- **Variable format**: the objects-based API expects raw values, not `{"type": ..., "value": ...}` wrappers. `Vars.ToMapStr()` extracts raw values via `val.Value.Value()`. diff --git a/internal/benchrunner/runners/system/runner.go b/internal/benchrunner/runners/system/runner.go index 62b9691a6f..c05041a82f 100644 --- a/internal/benchrunner/runners/system/runner.go +++ b/internal/benchrunner/runners/system/runner.go @@ -487,7 +487,7 @@ func (r *runner) createPackagePolicy(ctx context.Context, pkgManifest *packages. 
pp.Package.Name = r.scenario.Package pp.Package.Version = r.scenario.Version - policy, err := r.options.KibanaClient.CreatePackagePolicy(ctx, pp) + policy, err := r.options.KibanaClient.CreatePackagePolicy(ctx, pp, kibana.PolicyAPIFormatAuto) if err != nil { return nil, err } diff --git a/internal/kibana/legacypolicy.go b/internal/kibana/legacypolicy.go new file mode 100644 index 0000000000..d8945224a6 --- /dev/null +++ b/internal/kibana/legacypolicy.go @@ -0,0 +1,117 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Package kibana provides Fleet API client functionality. +// This file contains the legacy (arrays-based) package policy types and +// conversion logic used for old Kibana versions. + +package kibana + +// legacyDataStream identifies a data stream in the legacy package policy format. +type legacyDataStream struct { + Type string `json:"type"` + Dataset string `json:"dataset"` +} + +// legacyStream is one stream entry in the legacy inputs array format. +type legacyStream struct { + ID string `json:"id,omitempty"` + Enabled bool `json:"enabled"` + DataStream legacyDataStream `json:"data_stream"` + Vars map[string]Var `json:"vars,omitempty"` +} + +// legacyInput is one input entry in the legacy inputs array format. +type legacyInput struct { + PolicyTemplate string `json:"policy_template,omitempty"` + Type string `json:"type"` + Enabled bool `json:"enabled"` + Vars map[string]Var `json:"vars,omitempty"` + Streams []legacyStream `json:"streams"` +} + +// legacyPackagePolicy is the legacy (arrays-based) Fleet package policy. 
+type legacyPackagePolicy struct { + Name string `json:"name"` + Description string `json:"description"` + Namespace string `json:"namespace"` + PolicyID string `json:"policy_id"` + Enabled bool `json:"enabled"` + Package struct { + Name string `json:"name"` + Title string `json:"title"` + Version string `json:"version"` + } `json:"package"` + Inputs []legacyInput `json:"inputs"` + OutputID string `json:"output_id"` + Vars map[string]Var `json:"vars,omitempty"` + Force bool `json:"force"` +} + +// toLegacyMapVar converts Vars to the {value, type} map format expected by the +// legacy Fleet API. +func (v Vars) toLegacyMapVar() map[string]Var { + if len(v) == 0 { + return nil + } + m := make(map[string]Var, len(v)) + for k, val := range v { + m[k] = val + } + return m +} + +// toLegacy converts a PackagePolicy (simplified format) to the +// legacy arrays-based format. +func (p PackagePolicy) toLegacy() legacyPackagePolicy { + legacy := legacyPackagePolicy{ + Name: p.Name, + Description: p.Description, + Namespace: p.Namespace, + PolicyID: p.PolicyID, + Enabled: true, + Force: p.Force, + Vars: p.legacyVars.toLegacyMapVar(), + } + legacy.Package.Name = p.Package.Name + legacy.Package.Title = p.legacyPackageTitle + legacy.Package.Version = p.Package.Version + legacy.OutputID = p.OutputID + + // Convert each input from the simplified map to a legacy input entry. + // Skip disabled inputs to match the legacy (main-branch) behaviour. + for _, i := range p.Inputs { + if !i.Enabled { + continue + } + input := legacyInput{ + PolicyTemplate: i.policyTemplate, + Type: i.inputType, + Enabled: i.Enabled, + Vars: i.legacyVars.toLegacyMapVar(), + Streams: []legacyStream{}, + } + + // Convert each stream from the simplified map to a legacy stream entry. + // Skip disabled streams to match the legacy behaviour. 
+ for _, s := range i.Streams { + if !s.Enabled { + continue + } + stream := legacyStream{ + Enabled: s.Enabled, + DataStream: legacyDataStream{ + Type: s.dataStreamType, + Dataset: s.dataStreamDataset, + }, + Vars: s.legacyVars.toLegacyMapVar(), + } + input.Streams = append(input.Streams, stream) + } + + legacy.Inputs = append(legacy.Inputs, input) + } + + return legacy +} diff --git a/internal/kibana/packagepolicy.go b/internal/kibana/packagepolicy.go new file mode 100644 index 0000000000..3e2969cbee --- /dev/null +++ b/internal/kibana/packagepolicy.go @@ -0,0 +1,285 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "fmt" + "strconv" + + "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/packages" +) + +// BuildIntegrationPackagePolicy builds a PackagePolicy for an integration package +// given pre-loaded manifests. +func BuildIntegrationPackagePolicy( + policyID, namespace, name string, + manifest packages.PackageManifest, + policyTemplate packages.PolicyTemplate, + dsManifest packages.DataStreamManifest, + inputName string, + inputVars, dsVars common.MapStr, + enabled bool, + allDatastreams []packages.DataStreamManifest, +) (PackagePolicy, error) { + streamIdx := packages.GetDataStreamIndex(inputName, dsManifest) + stream := dsManifest.Streams[streamIdx] + streamInput := stream.Input + + // Data streams for the given policy template. + datastreams := packages.FilterDatastreamsForPolicyTemplate(allDatastreams, policyTemplate) + + // Merge dsVars into inputVars for package/input-level resolution so that + // variables specified under data_stream.vars in the test config are also + // applied at the package or input level when they match those definitions. 
+ // inputVars takes precedence over dsVars. + allInputVars := make(common.MapStr, len(inputVars)+len(dsVars)) + for k, v := range dsVars { + allInputVars[k] = v + } + for k, v := range inputVars { + allInputVars[k] = v + } + + // Build all inputs: the enabled one gets proper streams with user vars; all + // others get streams with manifest defaults and are disabled. + inputs := make(map[string]PackagePolicyInput) + for _, pt := range manifest.PolicyTemplates { + for _, input := range pt.Inputs { + inputKey := fmt.Sprintf("%s-%s", pt.Name, input.Type) + if input.Type == streamInput && pt.Name == policyTemplate.Name { + // The target input: enabled with user-provided vars. + streams := buildStreamsForInput(streamInput, manifest, dsManifest, enabled, dsVars, datastreams) + inputEntry := PackagePolicyInput{ + Enabled: enabled, + Streams: streams, + inputType: streamInput, + policyTemplate: pt.Name, + } + if foundInput := policyTemplate.FindInputByType(streamInput); foundInput != nil { + iv := SetKibanaVariables(foundInput.Vars, allInputVars) + inputEntry.Vars = iv.ToMapStr() + inputEntry.legacyVars = iv + } + inputs[inputKey] = inputEntry + } else { + // A disabled input: use data streams scoped to this policy template + // so that sibling stream keys are correct even when multiple policy + // templates declare different data_streams lists. 
+ ptDatastreams := packages.FilterDatastreamsForPolicyTemplate(allDatastreams, pt) + streams := buildStreamsForInput(input.Type, manifest, packages.DataStreamManifest{}, false, common.MapStr{}, ptDatastreams) + entry := PackagePolicyInput{ + Enabled: false, + inputType: input.Type, + policyTemplate: pt.Name, + } + if len(streams) > 0 { + entry.Streams = streams + } + inputs[inputKey] = entry + } + } + } + + pkgVars := SetKibanaVariables(manifest.Vars, allInputVars) + policy := PackagePolicy{ + Name: name, + Namespace: namespace, + PolicyID: policyID, + Vars: pkgVars.ToMapStr(), + Inputs: inputs, + legacyPackageTitle: manifest.Title, + legacyVars: pkgVars, + } + policy.Package.Name = manifest.Name + policy.Package.Version = manifest.Version + + return policy, nil +} + +// buildStreamsForInput builds a streams map for the inputType. +// All siblings that support the input type are +// explicitly disabled so Fleet does not auto-enable them. +func buildStreamsForInput(inputType string, manifest packages.PackageManifest, dsManifest packages.DataStreamManifest, enabled bool, vars common.MapStr, datastreams []packages.DataStreamManifest) map[string]PackagePolicyStream { + streams := map[string]PackagePolicyStream{} + for _, ds := range datastreams { + s, ok := streamForInput(ds, inputType) + if !ok { + continue + } + + streamVars := SetKibanaVariables(s.Vars, common.MapStr{}) + streamEnabled := false + if ds.Name == dsManifest.Name { + streamEnabled = enabled + streamVars = SetKibanaVariables(s.Vars, vars) + } + streams[datasetKey(manifest.Name, ds)] = PackagePolicyStream{ + Enabled: streamEnabled, + Vars: streamVars.ToMapStr(), + legacyVars: streamVars, + dataStreamType: ds.Type, + dataStreamDataset: datasetKey(manifest.Name, ds), + } + } + return streams +} + +// streamForInput returns the stream of ds that uses the given input type. +// The second return value reports whether such a stream was found; when it +// is false, the data stream does not support that input type.
+func streamForInput(ds packages.DataStreamManifest, inputType string) (packages.Stream, bool) { + for _, s := range ds.Streams { + if s.Input == inputType { + return s, true + } + } + return packages.Stream{}, false +} + +// BuildInputPackagePolicy builds a PackagePolicy for an input package +// given pre-loaded manifests. +func BuildInputPackagePolicy( + policyID, namespace, name string, + manifest packages.PackageManifest, + policyTemplate packages.PolicyTemplate, + varValues common.MapStr, + enabled bool, +) PackagePolicy { + streamInput := policyTemplate.Input + + // Disable all other inputs; only enable the target one. + inputs := make(map[string]PackagePolicyInput) + for _, pt := range manifest.PolicyTemplates { + inputs[fmt.Sprintf("%s-%s", pt.Name, pt.Input)] = PackagePolicyInput{ + Enabled: false, + inputType: pt.Input, + policyTemplate: pt.Name, + } + } + + // Dataset key for the stream: .. + streamDataset := fmt.Sprintf("%s.%s", manifest.Name, policyTemplate.Name) + + vars := SetKibanaVariables(policyTemplate.Vars, varValues) + ensureDatasetVar(vars, policyTemplate, varValues) + if policyTemplate.Input == "otelcol" { + ensureUseAPMVar(vars, varValues) + } + inputEntry := PackagePolicyInput{ + Enabled: enabled, + Streams: map[string]PackagePolicyStream{ + streamDataset: { + Enabled: enabled, + Vars: vars.ToMapStr(), + legacyVars: vars, + dataStreamDataset: streamDataset, + // dataStreamType is intentionally empty: input packages + // require Kibana >= 7.16 (simplified API), so legacy + // conversion is not needed. 
+ }, + }, + inputType: streamInput, + policyTemplate: policyTemplate.Name, + legacyVars: vars, + } + inputs[fmt.Sprintf("%s-%s", policyTemplate.Name, streamInput)] = inputEntry + + policy := PackagePolicy{ + Name: name, + Namespace: namespace, + PolicyID: policyID, + Inputs: inputs, + + legacyPackageTitle: manifest.Title, + } + pkgVars := SetKibanaVariables(manifest.Vars, varValues) + policy.Vars = pkgVars.ToMapStr() + policy.legacyVars = pkgVars + policy.Package.Name = manifest.Name + policy.Package.Version = manifest.Version + + return policy +} + +// datasetKey returns the Fleet stream key for a data stream. When the data +// stream manifest declares an explicit dataset, that value is used directly; +// otherwise the key is ".". +func datasetKey(pkgName string, ds packages.DataStreamManifest) string { + if ds.Dataset != "" { + return ds.Dataset + } + return fmt.Sprintf("%s.%s", pkgName, ds.Name) +} + +// ensureDatasetVar guarantees that vars contains a data_stream.dataset entry +// marked fromUser=true, so that it is included in simplified API requests. +// Fleet requires this field for input packages. The value resolution order is: +// 1. user-provided value supplied via varValues (e.g. when the manifest does not declare the variable) +// 2. manifest default already parsed into vars — promoted to fromUser=true +// 3. explicit default in the policy-template var definitions +// 4. policy template name as a final fallback +func ensureDatasetVar(vars Vars, policyTemplate packages.PolicyTemplate, varValues common.MapStr) { + if raw, err := varValues.GetValue("data_stream.dataset"); err == nil { + var val packages.VarValue + if err := val.Unpack(raw); err == nil { + setVarFromUser(vars, "data_stream.dataset", "text", val) + return + } + } + if v, found := vars["data_stream.dataset"]; found { + // Exists as a manifest default; promote it so ToMapStr includes it. 
+ setVarFromUser(vars, "data_stream.dataset", "text", v.Value) + return + } + dataset := policyTemplate.Name + for _, def := range policyTemplate.Vars { + if def.Name == "data_stream.dataset" && def.Default != nil { + if s, ok := def.Default.Value().(string); ok && s != "" { + dataset = s + } + break + } + } + var value packages.VarValue + value.Unpack(dataset) + setVarFromUser(vars, "data_stream.dataset", "text", value) +} + +// ensureUseAPMVar injects use_apm into vars from varValues if not already present. +// It is only meaningful for otelcol inputs. The value must be a bool or +// "true"/"false" string in varValues; if absent or unparseable, vars is unchanged. +func ensureUseAPMVar(vars Vars, varValues common.MapStr) { + raw, err := varValues.GetValue("use_apm") + if err != nil { + return + } + var val packages.VarValue + switch v := raw.(type) { + case bool: + val.Unpack(v) + case string: + b, err := strconv.ParseBool(v) + if err != nil { + return + } + val.Unpack(b) + default: + return + } + if val.Value() != nil { + setVarFromUser(vars, "use_apm", "boolean", val) + } +} + +// setVarFromUser sets vars[name] with fromUser=true so that the variable is included +// in simplified API requests (ToMapStr). It is a no-op when the var is already +// user-set, preserving any value previously established by SetKibanaVariables. +func setVarFromUser(vars Vars, name, varType string, val packages.VarValue) { + if v, found := vars[name]; found && v.fromUser { + return + } + vars[name] = Var{Type: varType, Value: val, fromUser: true} +} diff --git a/internal/kibana/packagepolicy_test.go b/internal/kibana/packagepolicy_test.go new file mode 100644 index 0000000000..8519c0bb6c --- /dev/null +++ b/internal/kibana/packagepolicy_test.go @@ -0,0 +1,540 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "cmp" + "encoding/json" + "os" + "path/filepath" + "slices" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/packages" +) + +func TestBuildIntegrationPackagePolicy(t *testing.T) { + tests := []struct { + name string + packageRoot string + policyTemplateName string + dsName string + inputName string + policyName string + inputVars common.MapStr + dsVars common.MapStr + goldenSimplified string + goldenLegacy string + }{ + { + name: "sophos_xg_tcp", + packageRoot: "testdata/packages/sophos_tcp", + policyTemplateName: "sophos", + dsName: "xg", + inputName: "tcp", + policyName: "sophos-xg-test", + inputVars: common.MapStr{}, + dsVars: common.MapStr{ + "syslog_host": "0.0.0.0", + "syslog_port": 9549, + "known_devices": "- hostname: XG230\n serial_number: \"1234567890123456\"\n- hostname: SG430\n serial_number: \"S4000806149EE49\"\n", + }, + goldenSimplified: "testdata/sophos_xg_tcp.json", + goldenLegacy: "testdata/sophos_xg_tcp_legacy.json", + }, + { + // Verifies that an empty yaml multi-valued var (e.g. file_selectors: []) + // is omitted from the simplified API request instead of being sent as the + // string "[]", which Fleet would reject as invalid YAML when substituted + // into Handlebars templates. Reproduces the aws_bedrock case. 
+ name: "empty_yaml_multi_var", + packageRoot: "testdata/packages/test_policy_vars", + policyTemplateName: "test", + dsName: "log", + inputName: "cel", + policyName: "test-log-test", + inputVars: common.MapStr{}, + dsVars: common.MapStr{ + "file_selectors": []interface{}{}, + }, + goldenSimplified: "testdata/test_policy_vars_empty_yaml_multi.json", + goldenLegacy: "testdata/test_policy_vars_empty_yaml_multi_legacy.json", + }, + { + // Verifies that a select type variable with a non-default value ("false") + // is correctly serialised in the simplified API request. + // Reproduces the ti_opencti case where revoked: "false" caused Fleet to + // reject with "Invalid value for select type". + name: "select_var_false_value", + packageRoot: "testdata/packages/test_policy_vars", + policyTemplateName: "test", + dsName: "log", + inputName: "cel", + policyName: "test-log-test", + inputVars: common.MapStr{}, + dsVars: common.MapStr{ + "revoked": "false", + }, + goldenSimplified: "testdata/test_policy_vars_select_false.json", + goldenLegacy: "testdata/test_policy_vars_select_false_legacy.json", + }, + { + // Verifies that a bool variable with a dotted name (e.g. "active.only") + // is found when ucfg has stored it as a nested map {"active": {"only": false}} + // due to PathSep(".") parsing. Reproduces the elasticsearch index_recovery + // case where active.only: false in the test config was silently ignored, + // causing the manifest default (true) to be used instead. 
+ name: "dotted_bool_var_nested_lookup", + packageRoot: "testdata/packages/test_policy_vars", + policyTemplateName: "test", + dsName: "log", + inputName: "cel", + policyName: "test-log-test", + inputVars: common.MapStr{}, + dsVars: common.MapStr{ + "active": common.MapStr{ + "only": false, + }, + }, + goldenSimplified: "testdata/test_policy_vars_dotted_bool.json", + goldenLegacy: "testdata/test_policy_vars_dotted_bool_legacy.json", + }, + { + packageRoot: "testdata/packages/apache", + policyTemplateName: "apache", + dsName: "access", + inputName: "logfile", + policyName: "apache-access-test", + inputVars: common.MapStr{}, + dsVars: common.MapStr{ + "paths": []string{"/tmp/service_logs/access.log*"}, + }, + goldenSimplified: "testdata/apache_access_logfile.json", + goldenLegacy: "testdata/apache_access_logfile_legacy.json", + }, + { + // Verifies that package-level vars specified in dsVars (data_stream.vars + // in the test config) are applied at the package level. This covers the + // endace case where endace_url is a required package-level var but is + // written under data_stream.vars in the system test config. + name: "endace_netflow_pkg_var_in_dsvars", + packageRoot: "testdata/packages/endace_netflow", + policyTemplateName: "endace", + dsName: "log", + inputName: "netflow", + policyName: "endace-log-test", + inputVars: common.MapStr{}, + dsVars: common.MapStr{ + "host": "0.0.0.0", + "port": 2055, + "endace_url": "http://test.elastic.co", + }, + goldenSimplified: "testdata/endace_netflow_pkg_var_in_dsvars.json", + goldenLegacy: "testdata/endace_netflow_pkg_var_in_dsvars_legacy.json", + }, + { + // Verifies that when building a policy for app_insights/azure/metrics, + // the sibling disabled input (app_state-azure/metrics) uses azure.app_state + // as its stream — not azure.app_insights. 
+ name: "azure_app_insights_metrics", + packageRoot: "testdata/packages/azure_application_insights", + policyTemplateName: "app_insights", + dsName: "app_insights", + inputName: "azure/metrics", + policyName: "azure-app-insights-test", + inputVars: common.MapStr{}, + dsVars: common.MapStr{ + "period": "300s", + "metrics": "- id: [\"requests/count\"]\n aggregation: [\"sum\"]\n interval: \"P5M\"\n", + }, + goldenSimplified: "testdata/azure_app_insights_metrics.json", + goldenLegacy: "testdata/azure_app_insights_metrics_legacy.json", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + manifest, err := packages.ReadPackageManifest(filepath.Join(tc.packageRoot, "manifest.yml")) + require.NoError(t, err) + + dsManifest, err := packages.ReadDataStreamManifestFromPackageRoot(tc.packageRoot, tc.dsName) + require.NoError(t, err) + + policyTemplate, err := packages.SelectPolicyTemplateByName(manifest.PolicyTemplates, tc.policyTemplateName) + require.NoError(t, err) + + datastreams, err := packages.ReadAllDataStreamManifests(tc.packageRoot) + require.NoError(t, err) + + pp, err := BuildIntegrationPackagePolicy( + "test-policy-id", "test", tc.policyName, + *manifest, policyTemplate, *dsManifest, + tc.inputName, + tc.inputVars, tc.dsVars, + true, datastreams, + ) + require.NoError(t, err) + + t.Run("simplified", func(t *testing.T) { + got, err := json.MarshalIndent(pp, "", " ") + require.NoError(t, err) + assertJSONGolden(t, tc.goldenSimplified, got) + }) + + t.Run("legacy", func(t *testing.T) { + legacy := pp.toLegacy() + // Sort inputs by type and streams within each input by dataset + // for deterministic comparison (p.Inputs is a map). 
+ slices.SortFunc(legacy.Inputs, func(a, b legacyInput) int { + return cmp.Compare(a.Type, b.Type) + }) + for i := range legacy.Inputs { + slices.SortFunc(legacy.Inputs[i].Streams, func(a, b legacyStream) int { + return cmp.Compare(a.DataStream.Dataset, b.DataStream.Dataset) + }) + } + got, err := json.MarshalIndent(legacy, "", " ") + require.NoError(t, err) + assertJSONGolden(t, tc.goldenLegacy, got) + }) + }) + } +} + +func TestBuildInputPackagePolicy(t *testing.T) { + tests := []struct { + name string + packageRoot string + policyTemplateName string + policyName string + varValues common.MapStr + goldenSimplified string + goldenLegacy string + }{ + { + name: "log_custom_logs", + packageRoot: "testdata/packages/log_input", + policyTemplateName: "logs", + policyName: "log-logs-test", + varValues: common.MapStr{ + "paths": []string{"/tmp/test.log"}, + "data_stream.dataset": "log.custom", + }, + goldenSimplified: "testdata/log_custom_logs.json", + goldenLegacy: "testdata/log_custom_logs_legacy.json", + }, + { + name: "sql_input_custom_dataset", + packageRoot: "../../test/packages/parallel/sql_input", + policyTemplateName: "sql_query", + policyName: "sql-query-test", + varValues: common.MapStr{ + "data_stream.dataset": "custom.sql", + }, + goldenSimplified: "testdata/sql_input_custom_dataset.json", + goldenLegacy: "testdata/sql_input_custom_dataset_legacy.json", + }, + { + // Simulates varValues coming from the test runner, which parses config + // files with ucfg.PathSep("."), causing data_stream.dataset to be stored + // as a nested map {"data_stream": {"dataset": ...}} rather than a flat key. 
+ name: "sql_input_nested_dataset", + packageRoot: "../../test/packages/parallel/sql_input", + policyTemplateName: "sql_query", + policyName: "sql-query-test", + varValues: common.MapStr{ + "data_stream": common.MapStr{ + "dataset": "custom.sql", + }, + }, + goldenSimplified: "testdata/sql_input_custom_dataset.json", + goldenLegacy: "testdata/sql_input_custom_dataset_legacy.json", + }, + { + // No data_stream.dataset provided: the default should be + // "." so the data lands in the + // index template installed by the package. + name: "sql_input_default_dataset", + packageRoot: "../../test/packages/parallel/sql_input", + policyTemplateName: "sql_query", + policyName: "sql-query-test", + varValues: common.MapStr{}, + goldenSimplified: "testdata/sql_input_default_dataset.json", + goldenLegacy: "testdata/sql_input_default_dataset_legacy.json", + }, + { + // OTel input package with use_apm set by the user. The manifest does not + // declare use_apm, so ensureUseAPMVar must inject it from varValues. + name: "otel_traces_use_apm", + packageRoot: "testdata/packages/otel_traces_input", + policyTemplateName: "receiver", + policyName: "otel-traces-test", + varValues: common.MapStr{ + "endpoint": "0.0.0.0:9411", + "use_apm": true, + }, + goldenSimplified: "testdata/otel_traces_use_apm.json", + goldenLegacy: "testdata/otel_traces_use_apm_legacy.json", + }, + { + // Package-level variable: the user overrides the default package-level + // var (custom_tag). BuildInputPackagePolicy must forward manifest.Vars + // into the top-level policy vars just like BuildIntegrationPackagePolicy. 
+ name: "input_with_pkg_vars", + packageRoot: "testdata/packages/input_with_pkg_vars", + policyTemplateName: "logs", + policyName: "input-pkg-vars-test", + varValues: common.MapStr{ + "paths": []string{"/tmp/test.log"}, + "data_stream.dataset": "custom.logs", + "custom_tag": "my-tag", + }, + goldenSimplified: "testdata/input_with_pkg_vars.json", + goldenLegacy: "testdata/input_with_pkg_vars_legacy.json", + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + manifest, err := packages.ReadPackageManifest(filepath.Join(tc.packageRoot, "manifest.yml")) + require.NoError(t, err) + + policyTemplate, err := packages.SelectPolicyTemplateByName(manifest.PolicyTemplates, tc.policyTemplateName) + require.NoError(t, err) + + pp := BuildInputPackagePolicy( + "test-policy-id", "test", tc.policyName, + *manifest, policyTemplate, tc.varValues, + true, + ) + + t.Run("simplified", func(t *testing.T) { + got, err := json.MarshalIndent(pp, "", " ") + require.NoError(t, err) + assertJSONGolden(t, tc.goldenSimplified, got) + }) + + t.Run("legacy", func(t *testing.T) { + legacy := pp.toLegacy() + slices.SortFunc(legacy.Inputs, func(a, b legacyInput) int { + return cmp.Compare(a.Type, b.Type) + }) + for i := range legacy.Inputs { + slices.SortFunc(legacy.Inputs[i].Streams, func(a, b legacyStream) int { + return cmp.Compare(a.DataStream.Dataset, b.DataStream.Dataset) + }) + } + got, err := json.MarshalIndent(legacy, "", " ") + require.NoError(t, err) + assertJSONGolden(t, tc.goldenLegacy, got) + }) + }) + } +} + +// assertJSONGolden compares got against the golden file at goldenPath using +// semantic JSON equality. If the golden file does not yet exist it is created +// from got so that the next run acts as the regression gate. 
+func assertJSONGolden(t *testing.T, goldenPath string, got []byte) { + t.Helper() + if _, err := os.Stat(goldenPath); os.IsNotExist(err) { + require.NoError(t, os.WriteFile(goldenPath, got, 0o644)) + t.Logf("created golden file %s", goldenPath) + return + } + golden, err := os.ReadFile(goldenPath) + require.NoError(t, err) + assert.JSONEq(t, string(golden), string(got)) +} + +func TestEnsureUseAPMVar(t *testing.T) { + cases := []struct { + name string + vars Vars + varValues common.MapStr + wantUseAPMPresent bool + wantUseAPM bool + wantUnchanged bool // existing keys must be unchanged + }{ + { + name: "use_apm already in vars is left unchanged", + vars: Vars{"use_apm": {Value: varValue(false), Type: "boolean", fromUser: true}}, + varValues: common.MapStr{"use_apm": true}, + wantUseAPMPresent: true, + wantUseAPM: false, + wantUnchanged: true, + }, + { + name: "no use_apm in varValues leaves vars unchanged", + vars: Vars{}, + varValues: common.MapStr{}, + wantUseAPMPresent: false, + wantUnchanged: true, + }, + { + name: "use_apm true is added", + vars: Vars{}, + varValues: common.MapStr{"use_apm": true}, + wantUseAPMPresent: true, + wantUseAPM: true, + }, + { + name: "use_apm false is added", + vars: Vars{}, + varValues: common.MapStr{"use_apm": false}, + wantUseAPMPresent: true, + wantUseAPM: false, + }, + { + name: "use_apm as string true is added", + vars: Vars{}, + varValues: common.MapStr{"use_apm": "true"}, + wantUseAPMPresent: true, + wantUseAPM: true, + }, + { + name: "use_apm as string false is added", + vars: Vars{}, + varValues: common.MapStr{"use_apm": "false"}, + wantUseAPMPresent: true, + wantUseAPM: false, + }, + { + name: "use_apm as unexpected string is not added", + vars: Vars{}, + varValues: common.MapStr{"use_apm": "foo"}, + wantUseAPMPresent: false, + wantUnchanged: true, + }, + { + name: "use_apm as int is not added", + vars: Vars{}, + varValues: common.MapStr{"use_apm": 1}, + wantUseAPMPresent: false, + wantUnchanged: true, + }, + { + name: 
"other vars are preserved when adding use_apm", + vars: Vars{"other": {Value: varValue("x"), Type: "text", fromUser: true}}, + varValues: common.MapStr{"use_apm": true}, + wantUseAPMPresent: true, + wantUseAPM: true, + }, + { + name: "nil varValues does not add use_apm", + vars: Vars{}, + varValues: nil, + wantUseAPMPresent: false, + wantUnchanged: true, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got := make(Vars, len(c.vars)) + for k, v := range c.vars { + got[k] = v + } + + ensureUseAPMVar(got, c.varValues) + + if c.wantUnchanged { + assert.Len(t, got, len(c.vars), "vars length should be unchanged") + } + if c.wantUseAPMPresent { + require.Contains(t, got, "use_apm", "vars should contain use_apm") + assert.Equal(t, "boolean", got["use_apm"].Type) + assert.Equal(t, c.wantUseAPM, got["use_apm"].Value.Value()) + } else { + assert.NotContains(t, got, "use_apm", "vars should not contain use_apm") + } + // Original vars must always be preserved. + for k, v := range c.vars { + require.Contains(t, got, k, "original var %q must be preserved", k) + assert.Equal(t, v.Value.Value(), got[k].Value.Value(), "value for %q", k) + } + }) + } +} + +func TestEnsureDatasetVar(t *testing.T) { + cases := []struct { + name string + vars Vars + policyTemplate packages.PolicyTemplate + varValues common.MapStr + wantDataset string + }{ + { + name: "already set with fromUser=true is left unchanged", + vars: Vars{"data_stream.dataset": {Value: varValue("existing"), Type: "text", fromUser: true}}, + policyTemplate: packages.PolicyTemplate{Name: "sql_query"}, + varValues: common.MapStr{"data_stream.dataset": "override"}, + wantDataset: "existing", + }, + { + name: "varValues overrides default", + vars: Vars{}, + policyTemplate: packages.PolicyTemplate{Name: "sql_query"}, + varValues: common.MapStr{"data_stream.dataset": "custom.dataset"}, + wantDataset: "custom.dataset", + }, + { + name: "manifest default in vars is promoted", + vars: 
Vars{"data_stream.dataset": {Value: varValue("manifest.default"), Type: "text"}}, + policyTemplate: packages.PolicyTemplate{Name: "sql_query"}, + varValues: common.MapStr{}, + wantDataset: "manifest.default", + }, + { + name: "policyTemplate.Vars default is used when no user value", + vars: Vars{}, + policyTemplate: packages.PolicyTemplate{ + Name: "sql_query", + Vars: []packages.Variable{ + {Name: "data_stream.dataset", Default: func() *packages.VarValue { + vv := packages.VarValue{} + vv.Unpack("template.default") + return &vv + }()}, + }, + }, + varValues: common.MapStr{}, + wantDataset: "template.default", + }, + { + name: "falls back to policy template name", + vars: Vars{}, + policyTemplate: packages.PolicyTemplate{Name: "sql_query"}, + varValues: common.MapStr{}, + wantDataset: "sql_query", + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got := make(Vars, len(c.vars)) + for k, v := range c.vars { + got[k] = v + } + + ensureDatasetVar(got, c.policyTemplate, c.varValues) + + require.Contains(t, got, "data_stream.dataset") + assert.Equal(t, "text", got["data_stream.dataset"].Type) + assert.Equal(t, c.wantDataset, got["data_stream.dataset"].Value.Value()) + }) + } +} + +func varValue(v any) packages.VarValue { + vv := packages.VarValue{} + vv.Unpack(v) + return vv +} diff --git a/internal/kibana/policies.go b/internal/kibana/policies.go index 16d2061bf9..6d4175b839 100644 --- a/internal/kibana/policies.go +++ b/internal/kibana/policies.go @@ -10,7 +10,12 @@ import ( "fmt" "net/http" "path" + "strings" + "github.com/Masterminds/semver/v3" + "gopkg.in/yaml.v3" + + "github.com/elastic/elastic-package/internal/common" "github.com/elastic/elastic-package/internal/packages" ) @@ -192,73 +197,88 @@ func (c *Client) DeletePolicy(ctx context.Context, policyID string) error { type Var struct { Value packages.VarValue `json:"value"` Type string `json:"type"` + + // fromUser indicates if this variable comes from user input, not from a manifest 
default + fromUser bool } // Vars is a collection of variables either at the package or // data stream level. type Vars map[string]Var -// DataStream represents a data stream within a package. -type DataStream struct { - Type string `json:"type"` - Dataset string `json:"dataset"` -} - -// Stream encapsulates a data stream and it's variables. -type Stream struct { - ID string `json:"id"` - Enabled bool `json:"enabled"` - DataStream DataStream `json:"data_stream"` - Vars Vars `json:"vars"` -} - -// Input represents a package-level input. -type Input struct { - PolicyTemplate string `json:"policy_template,omitempty"` // Name of policy_template from the package manifest that contains this input. If not specified the Kibana uses the first policy_template. - Type string `json:"type"` - Enabled bool `json:"enabled"` - Streams []Stream `json:"streams"` - Vars Vars `json:"vars,omitempty"` -} - -// PackageDataStream represents a request to add a single package's single data stream to a -// Policy in Fleet. -type PackageDataStream struct { - Name string `json:"name"` - Description string `json:"description"` - Namespace string `json:"namespace"` - PolicyID string `json:"policy_id"` - Enabled bool `json:"enabled"` - OutputID string `json:"output_id"` - Inputs []Input `json:"inputs"` - Vars Vars `json:"vars,omitempty"` - Package struct { - Name string `json:"name"` - Title string `json:"title"` - Version string `json:"version"` - } `json:"package"` -} - -// AddPackageDataStreamToPolicy adds a PackageDataStream to a Policy in Fleet. -func (c *Client) AddPackageDataStreamToPolicy(ctx context.Context, r PackageDataStream) error { - reqBody, err := json.Marshal(r) - if err != nil { - return fmt.Errorf("could not convert policy-package (request) to JSON: %w", err) +// ToMapStr converts Vars to the map format expected by PackagePolicyInput and PackagePolicyStream. +// The objects-based Fleet API expects raw values (not {value, type} wrappers). 
+// Only user-provided values (fromUser == true) are included; manifest defaults are +// omitted so the simplified API can apply them server-side. +// +// For yaml-type variables, map or slice values are YAML-marshaled to a string because +// the simplified API only accepts string|number|bool|array-of-scalars|null for yaml-type vars. +// String values (including comment-only defaults like "#- tz_short: AEST\n") are passed through +// as-is, matching the format sent by the Fleet UI. +func (v Vars) ToMapStr() common.MapStr { + m := make(common.MapStr) + for k, val := range v { + if !val.fromUser { + continue + } + raw := val.Value.Value() + if val.Type == "yaml" && raw != nil { + if _, isString := raw.(string); !isString { + // Maps and slices (e.g. ssl written as YAML map in test configs): + // marshal to a YAML string so the simplified API accepts them. + // Empty results ("[]", "{}") are omitted so Fleet Handlebars + // {{#if}} guards evaluate to false, preventing invalid agent configs. + b, err := yaml.Marshal(raw) + if err != nil { + continue + } + s := strings.TrimRight(string(b), "\n") + if s == "[]" || s == "{}" { + continue + } + raw = s + } + } + m[k] = raw } - - statusCode, respBody, err := c.post(ctx, path.Join(FleetAPI, "package_policies"), reqBody) - if err != nil { - return fmt.Errorf("could not add package to policy: %w", err) + if len(m) == 0 { + return nil } + return m +} - if statusCode != http.StatusOK { - return fmt.Errorf("could not add package to policy; API status code = %d; response body = %s", statusCode, respBody) +// SetKibanaVariables builds a Vars map from variable definitions and user-provided +// values. For each definition, the user-provided value is stored and marked with +// fromUser=true so that ToMapStr (simplified API) includes it; manifest defaults +// are stored with fromUser=false so they are available for the legacy API +// (toLegacyMapVar) but omitted from simplified requests. 
Multi-valued variables +// with no user value and no manifest default are included as empty arrays. +// Definitions with neither a user value nor a manifest default are omitted. +func SetKibanaVariables(definitions []packages.Variable, values common.MapStr) Vars { + vars := Vars{} + for _, def := range definitions { + rawValue, err := values.GetValue(def.Name) + switch { + case err == nil: + var val packages.VarValue + val.Unpack(rawValue) + vars[def.Name] = Var{Type: def.Type, Value: val, fromUser: true} + case def.Default != nil: + // Fallback to default if available. + vars[def.Name] = Var{Type: def.Type, Value: *def.Default} + case def.Multi: + // Multi-valued var with no value and no default: keep for legacy. + var val packages.VarValue + val.Unpack([]interface{}{}) + vars[def.Name] = Var{Type: def.Type, Value: val} + } } - - return nil + return vars } -// PackagePolicy represents an Package Policy in Fleet. +// PackagePolicy represents a Package Policy in Fleet using the simplified +// (objects-based) inputs format. +// CreatePackagePolicy transparently converts to the legacy format for older stacks. type PackagePolicy struct { ID string `json:"id,omitempty"` Name string `json:"name"` @@ -269,24 +289,78 @@ type PackagePolicy struct { Name string `json:"name"` Version string `json:"version"` } `json:"package"` - Inputs map[string]PackagePolicyInput `json:"inputs,omitempty"` - Force bool `json:"force"` + Inputs map[string]PackagePolicyInput `json:"inputs,omitempty"` + OutputID string `json:"output_id,omitempty"` + // Vars holds package-level variables; for legacy conversion use legacyVars. + Vars map[string]any `json:"vars,omitempty"` + Force bool `json:"force"` + + // Unexported: type-aware vars for legacy ({value,type}) conversion. + legacyPackageTitle string + legacyVars Vars } +// PackagePolicyInput is one input entry in a PackagePolicy (simplified format). 
type PackagePolicyInput struct { Enabled bool `json:"enabled"` - Vars map[string]interface{} `json:"vars,omitempty"` + Vars map[string]any `json:"vars,omitempty"` Streams map[string]PackagePolicyStream `json:"streams,omitempty"` + + // Unexported fields carry metadata used only for legacy API conversion. + inputType string + policyTemplate string + legacyVars Vars } +// PackagePolicyStream is one stream entry in a PackagePolicyInput (simplified format). type PackagePolicyStream struct { - Enabled bool `json:"enabled"` - Vars map[string]interface{} `json:"vars,omitempty"` + Enabled bool `json:"enabled"` + Vars map[string]any `json:"vars,omitempty"` + + // Unexported fields carry metadata used only for legacy API conversion. + dataStreamType string + dataStreamDataset string + legacyVars Vars +} + +const ( + // PolicyAPIFormatAuto selects the format based on the Kibana version (default). + PolicyAPIFormatAuto = "" + // PolicyAPIFormatSimplified forces the simplified (objects-based) API. + PolicyAPIFormatSimplified = "simplified" + // PolicyAPIFormatLegacy forces the legacy (arrays-based) API. + PolicyAPIFormatLegacy = "legacy" +) + +// simplifiedPolicyAPIMinVersion is the minimum Kibana version that supports +// the simplified (objects-based) inputs format for package policy creation. +// Introduced in Kibana 8.5.0 (PR #139420, September 2022). +var simplifiedPolicyAPIMinVersion = semver.MustParse("8.5.0") + +// supportsSimplifiedPackagePolicyAPI reports whether the connected Kibana +// supports the simplified (objects-based) Fleet package policy API. +// Returns true for managed Kibana (no version available) assuming a modern stack. +func (c *Client) supportsSimplifiedPackagePolicyAPI() bool { + if c.semver == nil { + return true + } + return !c.semver.LessThan(simplifiedPolicyAPIMinVersion) } // CreatePackagePolicy persists the given Package Policy in Fleet. 
-func (c *Client) CreatePackagePolicy(ctx context.Context, p PackagePolicy) (*PackagePolicy, error) { - reqBody, err := json.Marshal(p) +// format controls which API format to use: "" (auto) selects based on the Kibana +// version, "simplified" forces the objects-based API, "legacy" forces the +// arrays-based API. +func (c *Client) CreatePackagePolicy(ctx context.Context, p PackagePolicy, format string) (*PackagePolicy, error) { + var reqBody []byte + var err error + useSimplified := format == PolicyAPIFormatSimplified || + (format == PolicyAPIFormatAuto && c.supportsSimplifiedPackagePolicyAPI()) + if useSimplified { + reqBody, err = json.Marshal(p) + } else { + reqBody, err = json.Marshal(p.toLegacy()) + } if err != nil { return nil, fmt.Errorf("could not convert package policy (request) to JSON: %w", err) } diff --git a/internal/kibana/policies_test.go b/internal/kibana/policies_test.go new file mode 100644 index 0000000000..19321f8ef1 --- /dev/null +++ b/internal/kibana/policies_test.go @@ -0,0 +1,276 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package kibana + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/packages" +) + +func TestVarsToMapStr(t *testing.T) { + t.Run("yaml type already a string is passed through as-is", func(t *testing.T) { + // When the value in the test config is already written as a YAML string + // (e.g. ssl: |- ... ), it must not be double-encoded. 
+ yamlStr := "verification_mode: none\ncertificate: /etc/pki/cert.pem\n" + var sslValue packages.VarValue + require.NoError(t, sslValue.Unpack(yamlStr)) + vars := Vars{ + "ssl": Var{Type: "yaml", Value: sslValue, fromUser: true}, + } + + m := vars.ToMapStr() + + require.NotNil(t, m) + assert.Equal(t, yamlStr, m["ssl"]) + }) + + t.Run("yaml type map is serialized to YAML string", func(t *testing.T) { + // When a test config writes a yaml-type var as a YAML map (without the | + // block scalar), go-ucfg parses it as map[string]interface{}. The + // simplified Fleet API only accepts strings for yaml-type vars, so + // ToMapStr must serialize the map to a YAML string. + var sslValue packages.VarValue + require.NoError(t, sslValue.Unpack(map[string]interface{}{"verification_mode": "none"})) + vars := Vars{ + "ssl": Var{Type: "yaml", Value: sslValue, fromUser: true}, + } + + m := vars.ToMapStr() + + require.NotNil(t, m) + assert.Equal(t, "verification_mode: none", m["ssl"]) + }) + + t.Run("yaml type comment-only string is passed through as-is", func(t *testing.T) { + // Comment-only YAML strings provided by the user are passed through unchanged. 
+ commentOnly := "#- tz_short: AEST\n# tz_long: Australia/Sydney\n" + var tzValue packages.VarValue + require.NoError(t, tzValue.Unpack(commentOnly)) + vars := Vars{ + "tz_map": Var{Type: "yaml", Value: tzValue, fromUser: true}, + } + + m := vars.ToMapStr() + + require.NotNil(t, m) + assert.Equal(t, commentOnly, m["tz_map"]) + }) + + t.Run("non-yaml type is passed through as-is", func(t *testing.T) { + var val packages.VarValue + require.NoError(t, val.Unpack("http://localhost:8080")) + vars := Vars{ + "url": Var{Type: "text", Value: val, fromUser: true}, + } + + m := vars.ToMapStr() + + require.NotNil(t, m) + assert.Equal(t, "http://localhost:8080", m["url"]) + }) + + t.Run("nil yaml value is passed through as nil", func(t *testing.T) { + vars := Vars{ + "ssl": Var{Type: "yaml", Value: packages.VarValue{}, fromUser: true}, + } + + m := vars.ToMapStr() + + require.NotNil(t, m) + assert.Nil(t, m["ssl"]) + }) + + t.Run("manifest default is excluded from ToMapStr", func(t *testing.T) { + // Vars with fromUser==false (manifest defaults) must not appear in simplified + // API requests; the server applies them when compiling templates. + var val packages.VarValue + require.NoError(t, val.Unpack("UTC")) + vars := Vars{ + "tz_offset": Var{Type: "text", Value: val}, + } + assert.Nil(t, vars.ToMapStr()) + }) + + t.Run("empty vars returns nil", func(t *testing.T) { + assert.Nil(t, Vars{}.ToMapStr()) + }) +} + +func TestToLegacyPackagePolicy(t *testing.T) { + // Build a PackagePolicy as BuildIntegrationPackagePolicy would produce it, + // then verify the legacy conversion has the right structure. 
+ var periodVal, hostVal packages.VarValue + require.NoError(t, periodVal.Unpack("30s")) + require.NoError(t, hostVal.Unpack("http://localhost:8080")) + + streamVars := Vars{ + "period": Var{Type: "text", Value: periodVal}, + } + inputVars := Vars{ + "hosts": Var{Type: "text", Value: hostVal}, + } + + policy := PackagePolicy{ + Name: "test-policy", + Namespace: "default", + PolicyID: "agent-policy-id", + Inputs: map[string]PackagePolicyInput{ + "apache-apache/metrics": { + Enabled: true, + Vars: inputVars.ToMapStr(), + legacyVars: inputVars, + inputType: "apache/metrics", + policyTemplate: "apache", + Streams: map[string]PackagePolicyStream{ + "apache.status": { + Enabled: true, + Vars: streamVars.ToMapStr(), + legacyVars: streamVars, + dataStreamType: "metrics", + dataStreamDataset: "apache.status", + }, + }, + }, + "apache-logfile": { + Enabled: false, + inputType: "logfile", + policyTemplate: "apache", + }, + }, + } + policy.Package.Name = "apache" + policy.Package.Version = "1.0.0" + + legacy := policy.toLegacy() + + assert.Equal(t, "test-policy", legacy.Name) + assert.Equal(t, "default", legacy.Namespace) + assert.Equal(t, "agent-policy-id", legacy.PolicyID) + assert.True(t, legacy.Enabled, "legacy policy must have enabled=true") + assert.Equal(t, "apache", legacy.Package.Name) + + require.Len(t, legacy.Inputs, 1) + + // Find and verify the enabled input. + enabledInput := &legacy.Inputs[0] + require.Equal(t, "apache/metrics", enabledInput.Type, "only the enabled apache/metrics input should be present") + assert.Equal(t, "apache", enabledInput.PolicyTemplate) + assert.True(t, enabledInput.Enabled) + + // Verify input-level vars use {value, type} wrappers. + require.Contains(t, enabledInput.Vars, "hosts") + assert.Equal(t, "http://localhost:8080", enabledInput.Vars["hosts"].Value.Value()) + assert.Equal(t, "text", enabledInput.Vars["hosts"].Type) + + // Verify stream. 
+ require.Len(t, enabledInput.Streams, 1) + assert.Equal(t, "metrics", enabledInput.Streams[0].DataStream.Type) + assert.Equal(t, "apache.status", enabledInput.Streams[0].DataStream.Dataset) + require.Contains(t, enabledInput.Streams[0].Vars, "period") + assert.Equal(t, "30s", enabledInput.Streams[0].Vars["period"].Value.Value()) + assert.Equal(t, "text", enabledInput.Streams[0].Vars["period"].Type) +} + +func TestSetKibanaVariables(t *testing.T) { + varDef := func(name, typ string, defaultVal any) packages.Variable { + def := packages.Variable{Name: name, Type: typ} + if defaultVal != nil { + vv := packages.VarValue{} + vv.Unpack(defaultVal) + def.Default = &vv + } + return def + } + + cases := []struct { + name string + definitions []packages.Variable + values common.MapStr + wantVars map[string]any // name -> expected Value(). Only vars expected in result. + }{ + { + name: "empty definitions returns empty vars", + definitions: nil, + values: common.MapStr{"any": "value"}, + wantVars: map[string]any{}, + }, + { + name: "definition with default and no values uses default", + definitions: []packages.Variable{varDef("host", "text", "localhost")}, + values: common.MapStr{}, + wantVars: map[string]any{"host": "localhost"}, + }, + { + name: "definition with default overridden by values", + definitions: []packages.Variable{varDef("host", "text", "localhost")}, + values: common.MapStr{"host": "elastic.co"}, + wantVars: map[string]any{"host": "elastic.co"}, + }, + { + name: "definition with no default and no value is omitted", + definitions: []packages.Variable{varDef("optional", "text", nil)}, + values: common.MapStr{}, + wantVars: map[string]any{}, + }, + { + name: "definition with no default but value in values is included", + definitions: []packages.Variable{varDef("optional", "text", nil)}, + values: common.MapStr{"optional": "set"}, + wantVars: map[string]any{"optional": "set"}, + }, + { + name: "nil values uses defaults only", + definitions: []packages.Variable{ + 
varDef("a", "text", "default_a"), + varDef("b", "text", nil), + }, + values: nil, + wantVars: map[string]any{"a": "default_a"}, + }, + { + name: "multiple definitions mix default and override", + definitions: []packages.Variable{ + varDef("host", "text", "localhost"), + varDef("port", "integer", 9200), + varDef("optional", "text", nil), + }, + values: common.MapStr{"port": 9300}, + wantVars: map[string]any{ + "host": "localhost", + "port": 9300, + }, + }, + { + name: "boolean and list types preserved", + definitions: []packages.Variable{varDef("enabled", "bool", true), varDef("hosts", "text", []any{"a", "b"})}, + values: common.MapStr{}, + wantVars: map[string]any{ + "enabled": true, + "hosts": []any{"a", "b"}, + }, + }, + } + + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + got := SetKibanaVariables(c.definitions, c.values) + + assert.Len(t, got, len(c.wantVars), "number of vars") + for name, wantVal := range c.wantVars { + require.Contains(t, got, name, "var %q should be present", name) + assert.Equal(t, wantVal, got[name].Value.Value(), "var %q value", name) + } + for name := range got { + _, ok := c.wantVars[name] + assert.True(t, ok, "unexpected var %q in result", name) + } + }) + } +} diff --git a/internal/kibana/testdata/apache_access_logfile.json b/internal/kibana/testdata/apache_access_logfile.json new file mode 100644 index 0000000000..fdb29703a4 --- /dev/null +++ b/internal/kibana/testdata/apache_access_logfile.json @@ -0,0 +1,37 @@ +{ + "name": "apache-access-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "apache", + "version": "3.0.0" + }, + "inputs": { + "apache-apache/metrics": { + "enabled": false, + "streams": { + "apache.status": { + "enabled": false + } + } + }, + "apache-logfile": { + "enabled": true, + "streams": { + "apache.access": { + "enabled": true, + "vars": { + "paths": [ + "/tmp/service_logs/access.log*" + ] + } + }, + "apache.error": { + "enabled": false + 
} + } + } + }, + "force": false +} diff --git a/internal/kibana/testdata/apache_access_logfile_legacy.json b/internal/kibana/testdata/apache_access_logfile_legacy.json new file mode 100644 index 0000000000..afd8efc2a3 --- /dev/null +++ b/internal/kibana/testdata/apache_access_logfile_legacy.json @@ -0,0 +1,52 @@ +{ + "name": "apache-access-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "apache", + "title": "Apache HTTP Server", + "version": "3.0.0" + }, + "inputs": [ + { + "policy_template": "apache", + "type": "logfile", + "enabled": true, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "logs", + "dataset": "apache.access" + }, + "vars": { + "ignore_older": { + "value": "72h", + "type": "text" + }, + "paths": { + "value": [ + "/tmp/service_logs/access.log*" + ], + "type": "text" + }, + "preserve_original_event": { + "value": false, + "type": "bool" + }, + "tags": { + "value": [ + "apache-access" + ], + "type": "text" + } + } + } + ] + } + ], + "output_id": "", + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/azure_app_insights_metrics.json b/internal/kibana/testdata/azure_app_insights_metrics.json new file mode 100644 index 0000000000..257357ac96 --- /dev/null +++ b/internal/kibana/testdata/azure_app_insights_metrics.json @@ -0,0 +1,33 @@ +{ + "name": "azure-app-insights-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "azure_application_insights", + "version": "1.9.1" + }, + "inputs": { + "app_insights-azure/metrics": { + "enabled": true, + "streams": { + "azure.app_insights": { + "enabled": true, + "vars": { + "metrics": "- id: [\"requests/count\"]\n aggregation: [\"sum\"]\n interval: \"P5M\"\n", + "period": "300s" + } + } + } + }, + "app_state-azure/metrics": { + "enabled": false, + "streams": { + "azure.app_state": { + "enabled": false + } + } + } + }, + "force": 
false +} \ No newline at end of file diff --git a/internal/kibana/testdata/azure_app_insights_metrics_legacy.json b/internal/kibana/testdata/azure_app_insights_metrics_legacy.json new file mode 100644 index 0000000000..a336fc1f16 --- /dev/null +++ b/internal/kibana/testdata/azure_app_insights_metrics_legacy.json @@ -0,0 +1,40 @@ +{ + "name": "azure-app-insights-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "azure_application_insights", + "title": "Azure Application Insights Metrics Overview", + "version": "1.9.1" + }, + "inputs": [ + { + "policy_template": "app_insights", + "type": "azure/metrics", + "enabled": true, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "metrics", + "dataset": "azure.app_insights" + }, + "vars": { + "metrics": { + "value": "- id: [\"requests/count\"]\n aggregation: [\"sum\"]\n interval: \"P5M\"\n", + "type": "yaml" + }, + "period": { + "value": "300s", + "type": "text" + } + } + } + ] + } + ], + "output_id": "", + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/endace_netflow_pkg_var_in_dsvars.json b/internal/kibana/testdata/endace_netflow_pkg_var_in_dsvars.json new file mode 100644 index 0000000000..71c1e80a6a --- /dev/null +++ b/internal/kibana/testdata/endace_netflow_pkg_var_in_dsvars.json @@ -0,0 +1,31 @@ +{ + "name": "endace-log-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "endace", + "version": "0.2.0" + }, + "inputs": { + "endace-netflow": { + "enabled": true, + "streams": { + "endace.log": { + "enabled": true, + "vars": { + "host": "0.0.0.0", + "port": 2055 + } + } + } + }, + "endace-packet": { + "enabled": false + } + }, + "vars": { + "endace_url": "http://test.elastic.co" + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/endace_netflow_pkg_var_in_dsvars_legacy.json 
b/internal/kibana/testdata/endace_netflow_pkg_var_in_dsvars_legacy.json new file mode 100644 index 0000000000..4cebda9a10 --- /dev/null +++ b/internal/kibana/testdata/endace_netflow_pkg_var_in_dsvars_legacy.json @@ -0,0 +1,50 @@ +{ + "name": "endace-log-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "endace", + "title": "Endace", + "version": "0.2.0" + }, + "inputs": [ + { + "policy_template": "endace", + "type": "netflow", + "enabled": true, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "logs", + "dataset": "endace.log" + }, + "vars": { + "host": { + "value": "0.0.0.0", + "type": "text" + }, + "port": { + "value": 2055, + "type": "integer" + } + } + } + ] + } + ], + "output_id": "", + "vars": { + "endace_url": { + "value": "http://test.elastic.co", + "type": "text" + }, + "endace_view_window": { + "value": "10", + "type": "text" + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/input_with_pkg_vars.json b/internal/kibana/testdata/input_with_pkg_vars.json new file mode 100644 index 0000000000..ced49f2b02 --- /dev/null +++ b/internal/kibana/testdata/input_with_pkg_vars.json @@ -0,0 +1,30 @@ +{ + "name": "input-pkg-vars-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "input_with_pkg_vars", + "version": "0.1.0" + }, + "inputs": { + "logs-logfile": { + "enabled": true, + "streams": { + "input_with_pkg_vars.logs": { + "enabled": true, + "vars": { + "data_stream.dataset": "custom.logs", + "paths": [ + "/tmp/test.log" + ] + } + } + } + } + }, + "vars": { + "custom_tag": "my-tag" + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/input_with_pkg_vars_legacy.json b/internal/kibana/testdata/input_with_pkg_vars_legacy.json new file mode 100644 index 0000000000..b9db8b53bd --- /dev/null +++ 
b/internal/kibana/testdata/input_with_pkg_vars_legacy.json @@ -0,0 +1,60 @@ +{ + "name": "input-pkg-vars-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "input_with_pkg_vars", + "title": "Input Package With Package-level Variables", + "version": "0.1.0" + }, + "inputs": [ + { + "policy_template": "logs", + "type": "logfile", + "enabled": true, + "vars": { + "data_stream.dataset": { + "value": "custom.logs", + "type": "text" + }, + "paths": { + "value": [ + "/tmp/test.log" + ], + "type": "text" + } + }, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "", + "dataset": "input_with_pkg_vars.logs" + }, + "vars": { + "data_stream.dataset": { + "value": "custom.logs", + "type": "text" + }, + "paths": { + "value": [ + "/tmp/test.log" + ], + "type": "text" + } + } + } + ] + } + ], + "output_id": "", + "vars": { + "custom_tag": { + "value": "my-tag", + "type": "text" + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/log_custom_logs.json b/internal/kibana/testdata/log_custom_logs.json new file mode 100644 index 0000000000..e4762ef850 --- /dev/null +++ b/internal/kibana/testdata/log_custom_logs.json @@ -0,0 +1,27 @@ +{ + "name": "log-logs-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "log", + "version": "2.4.4" + }, + "inputs": { + "logs-logfile": { + "enabled": true, + "streams": { + "log.logs": { + "enabled": true, + "vars": { + "data_stream.dataset": "log.custom", + "paths": [ + "/tmp/test.log" + ] + } + } + } + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/log_custom_logs_legacy.json b/internal/kibana/testdata/log_custom_logs_legacy.json new file mode 100644 index 0000000000..610517fbf1 --- /dev/null +++ b/internal/kibana/testdata/log_custom_logs_legacy.json @@ -0,0 +1,86 @@ +{ + "name": "log-logs-test", + "description": 
"", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "log", + "title": "Custom Logs (Deprecated)", + "version": "2.4.4" + }, + "inputs": [ + { + "policy_template": "logs", + "type": "logfile", + "enabled": true, + "vars": { + "custom": { + "value": "", + "type": "yaml" + }, + "data_stream.dataset": { + "value": "log.custom", + "type": "text" + }, + "exclude_files": { + "value": [], + "type": "text" + }, + "ignore_older": { + "value": "72h", + "type": "text" + }, + "paths": { + "value": [ + "/tmp/test.log" + ], + "type": "text" + }, + "tags": { + "value": [], + "type": "text" + } + }, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "", + "dataset": "log.logs" + }, + "vars": { + "custom": { + "value": "", + "type": "yaml" + }, + "data_stream.dataset": { + "value": "log.custom", + "type": "text" + }, + "exclude_files": { + "value": [], + "type": "text" + }, + "ignore_older": { + "value": "72h", + "type": "text" + }, + "paths": { + "value": [ + "/tmp/test.log" + ], + "type": "text" + }, + "tags": { + "value": [], + "type": "text" + } + } + } + ] + } + ], + "output_id": "", + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/otel_traces_use_apm.json b/internal/kibana/testdata/otel_traces_use_apm.json new file mode 100644 index 0000000000..5464babbd5 --- /dev/null +++ b/internal/kibana/testdata/otel_traces_use_apm.json @@ -0,0 +1,28 @@ +{ + "name": "otel-traces-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "otel_traces_input", + "version": "0.1.0" + }, + "inputs": { + "receiver-otelcol": { + "enabled": true, + "streams": { + "otel_traces_input.receiver": { + "enabled": true, + "vars": { + "data_stream.dataset": "receiver", + "use_apm": true + } + } + } + } + }, + "vars": { + "endpoint": "0.0.0.0:9411" + }, + "force": false +} \ No newline at end of file diff --git 
a/internal/kibana/testdata/otel_traces_use_apm_legacy.json b/internal/kibana/testdata/otel_traces_use_apm_legacy.json new file mode 100644 index 0000000000..1d38d3abfc --- /dev/null +++ b/internal/kibana/testdata/otel_traces_use_apm_legacy.json @@ -0,0 +1,64 @@ +{ + "name": "otel-traces-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "otel_traces_input", + "title": "OTel Traces Input Package", + "version": "0.1.0" + }, + "inputs": [ + { + "policy_template": "receiver", + "type": "otelcol", + "enabled": true, + "vars": { + "data_stream.dataset": { + "value": "receiver", + "type": "text" + }, + "parse_tags": { + "value": false, + "type": "bool" + }, + "use_apm": { + "value": true, + "type": "boolean" + } + }, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "", + "dataset": "otel_traces_input.receiver" + }, + "vars": { + "data_stream.dataset": { + "value": "receiver", + "type": "text" + }, + "parse_tags": { + "value": false, + "type": "bool" + }, + "use_apm": { + "value": true, + "type": "boolean" + } + } + } + ] + } + ], + "output_id": "", + "vars": { + "endpoint": { + "value": "0.0.0.0:9411", + "type": "text" + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/packages/apache/data_stream/access/manifest.yml b/internal/kibana/testdata/packages/apache/data_stream/access/manifest.yml new file mode 100644 index 0000000000..c194cafc68 --- /dev/null +++ b/internal/kibana/testdata/packages/apache/data_stream/access/manifest.yml @@ -0,0 +1,45 @@ +title: Apache access logs +type: logs +streams: + - input: logfile + vars: + - name: paths + type: text + title: Paths + multi: true + required: true + show_user: true + default: + - /var/log/apache2/access.log* + - /var/log/apache2/other_vhosts_access.log* + - /var/log/httpd/access_log* + - name: ignore_older + type: text + title: Ignore events older than + default: 72h + required: false + 
show_user: false + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - apache-access + - name: preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false + - name: processors + type: yaml + title: Processors + multi: false + required: false + show_user: false + template_path: log.yml.hbs + title: Apache access logs + description: Collect Apache access logs diff --git a/internal/kibana/testdata/packages/apache/data_stream/error/manifest.yml b/internal/kibana/testdata/packages/apache/data_stream/error/manifest.yml new file mode 100644 index 0000000000..0398074e8d --- /dev/null +++ b/internal/kibana/testdata/packages/apache/data_stream/error/manifest.yml @@ -0,0 +1,38 @@ +title: Apache error logs +type: logs +streams: + - input: logfile + vars: + - name: paths + type: text + title: Paths + multi: true + required: true + show_user: true + default: + - /var/log/apache2/error.log* + - /var/log/httpd/error_log* + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - apache-error + - name: preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false + - name: processors + type: yaml + title: Processors + multi: false + required: false + show_user: false + template_path: log.yml.hbs + title: Apache error logs + description: Collect Apache error logs diff --git a/internal/kibana/testdata/packages/apache/data_stream/status/manifest.yml b/internal/kibana/testdata/packages/apache/data_stream/status/manifest.yml new file mode 100644 index 0000000000..2986a56b2a --- /dev/null +++ b/internal/kibana/testdata/packages/apache/data_stream/status/manifest.yml @@ -0,0 +1,27 @@ +title: Apache status metrics +type: metrics +streams: + - input: apache/metrics + vars: + - name: period + type: text + title: Period + multi: false + 
required: true + show_user: true + default: 30s + - name: server_status_path + type: text + title: Server Status Path + multi: false + required: true + show_user: false + default: /server-status + - name: processors + type: yaml + title: Processors + multi: false + required: false + show_user: false + title: Apache status metrics + description: Collect Apache status metrics diff --git a/internal/kibana/testdata/packages/apache/manifest.yml b/internal/kibana/testdata/packages/apache/manifest.yml new file mode 100644 index 0000000000..aec7f880ca --- /dev/null +++ b/internal/kibana/testdata/packages/apache/manifest.yml @@ -0,0 +1,57 @@ +format_version: "3.1.5" +name: apache +title: Apache HTTP Server +version: "3.0.0" +description: Collect logs and metrics from Apache servers with Elastic Agent. +type: integration +conditions: + kibana: + version: "^8.13.0 || ^9.0.0" +policy_templates: + - name: apache + title: Apache logs and metrics + description: Collect logs and metrics from Apache instances + inputs: + - type: logfile + title: Collect logs from Apache instances + description: Collecting Apache access and error logs + vars: + - name: condition + title: Condition + type: text + multi: false + required: false + show_user: false + - type: apache/metrics + title: Collect metrics from Apache instances + description: Collecting Apache status metrics + vars: + - name: hosts + type: text + title: Hosts + multi: true + required: true + show_user: true + default: + - http://127.0.0.1 + - name: condition + title: Condition + type: text + multi: false + required: false + show_user: false + - name: ssl + type: yaml + title: SSL Configuration + multi: false + required: false + show_user: false + default: | + #certificate_authorities: + # - | + # -----BEGIN CERTIFICATE----- + # MIIDCjCCAfKgAwIBAgITJ706Mu2wJlKckpIvkWxEHvEyijANBgkqhkiG9w0BAQsF + # -----END CERTIFICATE----- +owner: + type: elastic + github: elastic/obs-infraobs-integrations diff --git 
a/internal/kibana/testdata/packages/azure_application_insights/data_stream/app_insights/manifest.yml b/internal/kibana/testdata/packages/azure_application_insights/data_stream/app_insights/manifest.yml new file mode 100644 index 0000000000..8c370de0fb --- /dev/null +++ b/internal/kibana/testdata/packages/azure_application_insights/data_stream/app_insights/manifest.yml @@ -0,0 +1,32 @@ +type: metrics +title: Azure Application Insights +dataset: azure.app_insights +streams: + - input: azure/metrics + vars: + - name: period + type: text + title: Period + multi: false + required: true + show_user: true + default: 300s + - name: metrics + type: yaml + title: Metrics + multi: false + required: true + show_user: true + default: | + - id: ["requests/count", "requests/failed"] + segment: ["request/urlHost", "request/name"] + aggregation: ["sum"] + interval: "P5M" + - name: processors + type: yaml + title: Processors + multi: false + required: false + show_user: false + title: Azure Application Insights + description: Azure Application Insights diff --git a/internal/kibana/testdata/packages/azure_application_insights/data_stream/app_state/manifest.yml b/internal/kibana/testdata/packages/azure_application_insights/data_stream/app_state/manifest.yml new file mode 100644 index 0000000000..3cdcae2406 --- /dev/null +++ b/internal/kibana/testdata/packages/azure_application_insights/data_stream/app_state/manifest.yml @@ -0,0 +1,21 @@ +type: metrics +title: Azure Application State +dataset: azure.app_state +streams: + - input: azure/metrics + title: Azure Application State + description: Azure Application State + vars: + - name: period + type: text + title: Period + multi: false + required: true + show_user: true + default: 300s + - name: processors + type: yaml + title: Processors + multi: false + required: false + show_user: false diff --git a/internal/kibana/testdata/packages/azure_application_insights/manifest.yml 
b/internal/kibana/testdata/packages/azure_application_insights/manifest.yml new file mode 100644 index 0000000000..7f5685da96 --- /dev/null +++ b/internal/kibana/testdata/packages/azure_application_insights/manifest.yml @@ -0,0 +1,47 @@ +format_version: "3.0.2" +name: azure_application_insights +title: Azure Application Insights Metrics Overview +version: "1.9.1" +description: Collect application insights metrics from Azure Monitor with Elastic Agent. +type: integration +conditions: + kibana: + version: "^8.13.0 || ^9.0.0" +vars: + - name: application_id + type: text + title: Application ID + multi: false + required: true + show_user: true + - name: api_key + type: password + title: Api Key + secret: true + multi: false + required: true + show_user: true +policy_templates: + - name: app_insights + title: Azure Application Insights Metrics + description: Collect application insights metrics from Azure Monitor with Elastic Agent. + data_streams: + - app_insights + inputs: + - type: "azure/metrics" + title: "Collect Azure Application Insights metrics" + description: "Collecting Azure Application Insights metrics" + input_group: metrics + - name: app_state + title: Azure Application State Insights Metrics + description: Collect application state related metrics from Azure Monitor with Elastic Agent. 
+ data_streams: + - app_state + inputs: + - type: "azure/metrics" + title: "Collect Azure Application State Insights metrics" + description: "Collecting Azure Application State Insights metrics" + input_group: metrics +owner: + github: elastic/obs-infraobs-integrations + type: elastic diff --git a/internal/kibana/testdata/packages/endace_netflow/data_stream/log/manifest.yml b/internal/kibana/testdata/packages/endace_netflow/data_stream/log/manifest.yml new file mode 100644 index 0000000000..921ff81108 --- /dev/null +++ b/internal/kibana/testdata/packages/endace_netflow/data_stream/log/manifest.yml @@ -0,0 +1,19 @@ +type: logs +title: Endace log +streams: + - input: netflow + vars: + - name: host + type: text + title: Host + multi: false + required: true + show_user: true + default: "0.0.0.0" + - name: port + type: integer + title: Port + multi: false + required: true + show_user: true + default: 2055 diff --git a/internal/kibana/testdata/packages/endace_netflow/manifest.yml b/internal/kibana/testdata/packages/endace_netflow/manifest.yml new file mode 100644 index 0000000000..f38a08423b --- /dev/null +++ b/internal/kibana/testdata/packages/endace_netflow/manifest.yml @@ -0,0 +1,37 @@ +format_version: "3.0.0" +name: endace +title: Endace +version: "0.2.0" +description: Endace integration with package-level vars. +type: integration +conditions: + kibana: + version: "^8.13.0 || ^9.0.0" +vars: + - name: endace_url + type: text + title: Endace UI URL + multi: false + required: true + show_user: true + - name: endace_view_window + type: text + title: View Window Time + multi: false + required: true + show_user: true + default: "10" +policy_templates: + - name: endace + title: Endace Flow logs + description: Capture network traffic + inputs: + - type: packet + title: Capture network traffic + description: Collecting network traffic. + - type: netflow + title: Collect Endace Flow logs + description: Collecting Endace Flow logs using the netflow input. 
+owner: + github: elastic/integration-experience + type: partner diff --git a/internal/kibana/testdata/packages/input_with_pkg_vars/manifest.yml b/internal/kibana/testdata/packages/input_with_pkg_vars/manifest.yml new file mode 100644 index 0000000000..30d4efde80 --- /dev/null +++ b/internal/kibana/testdata/packages/input_with_pkg_vars/manifest.yml @@ -0,0 +1,41 @@ +format_version: 3.1.5 +name: input_with_pkg_vars +title: Input Package With Package-level Variables +description: >- + Minimal input package used to test that package-level variables are + forwarded correctly by BuildInputPackagePolicy. +type: input +version: 0.1.0 +categories: + - custom +conditions: + kibana: + version: "^8.8.0 || ^9.0.0" +vars: + - name: custom_tag + type: text + title: Custom Tag + description: A tag applied to all events from this package. + required: false + show_user: true + default: "default-tag" +policy_templates: + - name: logs + type: logs + title: Custom log file + description: Collect custom log files. + input: logfile + template_path: input.yml.hbs + vars: + - name: paths + required: true + title: Log file path + type: text + multi: true + - name: data_stream.dataset + required: true + title: Dataset name + type: text +owner: + github: elastic/elastic-package + type: elastic diff --git a/internal/kibana/testdata/packages/log_input/manifest.yml b/internal/kibana/testdata/packages/log_input/manifest.yml new file mode 100644 index 0000000000..090d515d9a --- /dev/null +++ b/internal/kibana/testdata/packages/log_input/manifest.yml @@ -0,0 +1,76 @@ +format_version: 3.1.5 +name: log +title: Custom Logs (Deprecated) +description: >- + Collect custom logs with Elastic Agent. +type: input +version: 2.4.4 +categories: + - custom + - custom_logs +conditions: + kibana: + version: "^8.8.0|| ^9.0.0" +policy_templates: + - name: logs + type: logs + title: Custom log file + description: Collect your custom log files. 
+ input: logfile + template_path: input.yml.hbs + vars: + - name: paths + required: true + title: Log file path + description: Path to log files to be collected + type: text + multi: true + - name: exclude_files + required: false + show_user: false + title: Exclude files + description: Patterns to be ignored + type: text + multi: true + - name: ignore_older + type: text + title: Ignore events older than + default: 72h + required: false + show_user: false + description: >- + If this option is specified, events that are older than the specified amount of time are ignored. Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h". + - name: data_stream.dataset + required: true + title: Dataset name + description: > + Set the name for your dataset. Changing the dataset will send the data to a different index. You can't use `-` in the name of a dataset and only valid characters for [Elasticsearch index names](https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-index_.html). + + type: text + - name: tags + type: text + title: Tags + description: Tags to include in the published event + multi: true + show_user: false + - name: processors + type: yaml + title: Processors + multi: false + required: false + show_user: false + description: >- + Processors are used to reduce the number of fields in the exported event or to enhance the event with metadata. This executes in the agent before the logs are parsed. See [Processors](https://www.elastic.co/guide/en/beats/filebeat/current/filtering-and-enhancing-data.html) for details. + - name: custom + title: Custom configurations + description: > + Additional settings to be added to the configuration. Be careful using this as it might break the input as those settings are not validated and can override the settings specified above. See [`log` input settings docs](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-input-log.html) for details. 
+ + type: yaml + default: "" +icons: + - src: "/img/icon.svg" + type: "image/svg+xml" +owner: + github: elastic/elastic-agent-data-plane + type: elastic diff --git a/internal/kibana/testdata/packages/otel_traces_input/manifest.yml b/internal/kibana/testdata/packages/otel_traces_input/manifest.yml new file mode 100644 index 0000000000..6aa96f686d --- /dev/null +++ b/internal/kibana/testdata/packages/otel_traces_input/manifest.yml @@ -0,0 +1,38 @@ +format_version: 3.5.0 +name: otel_traces_input +title: OTel Traces Input Package +description: >- + Minimal OTel traces input package used to test that use_apm is forwarded + correctly by BuildInputPackagePolicy when the manifest does not declare it. +type: input +version: 0.1.0 +categories: + - observability +conditions: + kibana: + version: "^9.4.0" +vars: + - name: endpoint + type: text + title: Endpoint + required: true + description: The host:port address to listen on. + default: localhost:9411 + show_user: true +policy_templates: + - name: receiver + type: traces + title: OTel Traces Receiver + description: Collect traces via OpenTelemetry Collector. 
+ input: otelcol + template_path: input.yml.hbs + vars: + - name: parse_tags + type: bool + title: Parse Tags + required: false + default: false + show_user: true +owner: + github: elastic/elastic-package + type: elastic diff --git a/internal/kibana/testdata/packages/sophos_tcp/data_stream/utm/manifest.yml b/internal/kibana/testdata/packages/sophos_tcp/data_stream/utm/manifest.yml new file mode 100644 index 0000000000..a87f38736d --- /dev/null +++ b/internal/kibana/testdata/packages/sophos_tcp/data_stream/utm/manifest.yml @@ -0,0 +1,122 @@ +title: Sophos UTM logs +type: logs +streams: + - input: udp + title: Sophos UTM logs + description: Collect Sophos UTM logs + template_path: udp.yml.hbs + vars: + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - sophos-utm + - forwarded + - name: udp_host + type: text + title: UDP host to listen on + multi: false + required: true + show_user: true + default: localhost + - name: udp_port + type: integer + title: UDP port to listen on + multi: false + required: true + show_user: true + default: 9549 + - name: tz_offset + type: text + title: Timezone offset + multi: false + required: false + show_user: true + default: UTC + - name: preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false + - input: tcp + title: Sophos UTM logs + description: Collect Sophos UTM logs + template_path: tcp.yml.hbs + vars: + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - sophos-utm + - forwarded + - name: tcp_host + type: text + title: TCP host to listen on + multi: false + required: true + show_user: true + default: localhost + - name: tcp_port + type: integer + title: TCP port to listen on + multi: false + required: true + show_user: true + default: 9549 + - name: tz_offset + type: text + title: Timezone offset + multi: false + required: false + show_user: true 
+ default: UTC + - name: preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false + - input: logfile + title: Sophos UTM logs + description: Collect Sophos UTM logs from file + vars: + - name: paths + type: text + title: Paths + multi: true + required: true + show_user: true + default: + - /var/log/sophos-utm.log + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - sophos-utm + - forwarded + - name: tz_offset + type: text + title: Timezone offset + multi: false + required: false + show_user: true + default: UTC + - name: preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false diff --git a/internal/kibana/testdata/packages/sophos_tcp/data_stream/xg/manifest.yml b/internal/kibana/testdata/packages/sophos_tcp/data_stream/xg/manifest.yml new file mode 100644 index 0000000000..1c50c095ef --- /dev/null +++ b/internal/kibana/testdata/packages/sophos_tcp/data_stream/xg/manifest.yml @@ -0,0 +1,231 @@ +type: logs +title: Sophos XG logs +streams: + - input: tcp + vars: + - name: syslog_host + type: text + title: Syslog Host + multi: false + required: true + show_user: true + default: localhost + - name: syslog_port + type: integer + title: Syslog Port + multi: false + required: true + show_user: true + default: 9005 + - name: default_host_name + type: text + title: Default Host Name + multi: false + required: true + show_user: true + default: firewall.localgroup.local + - name: known_devices + type: yaml + title: Known Devices + multi: false + required: true + show_user: true + default: | + - hostname: my_fancy_host + serial_number: "1234567890123456" + - hostname: some_other_host.local + serial_number: "1234567890123457" + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - sophos-xg + - forwarded + - name: 
preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false + - name: tz_offset + type: text + title: Timezone + multi: false + required: true + show_user: false + default: UTC + - name: tz_map + type: yaml + title: Timezone Map + multi: false + required: false + show_user: false + default: | + #- tz_short: AEST + # tz_long: Australia/Sydney + - name: ssl + type: yaml + title: SSL Configuration + multi: false + required: false + show_user: false + default: | + #certificate: "/etc/server/cert.pem" + #key: "/etc/server/key.pem" + - name: tcp_options + type: yaml + title: Custom TCP Options + multi: false + required: false + show_user: false + default: | + #max_connections: 1 + #framing: delimiter + #line_delimiter: "\n" + template_path: tcp.yml.hbs + title: Sophos XG logs + description: Collect Sophos XG logs + - input: udp + vars: + - name: syslog_host + type: text + title: Syslog Host + multi: false + required: true + show_user: true + default: localhost + - name: syslog_port + type: integer + title: Syslog Port + multi: false + required: true + show_user: true + default: 9005 + - name: default_host_name + type: text + title: Default Host Name + multi: false + required: true + show_user: true + default: firewall.localgroup.local + - name: known_devices + type: yaml + title: Known Devices + multi: false + required: true + show_user: true + default: | + - hostname: my_fancy_host + serial_number: "1234567890123456" + - hostname: some_other_host.local + serial_number: "1234567890123457" + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - sophos-xg + - forwarded + - name: preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false + - name: tz_offset + type: text + title: Timezone + multi: false + required: true + show_user: false + default: UTC + - name: tz_map + 
type: yaml + title: Timezone Map + multi: false + required: false + show_user: false + default: | + #- tz_short: AEST + # tz_long: Australia/Sydney + - name: udp_options + type: yaml + title: Custom UDP Options + multi: false + required: false + show_user: false + default: | + #read_buffer: 100MiB + #max_message_size: 50KiB + #timeout: 300s + template_path: udp.yml.hbs + title: Sophos XG logs + description: Collect Sophos XG logs + - input: logfile + vars: + - name: paths + type: text + title: Paths + multi: true + required: true + show_user: true + - name: default_host_name + type: text + title: Default Host Name + multi: false + required: true + show_user: true + default: firewall.localgroup.local + - name: known_devices + type: yaml + title: Known Devices + multi: false + required: true + show_user: true + default: | + - hostname: my_fancy_host + serial_number: "1234567890123456" + - hostname: some_other_host.local + serial_number: "1234567890123457" + - name: tags + type: text + title: Tags + multi: true + required: true + show_user: false + default: + - sophos-xg + - forwarded + - name: preserve_original_event + required: true + show_user: true + title: Preserve original event + type: bool + multi: false + default: false + - name: tz_offset + type: text + title: Timezone + multi: false + required: true + show_user: false + default: UTC + - name: tz_map + type: yaml + title: Timezone Map + multi: false + required: false + show_user: false + default: | + #- tz_short: AEST + # tz_long: Australia/Sydney + template_path: log.yml.hbs + title: Sophos XG logs + description: Collect Sophos XG logs diff --git a/internal/kibana/testdata/packages/sophos_tcp/manifest.yml b/internal/kibana/testdata/packages/sophos_tcp/manifest.yml new file mode 100644 index 0000000000..31a09e3f3a --- /dev/null +++ b/internal/kibana/testdata/packages/sophos_tcp/manifest.yml @@ -0,0 +1,26 @@ +format_version: "3.0.3" +name: sophos +title: Sophos +version: "3.17.0" +description: Collect logs 
from Sophos with Elastic Agent. +type: integration +conditions: + kibana: + version: "^8.11.0 || ^9.0.0" +policy_templates: + - name: sophos + title: Sophos logs + description: Collect Sophos logs from syslog or a file. + inputs: + - type: udp + title: Collect logs from Sophos via UDP + description: Collecting syslog from Sophos via UDP + - type: tcp + title: Collect logs from Sophos via TCP + description: Collecting syslog from Sophos via TCP + - type: logfile + title: Collect logs from Sophos via file + description: Collecting syslog from Sophos via file. +owner: + github: elastic/integration-experience + type: elastic diff --git a/internal/kibana/testdata/packages/test_policy_vars/data_stream/log/manifest.yml b/internal/kibana/testdata/packages/test_policy_vars/data_stream/log/manifest.yml new file mode 100644 index 0000000000..b1249f1287 --- /dev/null +++ b/internal/kibana/testdata/packages/test_policy_vars/data_stream/log/manifest.yml @@ -0,0 +1,34 @@ +title: Test logs +type: logs +streams: + - input: cel + vars: + - name: file_selectors + type: yaml + title: File Selectors + multi: true + required: false + show_user: false + - name: active.only + type: bool + title: Active Only + default: true + multi: false + required: false + show_user: true + - name: revoked + type: select + title: Revoked Status + options: + - text: All + value: "" + - text: Active Only + value: "false" + - text: Revoked Only + value: "true" + default: "" + multi: false + required: true + show_user: false + title: Test logs + description: Test logs diff --git a/internal/kibana/testdata/packages/test_policy_vars/manifest.yml b/internal/kibana/testdata/packages/test_policy_vars/manifest.yml new file mode 100644 index 0000000000..d0b56765ec --- /dev/null +++ b/internal/kibana/testdata/packages/test_policy_vars/manifest.yml @@ -0,0 +1,18 @@ +name: test_policy_vars +title: Test Policy Vars +version: 1.0.0 +type: integration +description: Dummy package for testing policy variable handling edge 
cases. +format_version: 3.1.4 +categories: [] +conditions: + kibana: + version: "^8.0.0" +policy_templates: + - name: test + title: Test + description: Test policy template. + inputs: + - type: cel + title: Test CEL input + description: Test CEL input. diff --git a/internal/kibana/testdata/sophos_xg_tcp.json b/internal/kibana/testdata/sophos_xg_tcp.json new file mode 100644 index 0000000000..76ffb70bc8 --- /dev/null +++ b/internal/kibana/testdata/sophos_xg_tcp.json @@ -0,0 +1,51 @@ +{ + "name": "sophos-xg-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "sophos", + "version": "3.17.0" + }, + "inputs": { + "sophos-tcp": { + "enabled": true, + "streams": { + "sophos.utm": { + "enabled": false + }, + "sophos.xg": { + "enabled": true, + "vars": { + "syslog_host": "0.0.0.0", + "syslog_port": 9549, + "known_devices": "- hostname: XG230\n serial_number: \"1234567890123456\"\n- hostname: SG430\n serial_number: \"S4000806149EE49\"\n" + } + } + } + }, + "sophos-udp": { + "enabled": false, + "streams": { + "sophos.utm": { + "enabled": false + }, + "sophos.xg": { + "enabled": false + } + } + }, + "sophos-logfile": { + "enabled": false, + "streams": { + "sophos.utm": { + "enabled": false + }, + "sophos.xg": { + "enabled": false + } + } + } + }, + "force": false +} diff --git a/internal/kibana/testdata/sophos_xg_tcp_legacy.json b/internal/kibana/testdata/sophos_xg_tcp_legacy.json new file mode 100644 index 0000000000..a6791ebb79 --- /dev/null +++ b/internal/kibana/testdata/sophos_xg_tcp_legacy.json @@ -0,0 +1,39 @@ +{ + "name": "sophos-xg-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "sophos", + "title": "Sophos", + "version": "3.17.0" + }, + "inputs": [ + { + "policy_template": "sophos", + "type": "tcp", + "enabled": true, + "streams": [ + { + "enabled": true, + "data_stream": {"type": "logs", "dataset": "sophos.xg"}, + "vars": { + 
"syslog_host": {"value": "0.0.0.0", "type": "text"}, + "syslog_port": {"value": 9549, "type": "integer"}, + "default_host_name": {"value": "firewall.localgroup.local", "type": "text"}, + "known_devices": {"value": "- hostname: XG230\n serial_number: \"1234567890123456\"\n- hostname: SG430\n serial_number: \"S4000806149EE49\"\n", "type": "yaml"}, + "tags": {"value": ["sophos-xg", "forwarded"], "type": "text"}, + "preserve_original_event": {"value": false, "type": "bool"}, + "tz_offset": {"value": "UTC", "type": "text"}, + "tz_map": {"value": "#- tz_short: AEST\n# tz_long: Australia/Sydney\n", "type": "yaml"}, + "ssl": {"value": "#certificate: \"/etc/server/cert.pem\"\n#key: \"/etc/server/key.pem\"\n", "type": "yaml"}, + "tcp_options": {"value": "#max_connections: 1\n#framing: delimiter\n#line_delimiter: \"\\n\"\n", "type": "yaml"} + } + } + ] + } + ], + "force": false, + "output_id": "" +} diff --git a/internal/kibana/testdata/sql_input_custom_dataset.json b/internal/kibana/testdata/sql_input_custom_dataset.json new file mode 100644 index 0000000000..53a3157d6b --- /dev/null +++ b/internal/kibana/testdata/sql_input_custom_dataset.json @@ -0,0 +1,24 @@ +{ + "name": "sql-query-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "sql_input", + "version": "0.2.0" + }, + "inputs": { + "sql_query-sql/metrics": { + "enabled": true, + "streams": { + "sql_input.sql_query": { + "enabled": true, + "vars": { + "data_stream.dataset": "custom.sql" + } + } + } + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/sql_input_custom_dataset_legacy.json b/internal/kibana/testdata/sql_input_custom_dataset_legacy.json new file mode 100644 index 0000000000..97fa7616a1 --- /dev/null +++ b/internal/kibana/testdata/sql_input_custom_dataset_legacy.json @@ -0,0 +1,84 @@ +{ + "name": "sql-query-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + 
"package": { + "name": "sql_input", + "title": "SQL Input", + "version": "0.2.0" + }, + "inputs": [ + { + "policy_template": "sql_query", + "type": "sql/metrics", + "enabled": true, + "vars": { + "data_stream.dataset": { + "value": "custom.sql", + "type": "text" + }, + "hosts": { + "value": [ + "http://127.0.0.1" + ], + "type": "text" + }, + "period": { + "value": "10s", + "type": "text" + }, + "sql_query": { + "value": "SHOW GLOBAL STATUS LIKE 'Innodb_system%'", + "type": "text" + }, + "sql_response_format": { + "value": "variables", + "type": "text" + } + }, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "", + "dataset": "sql_input.sql_query" + }, + "vars": { + "data_stream.dataset": { + "value": "custom.sql", + "type": "text" + }, + "hosts": { + "value": [ + "http://127.0.0.1" + ], + "type": "text" + }, + "period": { + "value": "10s", + "type": "text" + }, + "sql_query": { + "value": "SHOW GLOBAL STATUS LIKE 'Innodb_system%'", + "type": "text" + }, + "sql_response_format": { + "value": "variables", + "type": "text" + } + } + } + ] + } + ], + "output_id": "", + "vars": { + "driver": { + "value": "mysql", + "type": "text" + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/sql_input_default_dataset.json b/internal/kibana/testdata/sql_input_default_dataset.json new file mode 100644 index 0000000000..6e9616c394 --- /dev/null +++ b/internal/kibana/testdata/sql_input_default_dataset.json @@ -0,0 +1,24 @@ +{ + "name": "sql-query-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "sql_input", + "version": "0.2.0" + }, + "inputs": { + "sql_query-sql/metrics": { + "enabled": true, + "streams": { + "sql_input.sql_query": { + "enabled": true, + "vars": { + "data_stream.dataset": "sql_query" + } + } + } + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/sql_input_default_dataset_legacy.json 
b/internal/kibana/testdata/sql_input_default_dataset_legacy.json new file mode 100644 index 0000000000..cc1b765f28 --- /dev/null +++ b/internal/kibana/testdata/sql_input_default_dataset_legacy.json @@ -0,0 +1,84 @@ +{ + "name": "sql-query-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "sql_input", + "title": "SQL Input", + "version": "0.2.0" + }, + "inputs": [ + { + "policy_template": "sql_query", + "type": "sql/metrics", + "enabled": true, + "vars": { + "data_stream.dataset": { + "value": "sql_query", + "type": "text" + }, + "hosts": { + "value": [ + "http://127.0.0.1" + ], + "type": "text" + }, + "period": { + "value": "10s", + "type": "text" + }, + "sql_query": { + "value": "SHOW GLOBAL STATUS LIKE 'Innodb_system%'", + "type": "text" + }, + "sql_response_format": { + "value": "variables", + "type": "text" + } + }, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "", + "dataset": "sql_input.sql_query" + }, + "vars": { + "data_stream.dataset": { + "value": "sql_query", + "type": "text" + }, + "hosts": { + "value": [ + "http://127.0.0.1" + ], + "type": "text" + }, + "period": { + "value": "10s", + "type": "text" + }, + "sql_query": { + "value": "SHOW GLOBAL STATUS LIKE 'Innodb_system%'", + "type": "text" + }, + "sql_response_format": { + "value": "variables", + "type": "text" + } + } + } + ] + } + ], + "output_id": "", + "vars": { + "driver": { + "value": "mysql", + "type": "text" + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/test_policy_vars_dotted_bool.json b/internal/kibana/testdata/test_policy_vars_dotted_bool.json new file mode 100644 index 0000000000..87ebb5072b --- /dev/null +++ b/internal/kibana/testdata/test_policy_vars_dotted_bool.json @@ -0,0 +1,24 @@ +{ + "name": "test-log-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "test_policy_vars", + 
"version": "1.0.0" + }, + "inputs": { + "test-cel": { + "enabled": true, + "streams": { + "test_policy_vars.log": { + "enabled": true, + "vars": { + "active.only": false + } + } + } + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/test_policy_vars_dotted_bool_legacy.json b/internal/kibana/testdata/test_policy_vars_dotted_bool_legacy.json new file mode 100644 index 0000000000..d1e1272735 --- /dev/null +++ b/internal/kibana/testdata/test_policy_vars_dotted_bool_legacy.json @@ -0,0 +1,44 @@ +{ + "name": "test-log-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "test_policy_vars", + "title": "Test Policy Vars", + "version": "1.0.0" + }, + "inputs": [ + { + "policy_template": "test", + "type": "cel", + "enabled": true, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "logs", + "dataset": "test_policy_vars.log" + }, + "vars": { + "active.only": { + "value": false, + "type": "bool" + }, + "file_selectors": { + "value": [], + "type": "yaml" + }, + "revoked": { + "value": "", + "type": "select" + } + } + } + ] + } + ], + "output_id": "", + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/test_policy_vars_empty_yaml_multi.json b/internal/kibana/testdata/test_policy_vars_empty_yaml_multi.json new file mode 100644 index 0000000000..f1b63ab330 --- /dev/null +++ b/internal/kibana/testdata/test_policy_vars_empty_yaml_multi.json @@ -0,0 +1,21 @@ +{ + "name": "test-log-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "test_policy_vars", + "version": "1.0.0" + }, + "inputs": { + "test-cel": { + "enabled": true, + "streams": { + "test_policy_vars.log": { + "enabled": true + } + } + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/test_policy_vars_empty_yaml_multi_legacy.json 
b/internal/kibana/testdata/test_policy_vars_empty_yaml_multi_legacy.json new file mode 100644 index 0000000000..0f19d9598f --- /dev/null +++ b/internal/kibana/testdata/test_policy_vars_empty_yaml_multi_legacy.json @@ -0,0 +1,44 @@ +{ + "name": "test-log-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "enabled": true, + "package": { + "name": "test_policy_vars", + "title": "Test Policy Vars", + "version": "1.0.0" + }, + "inputs": [ + { + "policy_template": "test", + "type": "cel", + "enabled": true, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "logs", + "dataset": "test_policy_vars.log" + }, + "vars": { + "active.only": { + "value": true, + "type": "bool" + }, + "file_selectors": { + "value": [], + "type": "yaml" + }, + "revoked": { + "value": "", + "type": "select" + } + } + } + ] + } + ], + "output_id": "", + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/test_policy_vars_select_false.json b/internal/kibana/testdata/test_policy_vars_select_false.json new file mode 100644 index 0000000000..b4e71558e4 --- /dev/null +++ b/internal/kibana/testdata/test_policy_vars_select_false.json @@ -0,0 +1,24 @@ +{ + "name": "test-log-test", + "description": "", + "namespace": "test", + "policy_id": "test-policy-id", + "package": { + "name": "test_policy_vars", + "version": "1.0.0" + }, + "inputs": { + "test-cel": { + "enabled": true, + "streams": { + "test_policy_vars.log": { + "enabled": true, + "vars": { + "revoked": "false" + } + } + } + } + }, + "force": false +} \ No newline at end of file diff --git a/internal/kibana/testdata/test_policy_vars_select_false_legacy.json b/internal/kibana/testdata/test_policy_vars_select_false_legacy.json new file mode 100644 index 0000000000..d7bdddde39 --- /dev/null +++ b/internal/kibana/testdata/test_policy_vars_select_false_legacy.json @@ -0,0 +1,44 @@ +{ + "name": "test-log-test", + "description": "", + "namespace": "test", + "policy_id": 
"test-policy-id", + "enabled": true, + "package": { + "name": "test_policy_vars", + "title": "Test Policy Vars", + "version": "1.0.0" + }, + "inputs": [ + { + "policy_template": "test", + "type": "cel", + "enabled": true, + "streams": [ + { + "enabled": true, + "data_stream": { + "type": "logs", + "dataset": "test_policy_vars.log" + }, + "vars": { + "active.only": { + "value": true, + "type": "bool" + }, + "file_selectors": { + "value": [], + "type": "yaml" + }, + "revoked": { + "value": "false", + "type": "select" + } + } + } + ] + } + ], + "output_id": "", + "force": false +} \ No newline at end of file diff --git a/internal/packages/packages.go b/internal/packages/packages.go index 2f5bc624b5..af1bcb2120 100644 --- a/internal/packages/packages.go +++ b/internal/packages/packages.go @@ -680,6 +680,46 @@ func ReadDataStreamManifestFromPackageRoot(packageRoot string, name string) (*Da return ReadDataStreamManifest(filepath.Join(packageRoot, "data_stream", name, DataStreamManifestFile)) } +// ReadAllDataStreamManifests reads the manifests for all data streams in a package. +func ReadAllDataStreamManifests(packageRoot string) ([]DataStreamManifest, error) { + dirs, err := os.ReadDir(filepath.Join(packageRoot, "data_stream")) + if errors.Is(err, os.ErrNotExist) { + return nil, nil + } + if err != nil { + return nil, fmt.Errorf("could not list data streams: %w", err) + } + var manifests []DataStreamManifest + for _, dir := range dirs { + if !dir.IsDir() { + continue + } + m, err := ReadDataStreamManifestFromPackageRoot(packageRoot, dir.Name()) + if err != nil { + return nil, fmt.Errorf("could not read data stream manifest for %q: %w", dir.Name(), err) + } + manifests = append(manifests, *m) + } + return manifests, nil +} + +// FilterDatastreamsForPolicyTemplate returns the subset of the provided data +// streams that belong to the given policy template. 
When the policy template +// declares an explicit DataStreams list, only data streams whose names appear +// in that list are returned; otherwise all provided data streams are returned. +func FilterDatastreamsForPolicyTemplate(datastreams []DataStreamManifest, pt PolicyTemplate) []DataStreamManifest { + if len(pt.DataStreams) == 0 { + return datastreams + } + result := make([]DataStreamManifest, 0, len(pt.DataStreams)) + for _, ds := range datastreams { + if slices.Contains(pt.DataStreams, ds.Name) { + result = append(result, ds) + } + } + return result +} + // GetPipelineNameOrDefault returns the name of the data stream's pipeline, if one is explicitly defined in the // data stream manifest. If not, the default pipeline name is returned. func (dsm *DataStreamManifest) GetPipelineNameOrDefault() string { @@ -740,3 +780,109 @@ func isDataStreamManifest(path string) (bool, error) { (m.Type == dataStreamTypeLogs || m.Type == dataStreamTypeMetrics || m.Type == dataStreamTypeSynthetics || m.Type == dataStreamTypeTraces), nil } + +// GetDataStreamIndex returns the index of the stream in ds whose input name +// matches inputName. If inputName is empty, returns 0 (first stream). If no +// stream matches, logs a debug message and falls back to index 0. +func GetDataStreamIndex(inputName string, ds DataStreamManifest) int { + if inputName == "" { + return 0 + } + for i, s := range ds.Streams { + if s.Input == inputName { + return i + } + } + logger.Debugf("no stream found with input %q in data stream %q, using first stream", inputName, ds.Name) + return 0 +} + +// FindPolicyTemplateForInput returns the name of the policy template that +// applies to the given data stream and input type. Pass nil for ds when +// working with input packages. An error is returned when no template matches +// or when multiple templates match and the result would be ambiguous. 
+func FindPolicyTemplateForInput(pkg *PackageManifest, ds *DataStreamManifest, inputName string) (string, error) { + if ds != nil { + return findPolicyTemplateForDataStream(*pkg, *ds, inputName) + } + return findPolicyTemplateForInputPackage(*pkg, inputName) +} + +func findPolicyTemplateForDataStream(pkg PackageManifest, ds DataStreamManifest, inputName string) (string, error) { + if inputName == "" { + if len(ds.Streams) == 0 { + return "", errors.New("no streams declared in data stream manifest") + } + inputName = ds.Streams[0].Input + } + + var matchedPolicyTemplates []string + for _, policyTemplate := range pkg.PolicyTemplates { + // Does this policy_template include this input type? + if policyTemplate.FindInputByType(inputName) == nil { + continue + } + + // Does the policy_template apply to this data stream (when data streams are specified)? + if len(policyTemplate.DataStreams) > 0 && !slices.Contains(policyTemplate.DataStreams, ds.Name) { + continue + } + + matchedPolicyTemplates = append(matchedPolicyTemplates, policyTemplate.Name) + } + + switch len(matchedPolicyTemplates) { + case 1: + return matchedPolicyTemplates[0], nil + case 0: + return "", fmt.Errorf("no policy template was found for data stream %q "+ + "with input type %q: verify that you have included the data stream "+ + "and input in the package's policy_template list", ds.Name, inputName) + default: + return "", fmt.Errorf("ambiguous result: multiple policy templates ([%s]) "+ + "were found that apply to data stream %q with input type %q: please "+ + "specify the 'policy_template' in the system test config", + strings.Join(matchedPolicyTemplates, ", "), ds.Name, inputName) + } +} + +func findPolicyTemplateForInputPackage(pkg PackageManifest, inputName string) (string, error) { + if inputName == "" { + if len(pkg.PolicyTemplates) == 0 { + return "", errors.New("no policy templates specified for input package") + } + inputName = pkg.PolicyTemplates[0].Input + } + + var matched []string + for _, 
policyTemplate := range pkg.PolicyTemplates { + if policyTemplate.Input != inputName { + continue + } + matched = append(matched, policyTemplate.Name) + } + + switch len(matched) { + case 1: + return matched[0], nil + case 0: + return "", fmt.Errorf("no policy template was found "+ + "with input type %q: verify that you have included the data stream "+ + "and input in the package's policy_template list", inputName) + default: + return "", fmt.Errorf("ambiguous result: multiple policy templates ([%s]) "+ + "with input type %q: please "+ + "specify the 'policy_template' in the system test config", + strings.Join(matched, ", "), inputName) + } +} + +// SelectPolicyTemplateByName returns the policy template with the given name. +func SelectPolicyTemplateByName(policies []PolicyTemplate, name string) (PolicyTemplate, error) { + for _, pt := range policies { + if pt.Name == name { + return pt, nil + } + } + return PolicyTemplate{}, fmt.Errorf("policy template %q not found", name) +} diff --git a/internal/resources/fleetpolicy.go b/internal/resources/fleetpolicy.go index f2c5d88094..f6a122b025 100644 --- a/internal/resources/fleetpolicy.go +++ b/internal/resources/fleetpolicy.go @@ -7,8 +7,6 @@ package resources import ( "errors" "fmt" - "slices" - "strings" "github.com/elastic/go-resource" @@ -72,6 +70,10 @@ type FleetPackagePolicy struct { // DataStreamVars contains the values for the variables at the data stream level. DataStreamVars map[string]any + // PolicyAPIFormat overrides the Fleet API format used to create the package policy. + // Valid values: "simplified", "legacy", "" (auto-detect, default). + PolicyAPIFormat string + // Absent is set to true to indicate that the policy should not be present. 
Absent bool } @@ -142,11 +144,11 @@ func (f *FleetAgentPolicy) Create(ctx resource.Context) error { f.ID = policy.ID for _, packagePolicy := range f.PackagePolicies { - policy, err := createPackagePolicy(*f, packagePolicy) + pp, err := createPackagePolicy(*f, packagePolicy) if err != nil { return fmt.Errorf("could not prepare package policy: %w", err) } - err = provider.Client.AddPackageDataStreamToPolicy(ctx, *policy) + _, err = provider.Client.CreatePackagePolicy(ctx, *pp, packagePolicy.PolicyAPIFormat) if err != nil { return fmt.Errorf("could not add package policy %q to agent policy %q: %w", packagePolicy.Name, f.Name, err) } @@ -155,7 +157,7 @@ func (f *FleetAgentPolicy) Create(ctx resource.Context) error { return nil } -func createPackagePolicy(policy FleetAgentPolicy, packagePolicy FleetPackagePolicy) (*kibana.PackageDataStream, error) { +func createPackagePolicy(policy FleetAgentPolicy, packagePolicy FleetPackagePolicy) (*kibana.PackagePolicy, error) { manifest, err := packages.ReadPackageManifestFromPackageRoot(packagePolicy.PackageRoot) if err != nil { return nil, fmt.Errorf("could not read package manifest at %s: %w", packagePolicy.PackageRoot, err) @@ -171,167 +173,71 @@ func createPackagePolicy(policy FleetAgentPolicy, packagePolicy FleetPackagePoli } } -func createIntegrationPackagePolicy(policy FleetAgentPolicy, manifest packages.PackageManifest, packagePolicy FleetPackagePolicy) (*kibana.PackageDataStream, error) { +func createIntegrationPackagePolicy(policy FleetAgentPolicy, manifest packages.PackageManifest, packagePolicy FleetPackagePolicy) (*kibana.PackagePolicy, error) { if packagePolicy.DataStreamName == "" { return nil, fmt.Errorf("expected data stream for integration package policy %q", packagePolicy.Name) } - dsManifest, err := packages.ReadDataStreamManifestFromPackageRoot(packagePolicy.PackageRoot, packagePolicy.DataStreamName) if err != nil { return nil, fmt.Errorf("could not read %q data stream manifest for package at %s: %w", 
packagePolicy.DataStreamName, packagePolicy.PackageRoot, err) } - policyTemplateName := packagePolicy.TemplateName if policyTemplateName == "" { - name, err := findPolicyTemplateForDataStream(manifest, *dsManifest, packagePolicy.InputName) + name, err := packages.FindPolicyTemplateForInput(&manifest, dsManifest, packagePolicy.InputName) if err != nil { return nil, fmt.Errorf("failed to determine the associated policy_template: %w", err) } policyTemplateName = name } - - policyTemplate, err := selectPolicyTemplateByName(manifest.PolicyTemplates, policyTemplateName) + policyTemplate, err := packages.SelectPolicyTemplateByName(manifest.PolicyTemplates, policyTemplateName) if err != nil { return nil, fmt.Errorf("failed to find the selected policy_template: %w", err) } - - ds := kibana.PackageDataStream{ - Name: packagePolicy.Name, - Namespace: policy.Namespace, - PolicyID: policy.ID, - Enabled: !packagePolicy.Disabled, - Inputs: []kibana.Input{ - { - PolicyTemplate: policyTemplate.Name, - Enabled: !packagePolicy.Disabled, - }, - }, - } - ds.Package.Name = manifest.Name - ds.Package.Title = manifest.Title - ds.Package.Version = manifest.Version - - stream := dsManifest.Streams[getDataStreamIndex(packagePolicy.InputName, *dsManifest)] - streamInput := stream.Input - ds.Inputs[0].Type = streamInput - - streams := []kibana.Stream{ - { - ID: fmt.Sprintf("%s-%s.%s", streamInput, manifest.Name, dsManifest.Name), - Enabled: !packagePolicy.Disabled, - DataStream: kibana.DataStream{ - Type: dsManifest.Type, - Dataset: getDataStreamDataset(manifest, *dsManifest), - }, - }, - } - - // Add dataStream-level vars - streams[0].Vars = setKibanaVariables(stream.Vars, common.MapStr(packagePolicy.DataStreamVars)) - ds.Inputs[0].Streams = streams - - // Add input-level vars - input := policyTemplate.FindInputByType(streamInput) - if input != nil { - ds.Inputs[0].Vars = setKibanaVariables(input.Vars, common.MapStr(packagePolicy.Vars)) + allDatastreams, err := 
packages.ReadAllDataStreamManifests(packagePolicy.PackageRoot) + if err != nil { + return nil, fmt.Errorf("could not read data stream manifests: %w", err) + } + pp, err := kibana.BuildIntegrationPackagePolicy( + policy.ID, policy.Namespace, packagePolicy.Name, + manifest, policyTemplate, *dsManifest, + packagePolicy.InputName, + common.MapStr(packagePolicy.Vars), common.MapStr(packagePolicy.DataStreamVars), + !packagePolicy.Disabled, allDatastreams, + ) + if err != nil { + return nil, err } - - // Add package-level vars - ds.Vars = setKibanaVariables(manifest.Vars, common.MapStr(packagePolicy.Vars)) - - return &ds, nil + pp.OutputID = policy.DataOutputID + return &pp, nil } -func createInputPackagePolicy(policy FleetAgentPolicy, manifest packages.PackageManifest, packagePolicy FleetPackagePolicy) (*kibana.PackageDataStream, error) { +func createInputPackagePolicy(policy FleetAgentPolicy, manifest packages.PackageManifest, packagePolicy FleetPackagePolicy) (*kibana.PackagePolicy, error) { if dsName := packagePolicy.DataStreamName; dsName != "" { return nil, fmt.Errorf("no data stream expected for input package policy %q, found %q", packagePolicy.Name, dsName) } policyTemplateName := packagePolicy.TemplateName if policyTemplateName == "" { - name, err := findPolicyTemplateForInputPackage(manifest, packagePolicy.InputName) + name, err := packages.FindPolicyTemplateForInput(&manifest, nil, packagePolicy.InputName) if err != nil { return nil, fmt.Errorf("failed to determine the associated policy_template: %w", err) } policyTemplateName = name } - policyTemplate, err := selectPolicyTemplateByName(manifest.PolicyTemplates, policyTemplateName) + policyTemplate, err := packages.SelectPolicyTemplateByName(manifest.PolicyTemplates, policyTemplateName) if err != nil { return nil, fmt.Errorf("failed to find the selected policy_template: %w", err) } - ds := kibana.PackageDataStream{ - Name: packagePolicy.Name, - Namespace: policy.Namespace, - PolicyID: policy.ID, - Enabled: 
!packagePolicy.Disabled, - Inputs: []kibana.Input{ - { - PolicyTemplate: policyTemplate.Name, - Enabled: !packagePolicy.Disabled, - }, - }, - } - ds.Package.Name = manifest.Name - ds.Package.Title = manifest.Title - ds.Package.Version = manifest.Version - - streamInput := policyTemplate.Input - ds.Inputs[0].Type = streamInput - - streams := []kibana.Stream{ - { - ID: fmt.Sprintf("%s-%s.%s", streamInput, manifest.Name, policyTemplate.Name), - Enabled: !packagePolicy.Disabled, - DataStream: kibana.DataStream{ - Type: policyTemplate.Type, - Dataset: fmt.Sprintf("%s.%s", manifest.Name, policyTemplate.Name), - }, - }, - } - - // Add policyTemplate-level vars. - vars := setKibanaVariables(policyTemplate.Vars, common.MapStr(packagePolicy.Vars)) - if _, found := vars["data_stream.dataset"]; !found { - var value packages.VarValue - value.Unpack(policyTemplate.Name) - vars["data_stream.dataset"] = kibana.Var{ - Value: value, - Type: "text", - } - } - - streams[0].Vars = vars - ds.Inputs[0].Streams = streams - - return &ds, nil -} - -func setKibanaVariables(definitions []packages.Variable, values common.MapStr) kibana.Vars { - vars := kibana.Vars{} - for _, definition := range definitions { - // Elastic Package uses the deprecated 'inputs' array in its /api/fleet/package_policies request. - // When using this API parameter, default values are not automatically incorporated into - // the policy, whereas with the 'inputs' object, defaults are incorporated by the API service. - // This means that our client must include the default values in its request to ensure correct behavior. - val := definition.Default - - value, err := values.GetValue(definition.Name) - if err == nil { - val = &packages.VarValue{} - val.Unpack(value) - } else if errors.Is(err, common.ErrKeyNotFound) && definition.Default == nil { - // Do not include nulls for unset variables. 
- continue - } - - vars[definition.Name] = kibana.Var{ - Type: definition.Type, - Value: *val, - } - } - return vars + pp := kibana.BuildInputPackagePolicy( + policy.ID, policy.Namespace, packagePolicy.Name, + manifest, policyTemplate, + common.MapStr(packagePolicy.Vars), + !packagePolicy.Disabled, + ) + pp.OutputID = policy.DataOutputID + return &pp, nil } func (f *FleetAgentPolicy) Update(ctx resource.Context) error { @@ -365,100 +271,3 @@ func (s *FleetAgentPolicyState) NeedsUpdate(resource resource.Resource) (bool, e policy := resource.(*FleetAgentPolicy) return policy.Absent == (s.current != nil), nil } - -// getDataStreamIndex returns the index of the data stream whose input name -// matches. Otherwise it returns the 0. -func getDataStreamIndex(inputName string, ds packages.DataStreamManifest) int { - for i, s := range ds.Streams { - if s.Input == inputName { - return i - } - } - return 0 -} - -func getDataStreamDataset(pkg packages.PackageManifest, ds packages.DataStreamManifest) string { - if len(ds.Dataset) > 0 { - return ds.Dataset - } - return fmt.Sprintf("%s.%s", pkg.Name, ds.Name) -} - -func findPolicyTemplateForDataStream(pkg packages.PackageManifest, ds packages.DataStreamManifest, inputName string) (string, error) { - if inputName == "" { - if len(ds.Streams) == 0 { - return "", errors.New("no streams declared in data stream manifest") - } - inputName = ds.Streams[getDataStreamIndex(inputName, ds)].Input - } - - var matchedPolicyTemplates []string - for _, policyTemplate := range pkg.PolicyTemplates { - // Does this policy_template include this input type? - if policyTemplate.FindInputByType(inputName) == nil { - continue - } - - // Does the policy_template apply to this data stream (when data streams are specified)? 
- if len(policyTemplate.DataStreams) > 0 && !slices.Contains(policyTemplate.DataStreams, ds.Name) { - continue - } - - matchedPolicyTemplates = append(matchedPolicyTemplates, policyTemplate.Name) - } - - switch len(matchedPolicyTemplates) { - case 1: - return matchedPolicyTemplates[0], nil - case 0: - return "", fmt.Errorf("no policy template was found for data stream %q "+ - "with input type %q: verify that you have included the data stream "+ - "and input in the package's policy_template list", ds.Name, inputName) - default: - return "", fmt.Errorf("ambiguous result: multiple policy templates ([%s]) "+ - "were found that apply to data stream %q with input type %q: please "+ - "specify the 'policy_template' in the system test config", - strings.Join(matchedPolicyTemplates, ", "), ds.Name, inputName) - } -} - -func findPolicyTemplateForInputPackage(pkg packages.PackageManifest, inputName string) (string, error) { - if inputName == "" { - if len(pkg.PolicyTemplates) == 0 { - return "", errors.New("no policy templates specified for input package") - } - inputName = pkg.PolicyTemplates[0].Input - } - - var matched []string - for _, policyTemplate := range pkg.PolicyTemplates { - if policyTemplate.Input != inputName { - continue - } - - matched = append(matched, policyTemplate.Name) - } - - switch len(matched) { - case 1: - return matched[0], nil - case 0: - return "", fmt.Errorf("no policy template was found"+ - "with input type %q: verify that you have included the data stream "+ - "and input in the package's policy_template list", inputName) - default: - return "", fmt.Errorf("ambiguous result: multiple policy templates ([%s]) "+ - "with input type %q: please "+ - "specify the 'policy_template' in the system test config", - strings.Join(matched, ", "), inputName) - } -} - -func selectPolicyTemplateByName(policies []packages.PolicyTemplate, name string) (packages.PolicyTemplate, error) { - for _, policy := range policies { - if policy.Name == name { - return policy, 
nil - } - } - return packages.PolicyTemplate{}, fmt.Errorf("policy template %q not found", name) -} diff --git a/internal/resources/fleetpolicy_test.go b/internal/resources/fleetpolicy_test.go index d658e5cb9d..c0b93b0078 100644 --- a/internal/resources/fleetpolicy_test.go +++ b/internal/resources/fleetpolicy_test.go @@ -286,14 +286,22 @@ func TestCreateInputPackagePolicy_DatasetVariable(t *testing.T) { } require.NoError(t, err) require.NotNil(t, result) - require.Len(t, result.Inputs, 1) - require.Len(t, result.Inputs[0].Streams, 1) - streamVars := result.Inputs[0].Streams[0].Vars + // Find the enabled input by its key. + pt := c.manifest.PolicyTemplates[0] + inputKey := fmt.Sprintf("%s-%s", pt.Name, pt.Input) + inputEntry, ok := result.Inputs[inputKey] + require.True(t, ok, "expected input key %q in inputs map", inputKey) + require.True(t, inputEntry.Enabled) + + streamKey := fmt.Sprintf("%s.%s", c.manifest.Name, pt.Name) + streamEntry, ok := inputEntry.Streams[streamKey] + require.True(t, ok, "expected stream key %q in streams map", streamKey) + + streamVars := streamEntry.Vars require.Contains(t, streamVars, "data_stream.dataset", "stream vars must contain data_stream.dataset") - datasetVar := streamVars["data_stream.dataset"] - val := datasetVar.Value.Value() + val := streamVars["data_stream.dataset"] require.NotNil(t, val) assert.Equal(t, c.expectedDataset, val, "data_stream.dataset variable value") }) diff --git a/internal/stack/agentpolicy.go b/internal/stack/agentpolicy.go index 82f85156b1..2c7bd816b3 100644 --- a/internal/stack/agentpolicy.go +++ b/internal/stack/agentpolicy.go @@ -80,7 +80,7 @@ func createSystemPackagePolicy(ctx context.Context, kibanaClient *kibana.Client, packagePolicy.Package.Name = "system" packagePolicy.Package.Version = systemPackages[0].Version - _, err = kibanaClient.CreatePackagePolicy(ctx, packagePolicy) + _, err = kibanaClient.CreatePackagePolicy(ctx, packagePolicy, kibana.PolicyAPIFormatAuto) if err != nil { return 
fmt.Errorf("error while creating package policy: %w", err) } diff --git a/internal/stack/fleetserverpolicy.go b/internal/stack/fleetserverpolicy.go index cabf1d2d38..089ab3f845 100644 --- a/internal/stack/fleetserverpolicy.go +++ b/internal/stack/fleetserverpolicy.go @@ -70,7 +70,7 @@ func createFleetServerPackagePolicy(ctx context.Context, kibanaClient *kibana.Cl packagePolicy.Package.Name = "fleet_server" packagePolicy.Package.Version = packages[0].Version - _, err = kibanaClient.CreatePackagePolicy(ctx, packagePolicy) + _, err = kibanaClient.CreatePackagePolicy(ctx, packagePolicy, kibana.PolicyAPIFormatAuto) if err != nil { return fmt.Errorf("error while creating package policy: %w", err) } diff --git a/internal/testrunner/runners/policy/testconfig.go b/internal/testrunner/runners/policy/testconfig.go index cfddd740f4..75d2a0f3fd 100644 --- a/internal/testrunner/runners/policy/testconfig.go +++ b/internal/testrunner/runners/policy/testconfig.go @@ -22,6 +22,13 @@ type testConfig struct { DataStream struct { Vars map[string]any `config:"vars,omitempty" yaml:"vars,omitempty"` } `config:"data_stream" yaml:"data_stream"` + + // PolicyAPIFormat overrides the Fleet API format used to create the package + // policy. Valid values: "simplified" (objects-based), "legacy" (arrays-based), + // "" (auto-detect from Kibana version, default). + // Use "legacy" as a workaround when a Fleet simplified-API bug prevents the + // test from passing (e.g. select vars with "false"/"true" option values). 
+ PolicyAPIFormat string `config:"policy_api_format,omitempty" yaml:"policy_api_format,omitempty"` } func readTestConfig(testPath string) (*testConfig, error) { diff --git a/internal/testrunner/runners/policy/tester.go b/internal/testrunner/runners/policy/tester.go index 65104e70bb..cf8d0217d3 100644 --- a/internal/testrunner/runners/policy/tester.go +++ b/internal/testrunner/runners/policy/tester.go @@ -116,12 +116,13 @@ func (r *tester) runTest(ctx context.Context, manager *resources.Manager, testPa Namespace: "ep", PackagePolicies: []resources.FleetPackagePolicy{ { - Name: fmt.Sprintf("%s-%s-%s", testName, r.testFolder.Package, policyTestSuffix), - PackageRoot: r.packageRoot, - DataStreamName: r.testFolder.DataStream, - InputName: testConfig.Input, - Vars: testConfig.Vars, - DataStreamVars: testConfig.DataStream.Vars, + Name: fmt.Sprintf("%s-%s-%s", testName, r.testFolder.Package, policyTestSuffix), + PackageRoot: r.packageRoot, + DataStreamName: r.testFolder.DataStream, + InputName: testConfig.Input, + Vars: testConfig.Vars, + DataStreamVars: testConfig.DataStream.Vars, + PolicyAPIFormat: testConfig.PolicyAPIFormat, }, }, } diff --git a/internal/testrunner/runners/system/test_config.go b/internal/testrunner/runners/system/test_config.go index 075ef94223..3b919acc06 100644 --- a/internal/testrunner/runners/system/test_config.go +++ b/internal/testrunner/runners/system/test_config.go @@ -50,6 +50,13 @@ type testConfig struct { SkipTransformValidation bool `config:"skip_transform_validation"` + // PolicyAPIFormat overrides the Fleet API format used to create the package + // policy. Valid values: "simplified" (objects-based), "legacy" (arrays-based), + // "" (auto-detect from Kibana version, default). + // Use "legacy" as a workaround when a Fleet simplified-API bug prevents the + // test from passing (e.g. select vars with "false"/"true" option values). 
+ PolicyAPIFormat string `config:"policy_api_format"` + Assert struct { // HitCount expected number of hits for a given test HitCount int `config:"hit_count"` diff --git a/internal/testrunner/runners/system/testconfig_test.go b/internal/testrunner/runners/system/testconfig_test.go index 4bbc62e721..9ccfc867c9 100644 --- a/internal/testrunner/runners/system/testconfig_test.go +++ b/internal/testrunner/runners/system/testconfig_test.go @@ -13,8 +13,6 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/elastic-package/internal/servicedeployer" - - "github.com/elastic/elastic-package/internal/packages" ) func TestNewConfig(t *testing.T) { @@ -150,9 +148,8 @@ service: nginx }) } -// Ensure that vars with data_stream.dataset work with getExpectedDatasetForTest -// (used when building data stream names). -func TestNewConfig_VarsUsedByGetExpectedDatasetForTest(t *testing.T) { +// Ensure that vars with data_stream.dataset are correctly parsed in the config. +func TestNewConfig_DataStreamDatasetVar(t *testing.T) { dir := t.TempDir() configPath := filepath.Join(dir, "test-dataset-config.yml") err := os.WriteFile(configPath, []byte(` @@ -167,7 +164,6 @@ vars: require.NoError(t, err) require.NotNil(t, cfg) - // Same logic as in getExpectedDatasetForTest for input packages. 
- got := getExpectedDatasetForTest("input", "default.dataset", packages.PolicyTemplate{Name: "bar"}, cfg.Vars) - assert.Equal(t, "other.name", got, "getExpectedDatasetForTest should use vars.data_stream.dataset") + v, _ := cfg.Vars.GetValue("data_stream.dataset") + assert.Equal(t, "other.name", v, "vars.data_stream.dataset should be available in config vars") } diff --git a/internal/testrunner/runners/system/tester.go b/internal/testrunner/runners/system/tester.go index e226f1ba07..0753764bcf 100644 --- a/internal/testrunner/runners/system/tester.go +++ b/internal/testrunner/runners/system/tester.go @@ -997,7 +997,8 @@ type scenarioTest struct { dataStream string indexTemplateName string policyTemplate packages.PolicyTemplate - kibanaDataStream kibana.PackageDataStream + kibanaPolicy kibana.PackagePolicy + dataStreamDataset string syntheticEnabled bool docs []common.MapStr deprecationWarnings []deprecationWarning @@ -1048,13 +1049,13 @@ func (r *tester) prepareScenario(ctx context.Context, config *testConfig, stackC serviceOptions.DeployIndependentAgent = r.runIndependentElasticAgent policyTemplateName := config.PolicyTemplate if policyTemplateName == "" { - policyTemplateName, err = FindPolicyTemplateForInput(r.pkgManifest, r.dataStreamManifest, config.Input) + policyTemplateName, err = packages.FindPolicyTemplateForInput(r.pkgManifest, r.dataStreamManifest, config.Input) if err != nil { return nil, fmt.Errorf("failed to determine the associated policy_template: %w", err) } } - policyTemplate, err := SelectPolicyTemplateByName(r.pkgManifest.PolicyTemplates, policyTemplateName) + policyTemplate, err := packages.SelectPolicyTemplateByName(r.pkgManifest.PolicyTemplates, policyTemplateName) if err != nil { return nil, fmt.Errorf("failed to find the selected policy_template: %w", err) } @@ -1105,21 +1106,22 @@ func (r *tester) prepareScenario(ctx context.Context, config *testConfig, stackC scenario.startTestTime = time.Now() logger.Debug("adding package data stream to 
test policy...") - ds, err := CreatePackageDatastream(policyToTest, r.pkgManifest, policyTemplate, r.dataStreamManifest, config.Input, config.Vars, config.DataStream.Vars, policyToTest.Namespace) + policy, dsType, dsDataset, err := CreatePackagePolicy(policyToTest, r.pkgManifest, policyTemplate, r.dataStreamManifest, config.Input, config.Vars, config.DataStream.Vars, policyToTest.Namespace, r.packageRoot) if err != nil { return nil, fmt.Errorf("could not create package data stream: %w", err) } if r.runTearDown { logger.Debug("Skip adding data stream config to policy") } else { - if err := r.kibanaClient.AddPackageDataStreamToPolicy(ctx, ds); err != nil { + if _, err := r.kibanaClient.CreatePackagePolicy(ctx, policy, config.PolicyAPIFormat); err != nil { return nil, fmt.Errorf("could not add data stream config to policy: %w", err) } } - scenario.kibanaDataStream = ds + scenario.kibanaPolicy = policy + scenario.dataStreamDataset = dsDataset - scenario.indexTemplateName = BuildIndexTemplateName(ds, policyTemplate, r.pkgManifest.Type, config.Vars) - scenario.dataStream = BuildDataStreamName(ds, policyTemplate, r.pkgManifest.Type, config.Vars) + scenario.indexTemplateName = buildIndexTemplateName(dsType, dsDataset) + scenario.dataStream = BuildDataStreamName(dsType, dsDataset, policy.Namespace, policyTemplate, r.pkgManifest.Type) r.cleanTestScenarioHandler = func(ctx context.Context) error { logger.Debugf("Deleting data stream for testing %s", scenario.dataStream) @@ -1286,52 +1288,23 @@ func (r *tester) prepareScenario(ctx context.Context, config *testConfig, stackC return &scenario, nil } -// BuildIndexTemplateName builds the expected index template name that is installed in Elasticsearch +// buildIndexTemplateName builds the expected index template name that is installed in Elasticsearch // when the package data stream is added to the policy. 
-func BuildIndexTemplateName(ds kibana.PackageDataStream, policyTemplate packages.PolicyTemplate, packageType string, cfgVars common.MapStr) string { - dataStreamDataset := getExpectedDatasetForTest(packageType, ds.Inputs[0].Streams[0].DataStream.Dataset, policyTemplate, cfgVars) - - indexTemplateName := fmt.Sprintf( - "%s-%s", - ds.Inputs[0].Streams[0].DataStream.Type, - dataStreamDataset, - ) - return indexTemplateName +func buildIndexTemplateName(dsType, dsDataset string) string { + return fmt.Sprintf("%s-%s", dsType, dsDataset) } // BuildDataStreamName builds the expected data stream name that is installed in Elasticsearch // when the package data stream is added to the policy. -func BuildDataStreamName(ds kibana.PackageDataStream, policyTemplate packages.PolicyTemplate, packageType string, cfgVars common.MapStr) string { - dataStreamDataset := getExpectedDatasetForTest(packageType, ds.Inputs[0].Streams[0].DataStream.Dataset, policyTemplate, cfgVars) +func BuildDataStreamName(dsType, dsDataset, namespace string, policyTemplate packages.PolicyTemplate, packageType string) string { + dataset := dsDataset // Input packages using the otel collector input require to add a specific dataset suffix if packageType == "input" && policyTemplate.Input == otelCollectorInputName { - dataStreamDataset = fmt.Sprintf("%s.%s", dataStreamDataset, otelSuffixDataset) + dataset = fmt.Sprintf("%s.%s", dataset, otelSuffixDataset) } - dataStreamName := fmt.Sprintf( - "%s-%s-%s", - ds.Inputs[0].Streams[0].DataStream.Type, - dataStreamDataset, - ds.Namespace, - ) - return dataStreamName -} - -func getExpectedDatasetForTest(pkgType, dataset string, policyTemplate packages.PolicyTemplate, cfgVars common.MapStr) string { - if pkgType == "input" { - // Input packages can set `data_stream.dataset` by convention to customize the dataset. - v, _ := cfgVars.GetValue("data_stream.dataset") - if ds, ok := v.(string); ok && ds != "" { - return ds - } - // Some of them also set a default value. 
- if ds := findDefaultValue(policyTemplate.Vars, "data_stream.dataset"); ds != "" { - return ds - } - return policyTemplate.Name - } - return dataset + return fmt.Sprintf("%s-%s-%s", dsType, dataset, namespace) } // createOrGetKibanaPolicies creates the Kibana policies required for testing. @@ -1807,11 +1780,7 @@ func (r *tester) expectedDatasets(scenario *scenarioTest, config *testConfig) ([ if len(expectedDatasets) == 0 { // get dataset directly from package policy added when preparing the scenario - expectedDataset := getExpectedDatasetForTest( - r.pkgManifest.Type, - scenario.kibanaDataStream.Inputs[0].Streams[0].DataStream.Dataset, - scenario.policyTemplate, - config.Vars) + expectedDataset := scenario.dataStreamDataset if scenario.policyTemplate.Input == otelCollectorInputName { // Input packages whose input is `otelcol` must add the `.otel` suffix // Example: httpcheck.metrics.otel @@ -1944,7 +1913,9 @@ func (r *tester) checkEnrolledAgents(ctx context.Context, agentInfo agentdeploye return &agent, nil } -func CreatePackageDatastream( +// CreatePackagePolicy builds a PackagePolicy for the given package configuration, returning +// the policy along with the data stream type and dataset for building index/data stream names. 
+func CreatePackagePolicy( kibanaPolicy *kibana.Policy, pkg *packages.PackageManifest, policyTemplate packages.PolicyTemplate, @@ -1952,284 +1923,66 @@ func CreatePackageDatastream( cfgName string, cfgVars, cfgDSVars common.MapStr, suffix string, -) (kibana.PackageDataStream, error) { + packageRoot string, +) (policy kibana.PackagePolicy, dsType string, dsDataset string, err error) { if pkg.Type == "input" { - return createInputPackageDatastream(kibanaPolicy, pkg, policyTemplate, cfgVars, cfgDSVars, suffix), nil + p := kibana.BuildInputPackagePolicy( + kibanaPolicy.ID, kibanaPolicy.Namespace, + fmt.Sprintf("%s-%s-%s", pkg.Name, policyTemplate.Name, suffix), + *pkg, policyTemplate, cfgVars, true, + ) + fallbackDataset := fmt.Sprintf("%s.%s", pkg.Name, policyTemplate.Name) + return p, policyTemplate.Type, datasetFromPolicy(p, fallbackDataset), nil } if ds == nil { - return kibana.PackageDataStream{}, fmt.Errorf("data stream manifest is required for integration packages") - } - return createIntegrationPackageDatastream(kibanaPolicy, pkg, policyTemplate, ds, cfgName, cfgVars, cfgDSVars, suffix), nil -} - -func createIntegrationPackageDatastream( - kibanaPolicy *kibana.Policy, - pkg *packages.PackageManifest, - policyTemplate packages.PolicyTemplate, - ds *packages.DataStreamManifest, - cfgName string, - cfgVars, cfgDSVars common.MapStr, - suffix string, -) kibana.PackageDataStream { - r := kibana.PackageDataStream{ - Name: fmt.Sprintf("%s-%s-%s", pkg.Name, ds.Name, suffix), - Namespace: kibanaPolicy.Namespace, - PolicyID: kibanaPolicy.ID, - Enabled: true, - Inputs: []kibana.Input{ - { - PolicyTemplate: policyTemplate.Name, - Enabled: true, - }, - }, - } - r.Package.Name = pkg.Name - r.Package.Title = pkg.Title - r.Package.Version = pkg.Version - - stream := ds.Streams[getDataStreamIndex(cfgName, ds)] - streamInput := stream.Input - r.Inputs[0].Type = streamInput - - dataset := fmt.Sprintf("%s.%s", pkg.Name, ds.Name) - if len(ds.Dataset) > 0 { - dataset = ds.Dataset + 
return kibana.PackagePolicy{}, "", "", fmt.Errorf("data stream manifest is required for integration packages") } - streams := []kibana.Stream{ - { - ID: fmt.Sprintf("%s-%s.%s", streamInput, pkg.Name, ds.Name), - Enabled: true, - DataStream: kibana.DataStream{ - Type: ds.Type, - Dataset: dataset, - }, - }, - } - - // Add dataStream-level vars - streams[0].Vars = setKibanaVariables(stream.Vars, cfgDSVars) - r.Inputs[0].Streams = streams - - // Add input-level vars - input := policyTemplate.FindInputByType(streamInput) - if input != nil { - r.Inputs[0].Vars = setKibanaVariables(input.Vars, cfgVars) + if packageRoot == "" { + return kibana.PackagePolicy{}, "", "", fmt.Errorf("package root is required for integration packages") } - // Add package-level vars - r.Vars = setKibanaVariables(pkg.Vars, cfgVars) - - return r -} - -func createInputPackageDatastream( - kibanaPolicy *kibana.Policy, - pkg *packages.PackageManifest, - policyTemplate packages.PolicyTemplate, - cfgVars, cfgDSVars common.MapStr, - suffix string, -) kibana.PackageDataStream { - r := kibana.PackageDataStream{ - Name: fmt.Sprintf("%s-%s-%s", pkg.Name, policyTemplate.Name, suffix), - Namespace: kibanaPolicy.Namespace, - PolicyID: kibanaPolicy.ID, - Enabled: true, - } - r.Package.Name = pkg.Name - r.Package.Title = pkg.Title - r.Package.Version = pkg.Version - r.Inputs = []kibana.Input{ - { - PolicyTemplate: policyTemplate.Name, - Enabled: true, - Vars: kibana.Vars{}, - Type: policyTemplate.Input, - }, + allDatastreams, err := packages.ReadAllDataStreamManifests(packageRoot) + if err != nil { + return kibana.PackagePolicy{}, "", "", err } - streams := []kibana.Stream{ - { - ID: fmt.Sprintf("%s-%s.%s", policyTemplate.Input, pkg.Name, policyTemplate.Name), - Enabled: true, - DataStream: kibana.DataStream{ - Type: policyTemplate.Type, - // This dataset is the one Fleet uses to identify the stream, - // it must be .. 
This is not - // the same as the dataset used for the index template, configured - // with vars below. - Dataset: fmt.Sprintf("%s.%s", pkg.Name, policyTemplate.Name), - }, - }, + p, err := kibana.BuildIntegrationPackagePolicy( + kibanaPolicy.ID, kibanaPolicy.Namespace, + fmt.Sprintf("%s-%s-%s", pkg.Name, ds.Name, suffix), + *pkg, policyTemplate, *ds, cfgName, cfgVars, cfgDSVars, true, allDatastreams, + ) + if err != nil { + return kibana.PackagePolicy{}, "", "", err } - // Add policyTemplate-level vars. - vars := setKibanaVariables(policyTemplate.Vars, cfgVars) - - // data_stream.dataset is required by Fleet for input packages, so mimic the value the - // UI would use if this is not defined in the config or doesn't have a default. - if _, found := vars["data_stream.dataset"]; !found { - // Fleet uses the policy template name as default dataset for input packages, do the same. - dataStreamDataset := policyTemplate.Name - v, _ := cfgVars.GetValue("data_stream.dataset") - if dataset, ok := v.(string); ok && dataset != "" { - dataStreamDataset = dataset - } - - var value packages.VarValue - value.Unpack(dataStreamDataset) - vars["data_stream.dataset"] = kibana.Var{ - Value: value, - Type: "text", - } + dataset := fmt.Sprintf("%s.%s", pkg.Name, ds.Name) + if ds.Dataset != "" { + dataset = ds.Dataset } - - streams[0].Vars = vars - r.Inputs[0].Streams = streams - return r + return p, ds.Type, dataset, nil } -func findDefaultValue(vars []packages.Variable, name string) string { - for _, v := range vars { - if v.Name != name { +func datasetFromPolicy(policy kibana.PackagePolicy, fallback string) string { + for _, input := range policy.Inputs { + if !input.Enabled { continue } - if v.Default != nil { - value, ok := v.Default.Value().(string) - if ok && value != "" { - return value + for _, stream := range input.Streams { + if !stream.Enabled { + continue } - } - } - return "" -} -func setKibanaVariables(definitions []packages.Variable, values common.MapStr) kibana.Vars { - 
vars := kibana.Vars{} - for _, definition := range definitions { - // Elastic Package uses the deprecated 'inputs' array in its /api/fleet/package_policies request. - // When using this API parameter, default values are not automatically incorporated into - // the policy, whereas with the 'inputs' object, defaults are incorporated by the API service. - // This means that our client must include the default values in its request to ensure correct behavior. - val := definition.Default - - value, err := values.GetValue(definition.Name) - if err == nil { - val = &packages.VarValue{} - val.Unpack(value) - } else if errors.Is(err, common.ErrKeyNotFound) && definition.Default == nil { - // Do not include nulls for unset variables. - continue - } - - vars[definition.Name] = kibana.Var{ - Type: definition.Type, - Value: *val, - } - } - return vars -} - -// getDataStreamIndex returns the index of the data stream whose input name -// matches. Otherwise it returns the 0. -func getDataStreamIndex(inputName string, ds *packages.DataStreamManifest) int { - for i, s := range ds.Streams { - if s.Input == inputName { - return i - } - } - return 0 -} - -// FindPolicyTemplateForInput returns the name of the policy_template that -// applies to the input under test. An error is returned if no policy template -// matches or if multiple policy templates match and the response is ambiguous. 
-func FindPolicyTemplateForInput(pkg *packages.PackageManifest, ds *packages.DataStreamManifest, inputName string) (string, error) { - if pkg.Type == "input" { - return findPolicyTemplateForInputPackage(pkg, inputName) - } - if ds == nil { - return "", errors.New("data stream must be specified for integration packages") - } - return findPolicyTemplateForDataStream(pkg, ds, inputName) -} - -func findPolicyTemplateForDataStream(pkg *packages.PackageManifest, ds *packages.DataStreamManifest, inputName string) (string, error) { - if inputName == "" { - if len(ds.Streams) == 0 { - return "", errors.New("no streams declared in data stream manifest") - } - inputName = ds.Streams[getDataStreamIndex(inputName, ds)].Input - } - - var matchedPolicyTemplates []string - for _, policyTemplate := range pkg.PolicyTemplates { - // Does this policy_template include this input type? - if policyTemplate.FindInputByType(inputName) == nil { - continue - } - - // Does the policy_template apply to this data stream (when data streams are specified)? 
- if len(policyTemplate.DataStreams) > 0 && !slices.Contains(policyTemplate.DataStreams, ds.Name) { - continue - } - - matchedPolicyTemplates = append(matchedPolicyTemplates, policyTemplate.Name) - } - - switch len(matchedPolicyTemplates) { - case 1: - return matchedPolicyTemplates[0], nil - case 0: - return "", fmt.Errorf("no policy template was found for data stream %q "+ - "with input type %q: verify that you have included the data stream "+ - "and input in the package's policy_template list", ds.Name, inputName) - default: - return "", fmt.Errorf("ambiguous result: multiple policy templates ([%s]) "+ - "were found that apply to data stream %q with input type %q: please "+ - "specify the 'policy_template' in the system test config", - strings.Join(matchedPolicyTemplates, ", "), ds.Name, inputName) - } -} - -func findPolicyTemplateForInputPackage(pkg *packages.PackageManifest, inputName string) (string, error) { - if inputName == "" { - if len(pkg.PolicyTemplates) == 0 { - return "", errors.New("no policy templates specified for input package") - } - inputName = pkg.PolicyTemplates[0].Input - } + v, _ := common.MapStr(stream.Vars).GetValue("data_stream.dataset") + ds, _ := v.(string) + if ds == "" { + continue + } - var matched []string - for _, policyTemplate := range pkg.PolicyTemplates { - if policyTemplate.Input != inputName { - continue + return ds } - - matched = append(matched, policyTemplate.Name) } - switch len(matched) { - case 1: - return matched[0], nil - case 0: - return "", fmt.Errorf("no policy template was found"+ - "with input type %q: verify that you have included the data stream "+ - "and input in the package's policy_template list", inputName) - default: - return "", fmt.Errorf("ambiguous result: multiple policy templates ([%s]) "+ - "with input type %q: please "+ - "specify the 'policy_template' in the system test config", - strings.Join(matched, ", "), inputName) - } -} - -func SelectPolicyTemplateByName(policies []packages.PolicyTemplate, 
name string) (packages.PolicyTemplate, error) { - for _, policy := range policies { - if policy.Name == name { - return policy, nil - } - } - return packages.PolicyTemplate{}, fmt.Errorf("policy template %q not found", name) + return fallback } func (r *tester) checkTransforms(ctx context.Context, config *testConfig, pkgManifest *packages.PackageManifest, dataStream, policyTemplateInput string, syntheticEnabled bool) error { diff --git a/internal/testrunner/runners/system/tester_test.go b/internal/testrunner/runners/system/tester_test.go index 34efe0c742..d4d4f11889 100644 --- a/internal/testrunner/runners/system/tester_test.go +++ b/internal/testrunner/runners/system/tester_test.go @@ -152,7 +152,7 @@ func TestFindPolicyTemplateForInput(t *testing.T) { tc := tc t.Run(tc.testName, func(t *testing.T) { - name, err := FindPolicyTemplateForInput(tc.pkg, ds, inputName) + name, err := packages.FindPolicyTemplateForInput(tc.pkg, ds, inputName) if tc.err != "" { require.Errorf(t, err, "expected err containing %q", tc.err) @@ -440,89 +440,6 @@ func TestIsSyntheticSourceModeEnabled(t *testing.T) { } } -func TestGetExpectedDatasetForTest(t *testing.T) { - defaultValue := func(v any) *packages.VarValue { - vv := &packages.VarValue{} - vv.Unpack(v) - return vv - } - - cases := []struct { - title string - expected string - - packageType string - datasetInPolicy string - policyTemplate packages.PolicyTemplate - vars common.MapStr - }{ - { - title: "data stream in integration package", - expected: "foo.bar", - packageType: "integration", - datasetInPolicy: "foo.bar", - policyTemplate: packages.PolicyTemplate{Name: "bar"}, - }, - { - title: "input package", - expected: "bar", - packageType: "input", - datasetInPolicy: "foo.bar", - policyTemplate: packages.PolicyTemplate{Name: "bar"}, - }, - { - title: "input package with default value", - expected: "foo.default", - packageType: "input", - datasetInPolicy: "foo.bar", - policyTemplate: packages.PolicyTemplate{ - Name: "bar", - Vars: 
[]packages.Variable{ - { - Name: "data_stream.dataset", - Default: defaultValue("foo.default"), - }, - }, - }, - }, - { - title: "input package with user-defined variable", - expected: "foo.custom", - packageType: "input", - datasetInPolicy: "foo.bar", - policyTemplate: packages.PolicyTemplate{Name: "bar"}, - vars: common.MapStr{ - "data_stream.dataset": "foo.custom", - }, - }, - { - title: "input package with default value and user-defined variable", - expected: "foo.custom", - packageType: "input", - datasetInPolicy: "foo.bar", - policyTemplate: packages.PolicyTemplate{ - Name: "bar", - Vars: []packages.Variable{ - { - Name: "data_stream.dataset", - Default: defaultValue("foo.default"), - }, - }, - }, - vars: common.MapStr{ - "data_stream.dataset": "foo.custom", - }, - }, - } - - for _, c := range cases { - t.Run(c.title, func(t *testing.T) { - found := getExpectedDatasetForTest(c.packageType, c.datasetInPolicy, c.policyTemplate, c.vars) - assert.Equal(t, c.expected, found) - }) - } -} - func TestPipelineErrorMessage(t *testing.T) { testCases := []struct { name string diff --git a/internal/testrunner/script/data_stream.go b/internal/testrunner/script/data_stream.go index a53025f019..8673bdbb50 100644 --- a/internal/testrunner/script/data_stream.go +++ b/internal/testrunner/script/data_stream.go @@ -19,6 +19,7 @@ import ( "github.com/elastic/go-ucfg/yaml" "github.com/elastic/elastic-package/internal/common" + "github.com/elastic/elastic-package/internal/kibana" "github.com/elastic/elastic-package/internal/packages" "github.com/elastic/elastic-package/internal/testrunner/runners/system" ) @@ -99,21 +100,22 @@ func addPackagePolicy(ts *testscript.TestScript, neg bool, args []string) { ts.Check(decoratedWith("reading data stream manifest", err)) if *polName == "" { - *polName, err = system.FindPolicyTemplateForInput(pkgMan, dsMan, config.Input) + *polName, err = packages.FindPolicyTemplateForInput(pkgMan, dsMan, config.Input) ts.Check(decoratedWith("finding policy 
template name", err)) } - templ, err := system.SelectPolicyTemplateByName(pkgMan.PolicyTemplates, *polName) + templ, err := packages.SelectPolicyTemplateByName(pkgMan.PolicyTemplates, *polName) ts.Check(decoratedWith("finding policy template", err)) - pds, err := system.CreatePackageDatastream(installed.testingPolicy, pkgMan, templ, dsMan, config.Input, config.Vars, config.DataStream.Vars, installed.testingPolicy.Namespace) - ts.Check(decoratedWith("creating package data stream", err)) - ts.Check(decoratedWith("adding data stream to policy", stk.kibana.AddPackageDataStreamToPolicy(ctx, pds))) + policy, dsType, dsDataset, err := system.CreatePackagePolicy(installed.testingPolicy, pkgMan, templ, dsMan, config.Input, config.Vars, config.DataStream.Vars, installed.testingPolicy.Namespace, pkgRoot) + ts.Check(decoratedWith("creating package policy", err)) + _, err = stk.kibana.CreatePackagePolicy(ctx, policy, kibana.PolicyAPIFormatAuto) + ts.Check(decoratedWith("adding package policy", err)) pol, err := stk.kibana.GetPolicy(ctx, installed.testingPolicy.ID) ts.Check(decoratedWith("reading policy", err)) ts.Check(decoratedWith("assigning policy", stk.kibana.AssignPolicyToAgent(ctx, installed.enrolled, *pol))) - dsName := system.BuildDataStreamName(pds, templ, pkgMan.Type, config.Vars) + dsName := system.BuildDataStreamName(dsType, dsDataset, installed.testingPolicy.Namespace, templ, pkgMan.Type) ts.Setenv(dsNameLabel, dsName) dataStreams[dsName] = struct{}{} diff --git a/internal/testrunner/script/stack.go b/internal/testrunner/script/stack.go index c88a9d20e7..f446a4e094 100644 --- a/internal/testrunner/script/stack.go +++ b/internal/testrunner/script/stack.go @@ -277,7 +277,6 @@ func getDocs(ts *testscript.TestScript, neg bool, args []string) { stk.es.Search.WithBody(strings.NewReader(system.FieldsQuery)), stk.es.Search.WithIgnoreUnavailable(true), ) - resp.String() ts.Check(decoratedWith("performing search", err)) body.Reset() _, err = io.Copy(&body, resp.Body) diff 
--git a/test/packages/other/with_legacy_policy_api/changelog.yml b/test/packages/other/with_legacy_policy_api/changelog.yml new file mode 100644 index 0000000000..0d0ae227c8 --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/changelog.yml @@ -0,0 +1,5 @@ +- version: "0.0.1" + changes: + - description: Initial package to test policy_api_format setting. + type: enhancement + link: https://github.com/elastic/elastic-package/pull/1 diff --git a/test/packages/other/with_legacy_policy_api/data_stream/indicator/_dev/test/system/test-default-config.yml b/test/packages/other/with_legacy_policy_api/data_stream/indicator/_dev/test/system/test-default-config.yml new file mode 100644 index 0000000000..203914b6a1 --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/data_stream/indicator/_dev/test/system/test-default-config.yml @@ -0,0 +1,12 @@ +# policy_api_format: legacy is required because this package has a select variable +# (revoked) with "false" as an option value. Fleet's simplified API coerces the +# string "false" to boolean false before validation, causing a 400 error. 
+# See: https://github.com/elastic/kibana/issues/XXXXX +policy_api_format: legacy +vars: + revoked: "false" + environment: "staging" +data_stream: + vars: + paths: + - "/usr/share/elastic-agent/state/data/logs/*" diff --git a/test/packages/other/with_legacy_policy_api/data_stream/indicator/agent/stream/stream.yml.hbs b/test/packages/other/with_legacy_policy_api/data_stream/indicator/agent/stream/stream.yml.hbs new file mode 100644 index 0000000000..908a4421a8 --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/data_stream/indicator/agent/stream/stream.yml.hbs @@ -0,0 +1,11 @@ +paths: +{{#each paths as |path i|}} + - {{path}} +{{/each}} +exclude_files: [".gz$"] +processors: + - add_locale: ~ + - add_tags: + tags: + - revoked_{{revoked}} + - env_{{environment}} diff --git a/test/packages/other/with_legacy_policy_api/data_stream/indicator/fields/base-fields.yml b/test/packages/other/with_legacy_policy_api/data_stream/indicator/fields/base-fields.yml new file mode 100644 index 0000000000..c233075896 --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/data_stream/indicator/fields/base-fields.yml @@ -0,0 +1,20 @@ +- name: data_stream.type + type: constant_keyword + description: Data stream type. +- name: data_stream.dataset + type: constant_keyword + description: Data stream dataset. +- name: data_stream.namespace + type: constant_keyword + description: Data stream namespace. +- name: '@timestamp' + type: date + description: Event timestamp. 
+- name: event.module + type: constant_keyword + description: Event module + value: with_legacy_policy_api +- name: event.dataset + type: constant_keyword + description: Event dataset + value: with_legacy_policy_api.indicator diff --git a/test/packages/other/with_legacy_policy_api/data_stream/indicator/fields/fields.yml b/test/packages/other/with_legacy_policy_api/data_stream/indicator/fields/fields.yml new file mode 100644 index 0000000000..9aa13de166 --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/data_stream/indicator/fields/fields.yml @@ -0,0 +1,23 @@ +- name: ecs.version + type: keyword + description: ECS version this event conforms to. +- name: input.type + type: keyword + description: Type of Filebeat input. +- name: log.file.path + type: keyword + description: Full path to the log file this event came from. + ignore_above: 1024 + multi_fields: + - name: text + type: text +- name: log.offset + type: long + description: Offset of the entry in the log file. +- name: message + type: match_only_text + description: Log message optimized for viewing in a log viewer. +- name: tags + type: keyword + description: List of keywords used to tag each event. 
+ ignore_above: 1024 diff --git a/test/packages/other/with_legacy_policy_api/data_stream/indicator/manifest.yml b/test/packages/other/with_legacy_policy_api/data_stream/indicator/manifest.yml new file mode 100644 index 0000000000..61f93fd5be --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/data_stream/indicator/manifest.yml @@ -0,0 +1,13 @@ +title: "Indicator" +type: logs +streams: + - input: logfile + title: Indicator logs + description: Collect indicator logs + vars: + - name: paths + type: text + title: Paths + multi: true + default: + - /var/log/*.log diff --git a/test/packages/other/with_legacy_policy_api/data_stream/indicator/sample_event.json b/test/packages/other/with_legacy_policy_api/data_stream/indicator/sample_event.json new file mode 100644 index 0000000000..b3cddf15a9 --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/data_stream/indicator/sample_event.json @@ -0,0 +1,66 @@ +{ + "@timestamp": "2026-03-05T12:25:52.686Z", + "agent": { + "ephemeral_id": "32155baa-1d7e-4bdc-90d9-3d79c8c8816c", + "id": "c4b10722-86d2-4df4-a5bb-535a04109b8a", + "name": "elastic-agent-45757", + "type": "filebeat", + "version": "9.3.1" + }, + "data_stream": { + "dataset": "with_legacy_policy_api.indicator", + "namespace": "31787", + "type": "logs" + }, + "ecs": { + "version": "8.0.0" + }, + "elastic_agent": { + "id": "c4b10722-86d2-4df4-a5bb-535a04109b8a", + "snapshot": false, + "version": "9.3.1" + }, + "event": { + "agent_id_status": "verified", + "dataset": "with_legacy_policy_api.indicator", + "ingested": "2026-03-05T12:25:54Z", + "module": "with_legacy_policy_api", + "timezone": "+00:00" + }, + "host": { + "architecture": "x86_64", + "containerized": false, + "hostname": "elastic-agent-45757", + "ip": [ + "172.27.0.2", + "172.18.0.4" + ], + "mac": [ + "3E-EB-C9-F5-F0-40", + "96-6F-29-0A-37-CE" + ], + "name": "elastic-agent-45757", + "os": { + "family": "", + "kernel": "6.8.0-101-generic", + "name": "Wolfi", + "platform": "wolfi", + "type": 
"linux", + "version": "20230201" + } + }, + "input": { + "type": "log" + }, + "log": { + "file": { + "path": "/usr/share/elastic-agent/state/data/logs/elastic-agent-20260305.ndjson" + }, + "offset": 0 + }, + "message": "{\"log.level\":\"info\",\"@timestamp\":\"2026-03-05T12:25:31.260Z\",\"log.origin\":{\"function\":\"github.com/elastic/elastic-agent/internal/pkg/agent/cmd.runElasticAgent\",\"file.name\":\"cmd/run.go\",\"file.line\":310},\"message\":\"Elastic Agent started\",\"log.source\":\"elastic-agent\",\"process.pid\":7,\"agent.version\":\"9.3.1\",\"agent.unprivileged\":true,\"ecs.version\":\"1.6.0\"}", + "tags": [ + "revoked_false", + "env_staging" + ] +} diff --git a/test/packages/other/with_legacy_policy_api/docs/README.md b/test/packages/other/with_legacy_policy_api/docs/README.md new file mode 100644 index 0000000000..50ff75e23f --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/docs/README.md @@ -0,0 +1,17 @@ +# with_legacy_policy_api + +Test package that demonstrates the `policy_api_format: legacy` system test +configuration setting. + +This package includes a `select`-type variable (`revoked`) with `"false"` as +one of its option values. Fleet's simplified package policy API has a bug where +`schema.boolean()` runs before `schema.string()` in its validation schema, +causing the JSON string `"false"` to be coerced to the boolean `false`. The +subsequent check `["", "false", "true"].includes(false)` then fails, returning +a 400 error. + +The workaround is to set `policy_api_format: legacy` in the system test config, +which makes elastic-package use the legacy arrays-based Fleet API instead of +the simplified objects-based API. The legacy API wraps variable values with +`{"type": ..., "value": ...}` objects, preserving the string type and avoiding +the coercion. 
diff --git a/test/packages/other/with_legacy_policy_api/img/sample-logo.svg b/test/packages/other/with_legacy_policy_api/img/sample-logo.svg new file mode 100644 index 0000000000..6268dd88f3 --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/img/sample-logo.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/test/packages/other/with_legacy_policy_api/img/sample-screenshot.png b/test/packages/other/with_legacy_policy_api/img/sample-screenshot.png new file mode 100644 index 0000000000..d7a56a3ecc Binary files /dev/null and b/test/packages/other/with_legacy_policy_api/img/sample-screenshot.png differ diff --git a/test/packages/other/with_legacy_policy_api/manifest.yml b/test/packages/other/with_legacy_policy_api/manifest.yml new file mode 100644 index 0000000000..086bc5c72d --- /dev/null +++ b/test/packages/other/with_legacy_policy_api/manifest.yml @@ -0,0 +1,57 @@ +format_version: 3.1.3 +name: with_legacy_policy_api +title: "Package requiring legacy policy API format." +version: 0.0.1 +source: + license: "Apache-2.0" +description: > + This package has a select-type variable with "false" as an option value. + Fleet's simplified package policy API coerces the string "false" to a boolean, + causing validation to fail. The system test must use policy_api_format: legacy + to work around this Fleet bug. +type: integration +categories: + - custom +conditions: + kibana: + version: "^8.12.2" + elastic: + subscription: "basic" +vars: + - name: environment + type: text + title: Environment + description: Environment label added to all events (e.g. production, staging). + required: false + show_user: true + default: "production" +policy_templates: + - name: with_legacy_policy_api + title: Indicator + description: Collect indicator data. + inputs: + - type: logfile + title: Collect indicator logs + description: Collect indicator logs + vars: + - name: revoked + type: select + title: Revoked Status + description: > + Filter by revoked status. 
This select variable has "false"/"true" as + option values, which triggers a Fleet bug in the simplified API where + string "false" is coerced to boolean false, failing validation. + options: + - text: All Indicators + value: "" + - text: Active Only (Not Revoked) + value: "false" + - text: Revoked Only + value: "true" + default: "" + multi: false + required: true + show_user: true +owner: + github: elastic/elastic-package + type: elastic diff --git a/test/packages/parallel/sql_input/manifest.yml b/test/packages/parallel/sql_input/manifest.yml index 8b046bf4c8..619eabbb35 100644 --- a/test/packages/parallel/sql_input/manifest.yml +++ b/test/packages/parallel/sql_input/manifest.yml @@ -8,6 +8,15 @@ version: 0.2.0 categories: - custom - datastore +vars: + - name: driver + type: text + title: Driver + description: "Supported database drivers: mssql, mysql, oracle, postgres" + multi: false + required: true + show_user: true + default: "mysql" policy_templates: - name: sql_query type: metrics @@ -31,14 +40,6 @@ policy_templates: required: true show_user: true default: 10s - - name: driver - type: text - title: Driver - description: "Supported database drivers: mssql, mysql, oracle, postgres" - multi: false - required: true - show_user: true - default: "mysql" - name: sql_query type: text title: Query diff --git a/test/packages/parallel/zipkin_input_otel.stack_version b/test/packages/parallel/zipkin_input_otel.stack_version new file mode 100644 index 0000000000..b7ca1e13b0 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel.stack_version @@ -0,0 +1 @@ +9.4.0-SNAPSHOT diff --git a/test/packages/parallel/zipkin_input_otel/LICENSE.txt b/test/packages/parallel/zipkin_input_otel/LICENSE.txt new file mode 100644 index 0000000000..d317b57b29 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/LICENSE.txt @@ -0,0 +1,93 @@ +Elastic License 2.0 + +URL: https://www.elastic.co/licensing/elastic-license + +## Acceptance + +By using the software, you agree to all of 
the terms and conditions below. + +## Copyright License + +The licensor grants you a non-exclusive, royalty-free, worldwide, +non-sublicensable, non-transferable license to use, copy, distribute, make +available, and prepare derivative works of the software, in each case subject to +the limitations and conditions below. + +## Limitations + +You may not provide the software to third parties as a hosted or managed +service, where the service provides users with access to any substantial set of +the features or functionality of the software. + +You may not move, change, disable, or circumvent the license key functionality +in the software, and you may not remove or obscure any functionality in the +software that is protected by the license key. + +You may not alter, remove, or obscure any licensing, copyright, or other notices +of the licensor in the software. Any use of the licensor's trademarks is subject +to applicable law. + +## Patents + +The licensor grants you a license, under any patent claims the licensor can +license, or becomes able to license, to make, have made, use, sell, offer for +sale, import and have imported the software, in each case subject to the +limitations and conditions in this license. This license does not cover any +patent claims that you cause to be infringed by modifications or additions to +the software. If you or your company make any written claim that the software +infringes or contributes to infringement of any patent, your patent license for +the software granted under these terms ends immediately. If your company makes +such a claim, your patent license ends immediately for work on behalf of your +company. + +## Notices + +You must ensure that anyone who gets a copy of any part of the software from you +also gets a copy of these terms. + +If you modify the software, you must include in any modified copies of the +software prominent notices stating that you have modified the software. 
+ +## No Other Rights + +These terms do not imply any licenses other than those expressly granted in +these terms. + +## Termination + +If you use the software in violation of these terms, such use is not licensed, +and your licenses will automatically terminate. If the licensor provides you +with a notice of your violation, and you cease all violation of this license no +later than 30 days after you receive that notice, your licenses will be +reinstated retroactively. However, if you violate these terms after such +reinstatement, any additional violation of these terms will cause your licenses +to terminate automatically and permanently. + +## No Liability + +*As far as the law allows, the software comes as is, without any warranty or +condition, and the licensor will not be liable to you for any damages arising +out of these terms or the use or nature of the software, under any kind of +legal claim.* + +## Definitions + +The **licensor** is the entity offering these terms, and the **software** is the +software the licensor makes available under these terms, including any portion +of it. + +**you** refers to the individual or entity agreeing to these terms. + +**your company** is any legal entity, sole proprietorship, or other kind of +organization that you work for, plus all organizations that have control over, +are under the control of, or are under common control with that +organization. **control** means ownership of substantially all the assets of an +entity, or the power to direct its management and policies by vote, contract, or +otherwise. Control can be direct or indirect. + +**your licenses** are all the licenses granted to you for the software under +these terms. + +**use** means anything you do with the software requiring one of your licenses. + +**trademark** means trademarks, service marks, and similar rights. 
diff --git a/test/packages/parallel/zipkin_input_otel/_dev/build/docs/README.md b/test/packages/parallel/zipkin_input_otel/_dev/build/docs/README.md new file mode 100644 index 0000000000..80968afe73 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/build/docs/README.md @@ -0,0 +1,22 @@ +# Zipkin OpenTelemetry Input Package + +## Overview +The Zipkin OpenTelemetry Input Package for Elastic enables collection of trace data from applications instrumented with [Zipkin](https://zipkin.io/) through OpenTelemetry protocols using the [zipkinreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/zipkinreceiver#zipkin-receiver). + +### How it works +This package receives Zipkin trace data (V1 and V2 JSON/Protobuf formats) by configuring the Zipkin receiver in the Input Package, which then gets applied to the zipkinreceiver present in the EDOT collector, which then forwards the data to Elastic Agent. The Elastic Agent processes and enriches the data before sending it to Elasticsearch for indexing and analysis. + +## Configuration + +For the full list of settings exposed for the receiver and examples, refer to the [Zipkin Receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/zipkinreceiver). + +## Troubleshooting + +If you encounter issues: + +1. Verify the endpoint is accessible and not blocked by a firewall. +2. Ensure applications are sending Zipkin-formatted traces to the configured endpoint (default: `http://:9411`). +3. Check the Elastic Agent logs for any receiver errors. + +## Traces reference +For more details about the Zipkin receiver and its configuration options, refer to the [Zipkin Receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/zipkinreceiver) in the upstream OpenTelemetry Collector repository. 
diff --git a/test/packages/parallel/zipkin_input_otel/_dev/deploy/docker/docker-compose.yml b/test/packages/parallel/zipkin_input_otel/_dev/deploy/docker/docker-compose.yml new file mode 100644 index 0000000000..0cd0903cae --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/deploy/docker/docker-compose.yml @@ -0,0 +1,21 @@ +version: '2.3' +services: + run_queries: + image: curlimages/curl + environment: + - TARGET_URL=http://backend:9000 + volumes: + - ./scripts/run_queries.sh:/run_queries.sh + command: ["sh", "/run_queries.sh"] + depends_on: + backend: + condition: service_healthy + # Scenario based on the example defined in + # https://github.com/openzipkin/brave-example/tree/a71b18511b4fc8cbbe67ea2a6fd5ca43048c2465/docker + # https://github.com/openzipkin/brave-example/blob/a71b18511b4fc8cbbe67ea2a6fd5ca43048c2465/README.md#running-the-example + # The backend container is used to generate traces that are sent to the elastic-agent container. + backend: + image: ghcr.io/openzipkin/brave-example:armeria + entrypoint: start-backend + environment: + - ZIPKIN_BASEURL=http://elastic-agent:9411 diff --git a/test/packages/parallel/zipkin_input_otel/_dev/deploy/docker/scripts/run_queries.sh b/test/packages/parallel/zipkin_input_otel/_dev/deploy/docker/scripts/run_queries.sh new file mode 100755 index 0000000000..242c3d8deb --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/deploy/docker/scripts/run_queries.sh @@ -0,0 +1,27 @@ +#!/usr/bin/env sh + +set -ex + +TARGET_URL=${TARGET_URL:-http://backend:9000} +SLEEP_TIME=${SLEEP_TIME:-5} + +# NOTE: it cannot be done using SIGHUP signal, since the backend container is also +# killed and the traces would not be sent to the elastic-agent container. 
+ +# Wait for the elastic-agent container to be ready with the corresponding +# agent policy assigned +sleep ${SLEEP_TIME} + +echo "Sending traces to ${TARGET_URL}/api" +i=0 +while true; do + echo "Trigger query ${i}" + # Force creating traces containing requests with errors + # The following curl command will fail with a 404 error per the backend code. + curl -s -o /dev/null "${TARGET_URL}"; sleep 1; + + ## Create traces without any errors + # curl ${TARGET_URL}/api will return a 200 status code + curl -s -o /dev/null "${TARGET_URL}/api"; sleep 1; + i=$((i+1)) +done diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-custom-dataset.expected b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-custom-dataset.expected new file mode 100644 index 0000000000..1b501b75df --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-custom-dataset.expected @@ -0,0 +1,76 @@ +connectors: + elasticapm: {} + forward: {} +exporters: + elasticsearch/componentid-0: + endpoints: + - https://elasticsearch:9200 +inputs: [] +output_permissions: + default: + _elastic_agent_checks: + cluster: + - monitor + _elastic_agent_monitoring: + indices: [] + uuid-for-permissions-on-related-indices: + indices: + - names: + - traces-*-* + privileges: + - auto_configure + - create_doc + - names: + - logs-generic.otel-* + privileges: + - auto_configure + - create_doc + - names: + - metrics-*-* + privileges: + - auto_configure + - create_doc +processors: + elasticapm: {} + transform/componentid-0: + trace_statements: + - context: span + statements: + - set(attributes["data_stream.type"], "traces") + - set(attributes["data_stream.dataset"], "zipkin.custom") + - set(attributes["data_stream.namespace"], "ep") + - context: spanevent + statements: + - set(attributes["data_stream.type"], "logs") + - set(attributes["data_stream.namespace"], "ep") +receivers: + zipkin/componentid-0: + endpoint: 0.0.0.0:9411 + 
parse_string_tags: true +secret_references: [] +service: + pipelines: + metrics: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + metrics/componentid-1: + exporters: + - forward + receivers: + - elasticapm + traces: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + traces/componentid-0: + exporters: + - elasticapm + - forward + processors: + - elasticapm + - transform/componentid-0 + receivers: + - zipkin/componentid-0 diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-custom-dataset.yml b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-custom-dataset.yml new file mode 100644 index 0000000000..90f01adbef --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-custom-dataset.yml @@ -0,0 +1,4 @@ +vars: + endpoint: "0.0.0.0:9411" + parse_string_tags: true + data_stream.dataset: "zipkin.custom" diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-disable-use-apm.expected b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-disable-use-apm.expected new file mode 100644 index 0000000000..9bfab820c3 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-disable-use-apm.expected @@ -0,0 +1,57 @@ +connectors: + forward: {} +exporters: + elasticsearch/componentid-0: + endpoints: + - https://elasticsearch:9200 +inputs: [] +output_permissions: + default: + _elastic_agent_checks: + cluster: + - monitor + _elastic_agent_monitoring: + indices: [] + uuid-for-permissions-on-related-indices: + indices: + - names: + - traces-*-* + privileges: + - auto_configure + - create_doc + - names: + - logs-generic.otel-* + privileges: + - auto_configure + - create_doc +processors: + transform/componentid-0: + trace_statements: + - context: span + statements: + - set(attributes["data_stream.type"], "traces") + - set(attributes["data_stream.dataset"], "zipkinreceiver") + - 
set(attributes["data_stream.namespace"], "ep") + - context: spanevent + statements: + - set(attributes["data_stream.type"], "logs") + - set(attributes["data_stream.namespace"], "ep") +receivers: + zipkin/componentid-0: + endpoint: 0.0.0.0:9411 + parse_string_tags: true +secret_references: [] +service: + pipelines: + traces: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + traces/componentid-0: + exporters: + - forward + processors: + - transform/componentid-0 + receivers: + - zipkin/componentid-0 diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-disable-use-apm.yml b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-disable-use-apm.yml new file mode 100644 index 0000000000..70e101c31d --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-disable-use-apm.yml @@ -0,0 +1,4 @@ +vars: + endpoint: "0.0.0.0:9411" + parse_string_tags: true + use_apm: false diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-use-apm.expected b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-use-apm.expected new file mode 100644 index 0000000000..0db5270d10 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-use-apm.expected @@ -0,0 +1,76 @@ +connectors: + elasticapm: {} + forward: {} +exporters: + elasticsearch/componentid-0: + endpoints: + - https://elasticsearch:9200 +inputs: [] +output_permissions: + default: + _elastic_agent_checks: + cluster: + - monitor + _elastic_agent_monitoring: + indices: [] + uuid-for-permissions-on-related-indices: + indices: + - names: + - traces-*-* + privileges: + - auto_configure + - create_doc + - names: + - logs-generic.otel-* + privileges: + - auto_configure + - create_doc + - names: + - metrics-*-* + privileges: + - auto_configure + - create_doc +processors: + elasticapm: {} + transform/componentid-0: + trace_statements: + - context: span + 
statements: + - set(attributes["data_stream.type"], "traces") + - set(attributes["data_stream.dataset"], "zipkinreceiver") + - set(attributes["data_stream.namespace"], "ep") + - context: spanevent + statements: + - set(attributes["data_stream.type"], "logs") + - set(attributes["data_stream.namespace"], "ep") +receivers: + zipkin/componentid-0: + endpoint: 0.0.0.0:9411 + parse_string_tags: true +secret_references: [] +service: + pipelines: + metrics: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + metrics/componentid-1: + exporters: + - forward + receivers: + - elasticapm + traces: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + traces/componentid-0: + exporters: + - elasticapm + - forward + processors: + - elasticapm + - transform/componentid-0 + receivers: + - zipkin/componentid-0 diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-use-apm.yml b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-use-apm.yml new file mode 100644 index 0000000000..7b383dfd44 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-use-apm.yml @@ -0,0 +1,4 @@ +vars: + endpoint: "0.0.0.0:9411" + parse_string_tags: true + use_apm: true diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-vars.expected b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-vars.expected new file mode 100644 index 0000000000..45b0332210 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-vars.expected @@ -0,0 +1,76 @@ +connectors: + elasticapm: {} + forward: {} +exporters: + elasticsearch/componentid-0: + endpoints: + - https://elasticsearch:9200 +inputs: [] +output_permissions: + default: + _elastic_agent_checks: + cluster: + - monitor + _elastic_agent_monitoring: + indices: [] + uuid-for-permissions-on-related-indices: + indices: + - names: + - traces-*-* + privileges: + - 
auto_configure + - create_doc + - names: + - logs-generic.otel-* + privileges: + - auto_configure + - create_doc + - names: + - metrics-*-* + privileges: + - auto_configure + - create_doc +processors: + elasticapm: {} + transform/componentid-0: + trace_statements: + - context: span + statements: + - set(attributes["data_stream.type"], "traces") + - set(attributes["data_stream.dataset"], "zipkinreceiver") + - set(attributes["data_stream.namespace"], "ep") + - context: spanevent + statements: + - set(attributes["data_stream.type"], "logs") + - set(attributes["data_stream.namespace"], "ep") +receivers: + zipkin/componentid-0: + endpoint: localhost:9411 + parse_string_tags: false +secret_references: [] +service: + pipelines: + metrics: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + metrics/componentid-1: + exporters: + - forward + receivers: + - elasticapm + traces: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + traces/componentid-0: + exporters: + - elasticapm + - forward + processors: + - elasticapm + - transform/componentid-0 + receivers: + - zipkin/componentid-0 diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-vars.yml b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-vars.yml new file mode 100644 index 0000000000..72b4ca320d --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default-vars.yml @@ -0,0 +1 @@ +vars: ~ diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default.expected b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default.expected new file mode 100644 index 0000000000..0db5270d10 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default.expected @@ -0,0 +1,76 @@ +connectors: + elasticapm: {} + forward: {} +exporters: + elasticsearch/componentid-0: + endpoints: + - https://elasticsearch:9200 +inputs: [] +output_permissions: + default: + 
_elastic_agent_checks: + cluster: + - monitor + _elastic_agent_monitoring: + indices: [] + uuid-for-permissions-on-related-indices: + indices: + - names: + - traces-*-* + privileges: + - auto_configure + - create_doc + - names: + - logs-generic.otel-* + privileges: + - auto_configure + - create_doc + - names: + - metrics-*-* + privileges: + - auto_configure + - create_doc +processors: + elasticapm: {} + transform/componentid-0: + trace_statements: + - context: span + statements: + - set(attributes["data_stream.type"], "traces") + - set(attributes["data_stream.dataset"], "zipkinreceiver") + - set(attributes["data_stream.namespace"], "ep") + - context: spanevent + statements: + - set(attributes["data_stream.type"], "logs") + - set(attributes["data_stream.namespace"], "ep") +receivers: + zipkin/componentid-0: + endpoint: 0.0.0.0:9411 + parse_string_tags: true +secret_references: [] +service: + pipelines: + metrics: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + metrics/componentid-1: + exporters: + - forward + receivers: + - elasticapm + traces: + exporters: + - elasticsearch/componentid-0 + receivers: + - forward + traces/componentid-0: + exporters: + - elasticapm + - forward + processors: + - elasticapm + - transform/componentid-0 + receivers: + - zipkin/componentid-0 diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default.yml b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default.yml new file mode 100644 index 0000000000..5893fe8d4f --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/_dev/test/policy/test-default.yml @@ -0,0 +1,3 @@ +vars: + endpoint: "0.0.0.0:9411" + parse_string_tags: true diff --git a/test/packages/parallel/zipkin_input_otel/_dev/test/system/test-default-config.yml b/test/packages/parallel/zipkin_input_otel/_dev/test/system/test-default-config.yml new file mode 100644 index 0000000000..57cf096fef --- /dev/null +++ 
b/test/packages/parallel/zipkin_input_otel/_dev/test/system/test-default-config.yml @@ -0,0 +1,7 @@ +service: backend +vars: + endpoint: "0.0.0.0:9411" + parse_string_tags: false + use_apm: true +assert: + min_count: 20 diff --git a/test/packages/parallel/zipkin_input_otel/agent/input/input.yml.hbs b/test/packages/parallel/zipkin_input_otel/agent/input/input.yml.hbs new file mode 100644 index 0000000000..721ef84c9e --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/agent/input/input.yml.hbs @@ -0,0 +1,36 @@ +receivers: + zipkin: + endpoint: {{endpoint}} + parse_string_tags: {{parse_string_tags}} +{{#if tls_enabled}} + tls: + insecure: {{tls_insecure}} +{{#if tls_insecure_skip_verify}} + insecure_skip_verify: {{tls_insecure_skip_verify}} +{{/if}} +{{#if tls_ca_file}} + ca_file: {{tls_ca_file}} +{{/if}} +{{#if tls_cert_file}} + cert_file: {{tls_cert_file}} +{{/if}} +{{#if tls_key_file}} + key_file: {{tls_key_file}} +{{/if}} +{{#if tls_server_name_override}} + server_name_override: {{tls_server_name_override}} +{{/if}} +{{#if tls_min_version}} + min_version: "{{tls_min_version}}" +{{/if}} +{{#if tls_max_version}} + max_version: "{{tls_max_version}}" +{{/if}} +{{#if tls_include_system_ca_certs_pool}} + include_system_ca_certs_pool: {{tls_include_system_ca_certs_pool}} +{{/if}} +{{/if}} +service: + pipelines: + traces: + receivers: [zipkin] diff --git a/test/packages/parallel/zipkin_input_otel/changelog.yml b/test/packages/parallel/zipkin_input_otel/changelog.yml new file mode 100644 index 0000000000..ed6aabd03e --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/changelog.yml @@ -0,0 +1,6 @@ +# newer versions go on top +- version: "0.1.0" + changes: + - description: Initial draft of the package + type: enhancement + link: https://github.com/elastic/integrations/pull/17226 diff --git a/test/packages/parallel/zipkin_input_otel/docs/README.md b/test/packages/parallel/zipkin_input_otel/docs/README.md new file mode 100644 index 0000000000..80968afe73 --- 
/dev/null +++ b/test/packages/parallel/zipkin_input_otel/docs/README.md @@ -0,0 +1,22 @@ +# Zipkin OpenTelemetry Input Package + +## Overview +The Zipkin OpenTelemetry Input Package for Elastic enables collection of trace data from applications instrumented with [Zipkin](https://zipkin.io/) through OpenTelemetry protocols using the [zipkinreceiver](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/zipkinreceiver#zipkin-receiver). + +### How it works +This package receives Zipkin trace data (V1 and V2 JSON/Protobuf formats) by configuring the Zipkin receiver in the Input Package, which then gets applied to the zipkinreceiver present in the EDOT collector, which then forwards the data to Elastic Agent. The Elastic Agent processes and enriches the data before sending it to Elasticsearch for indexing and analysis. + +## Configuration + +For the full list of settings exposed for the receiver and examples, refer to the [Zipkin Receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/zipkinreceiver). + +## Troubleshooting + +If you encounter issues: + +1. Verify the endpoint is accessible and not blocked by a firewall. +2. Ensure applications are sending Zipkin-formatted traces to the configured endpoint (default: `http://:9411`). +3. Check the Elastic Agent logs for any receiver errors. + +## Traces reference +For more details about the Zipkin receiver and its configuration options, refer to the [Zipkin Receiver documentation](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/receiver/zipkinreceiver) in the upstream OpenTelemetry Collector repository. 
diff --git a/test/packages/parallel/zipkin_input_otel/img/zipkin_symbol_logo_otel.svg b/test/packages/parallel/zipkin_input_otel/img/zipkin_symbol_logo_otel.svg new file mode 100644 index 0000000000..06e4a16fde --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/img/zipkin_symbol_logo_otel.svg @@ -0,0 +1,63 @@ + + + + + + + + + + + + + + + + + + + + + diff --git a/test/packages/parallel/zipkin_input_otel/manifest.yml b/test/packages/parallel/zipkin_input_otel/manifest.yml new file mode 100644 index 0000000000..024fb0f5c8 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/manifest.yml @@ -0,0 +1,131 @@ +format_version: 3.5.0 +name: zipkin_input_otel +title: "Zipkin OpenTelemetry Input Package" +version: "0.1.0" +source: + license: "Elastic-2.0" +description: "Collect Zipkin traces using OpenTelemetry Collector" +type: input +categories: + - observability + - opentelemetry +conditions: + kibana: + version: "^9.4.0" + elastic: + subscription: "basic" +icons: + - src: /img/zipkin_symbol_logo_otel.svg + title: Zipkin OTel logo + size: 32x32 + type: image/svg+xml +policy_templates: + - name: zipkinreceiver + type: traces + title: Zipkin OpenTelemetry Input + description: Collect Zipkin traces using OpenTelemetry Collector + input: otelcol + template_path: input.yml.hbs + vars: + - name: parse_string_tags + type: bool + title: Parse String Tags + required: false + description: If enabled, string tags/binary annotations will be parsed into int/bool/float types. + default: false + show_user: true + # TLS settings + - name: tls_enabled + type: bool + required: false + title: Enable TLS Configuration + description: Enable TLS configuration for connecting to the server. + default: false + show_user: true + - name: tls_insecure + type: bool + required: false + title: Disable TLS + description: Whether to disable client transport security for the connection. Set to false to enable TLS. 
+ default: false + show_user: false + - name: tls_insecure_skip_verify + type: bool + required: false + title: Skip TLS Verification + description: Whether to skip verifying the server certificate when TLS is enabled. + default: false + show_user: false + - name: tls_ca_file + type: text + required: false + title: TLS CA File + description: Path to the CA certificate file for server certificate verification. Only used when TLS is enabled. + show_user: false + - name: tls_cert_file + type: text + required: false + title: TLS Certificate File + description: Path to the TLS certificate file for client authentication. + show_user: false + - name: tls_key_file + type: text + required: false + title: TLS Key File + description: Path to the TLS key file for client authentication. + show_user: false + - name: tls_server_name_override + type: text + required: false + title: TLS Server Name Override + description: Override the virtual host name of authority in TLS requests. + show_user: false + - name: tls_min_version + type: select + required: false + title: TLS Min Version + description: Minimum acceptable TLS version. + show_user: false + options: + - text: "1.0" + value: "1.0" + - text: "1.1" + value: "1.1" + - text: "1.2" + value: "1.2" + - text: "1.3" + value: "1.3" + - name: tls_max_version + type: select + required: false + title: TLS Max Version + description: Maximum acceptable TLS version. + show_user: false + options: + - text: "1.0" + value: "1.0" + - text: "1.1" + value: "1.1" + - text: "1.2" + value: "1.2" + - text: "1.3" + value: "1.3" + - name: tls_include_system_ca_certs_pool + type: bool + required: false + title: Include System CA Certs Pool + description: Whether to load the system certificate authorities pool alongside the certificate authority. 
+ default: false + show_user: false +# Force to set a variable at package level +vars: + - name: endpoint + type: text + title: Endpoint + required: true + description: The host:port address to listen on for Zipkin spans. + default: localhost:9411 + show_user: true +owner: + github: elastic/ecosystem + type: elastic diff --git a/test/packages/parallel/zipkin_input_otel/sample_event.json b/test/packages/parallel/zipkin_input_otel/sample_event.json new file mode 100644 index 0000000000..7dcbea40e1 --- /dev/null +++ b/test/packages/parallel/zipkin_input_otel/sample_event.json @@ -0,0 +1,116 @@ +{ + "@timestamp": "2026-03-05T13:05:14.439Z", + "agent": { + "name": "otlp", + "version": "unknown" + }, + "attributes": { + "event": { + "outcome": "success", + "success_count": 1 + }, + "http": { + "method": "GET", + "path": "/api" + }, + "net": { + "host": { + "ip": "172.28.0.2" + }, + "peer": { + "ip": "172.28.0.3", + "port": 39034 + } + }, + "processor": { + "event": "transaction" + }, + "span": { + "id": "3d3f1f56d1c91af8" + }, + "timestamp": { + "us": 1772715914439970 + }, + "transaction": { + "duration": { + "us": 2328 + }, + "id": "3d3f1f56d1c91af8", + "name": "GET /api", + "representative_count": 1, + "result": "Success", + "root": true, + "sampled": true, + "type": "request" + } + }, + "data_stream": { + "dataset": "zipkinreceiver.otel", + "namespace": "50659", + "type": "traces" + }, + "duration": 2328000, + "event": { + "agent_id_status": "missing", + "dataset": "zipkinreceiver.otel", + "ingested": "2026-03-05T13:05:24Z", + "outcome": "success", + "success_count": 1 + }, + "http": { + "method": "GET", + "path": "/api" + }, + "kind": "Server", + "name": "GET /api", + "net": { + "host": { + "ip": "172.28.0.2" + }, + "peer": { + "ip": "172.28.0.3", + "port": 39034 + } + }, + "processor": { + "event": "transaction" + }, + "resource": { + "attributes": { + "agent": { + "name": "otlp", + "version": "unknown" + }, + "service": { + "name": "backend" + } + } + }, + 
"service": { + "name": "backend" + }, + "span": { + "id": "3d3f1f56d1c91af8", + "name": "GET /api" + }, + "span_id": "3d3f1f56d1c91af8", + "timestamp": { + "us": 1772715914439970 + }, + "trace": { + "id": "00000000000000003d3f1f56d1c91af8" + }, + "trace_id": "00000000000000003d3f1f56d1c91af8", + "transaction": { + "duration": { + "us": 2328 + }, + "id": "3d3f1f56d1c91af8", + "name": "GET /api", + "representative_count": 1, + "result": "Success", + "root": true, + "sampled": true, + "type": "request" + } +}