diff --git a/cmd/nri-prometheus/config.go b/cmd/nri-prometheus/config.go
index c6016581..446b48c6 100644
--- a/cmd/nri-prometheus/config.go
+++ b/cmd/nri-prometheus/config.go
@@ -76,6 +76,7 @@ func setViperDefaults(viper *viper.Viper) {
 	viper.SetDefault("auto_decorate", false)
 	viper.SetDefault("insecure_skip_verify", false)
 	viper.SetDefault("standalone", true)
+	viper.SetDefault("disable_autodiscovery", false)
 	viper.SetDefault("percentiles", []float64{50.0, 95.0, 99.0})
 }
 
diff --git a/deploy/local.yaml.example b/deploy/local.yaml.example
index f21da017..43503fe8 100644
--- a/deploy/local.yaml.example
+++ b/deploy/local.yaml.example
@@ -93,6 +93,9 @@ data:
     insecure_skip_verify: false
     # The label used to identify scrapable targets. Defaults to "prometheus.io/scrape".
     scrape_enabled_label: "prometheus.io/scrape"
+    # Set to true in order to stop autodiscovery in the k8s cluster. It can be useful when running the Pod with a service account
+    # having limited privileges. Defaults to false.
+    # disable_autodiscovery: false
     # Wether k8s nodes needs to be labelled to be scraped or not. Defaults to false.
     require_scrape_enabled_label_for_nodes: true
     #targets:
diff --git a/deploy/nri-prometheus.tmpl.yaml b/deploy/nri-prometheus.tmpl.yaml
index 11fc60e8..04558261 100644
--- a/deploy/nri-prometheus.tmpl.yaml
+++ b/deploy/nri-prometheus.tmpl.yaml
@@ -132,6 +132,10 @@ data:
     # If left empty, TLS uses the host's root CA set.
     # emitter_ca_file: "/path/to/cert/server.pem"
 
+    # Set to true in order to stop autodiscovery in the k8s cluster. It can be useful when running the Pod with a service account
+    # having limited privileges. Defaults to false.
+    # disable_autodiscovery: false
+
     # Whether the emitter should skip TLS verification when submitting data.
     # Defaults to false.
     # emitter_insecure_skip_verify: false
diff --git a/internal/cmd/scraper/scraper.go b/internal/cmd/scraper/scraper.go
index 3c168db1..60760891 100644
--- a/internal/cmd/scraper/scraper.go
+++ b/internal/cmd/scraper/scraper.go
@@ -33,6 +33,7 @@ type Config struct {
 	RequireScrapeEnabledLabelForNodes bool `mapstructure:"require_scrape_enabled_label_for_nodes"`
 	ScrapeTimeout time.Duration `mapstructure:"scrape_timeout"`
 	Standalone bool `mapstructure:"standalone"`
+	DisableAutodiscovery bool `mapstructure:"disable_autodiscovery"`
 	ScrapeDuration string `mapstructure:"scrape_duration"`
 	EmitterHarvestPeriod string `mapstructure:"emitter_harvest_period"`
 	TargetConfigs []endpoints.TargetConfig `mapstructure:"targets"`
@@ -119,11 +120,13 @@ func RunWithEmitters(cfg *Config, emitters []integration.Emitter) error {
 	}
 	retrievers = append(retrievers, fixedRetriever)
 
-	kubernetesRetriever, err := endpoints.NewKubernetesTargetRetriever(cfg.ScrapeEnabledLabel, cfg.RequireScrapeEnabledLabelForNodes, endpoints.WithInClusterConfig())
-	if err != nil {
-		logrus.WithError(err).Errorf("not possible to get a Kubernetes client. If you aren't running this integration in a Kubernetes cluster, you can ignore this error")
-	} else {
-		retrievers = append(retrievers, kubernetesRetriever)
+	if !cfg.DisableAutodiscovery {
+		kubernetesRetriever, err := endpoints.NewKubernetesTargetRetriever(cfg.ScrapeEnabledLabel, cfg.RequireScrapeEnabledLabelForNodes, endpoints.WithInClusterConfig())
+		if err != nil {
+			logrus.WithError(err).Errorf("not possible to get a Kubernetes client. If you aren't running this integration in a Kubernetes cluster, you can ignore this error")
+		} else {
+			retrievers = append(retrievers, kubernetesRetriever)
+		}
 	}
 	defaultTransformations := integration.ProcessingRule{
 		Description: "Default transformation rules",
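For reviewers, here is a minimal, self-contained sketch of the control flow this patch introduces. The `Config` struct and the retriever constructor below are simplified stand-ins for `scraper.Config` and `endpoints.NewKubernetesTargetRetriever`, not the real nri-prometheus types:

```go
package main

import (
	"errors"
	"fmt"
)

// Config keeps only the field this patch adds; the real scraper.Config
// carries many more settings.
type Config struct {
	DisableAutodiscovery bool
}

// newKubernetesTargetRetriever is a hypothetical stand-in for
// endpoints.NewKubernetesTargetRetriever. It fails the way the real
// constructor would when no in-cluster configuration is available.
func newKubernetesTargetRetriever() (string, error) {
	return "", errors.New("unable to load in-cluster configuration")
}

func main() {
	cfg := Config{DisableAutodiscovery: true}

	// The fixed (statically configured) targets retriever is always registered.
	retrievers := []string{"fixed"}

	// The gate added by this patch: when autodiscovery is disabled, the
	// Kubernetes client is never created, so a Pod running with a service
	// account that lacks list/watch permissions no longer logs an error.
	if !cfg.DisableAutodiscovery {
		r, err := newKubernetesTargetRetriever()
		if err != nil {
			fmt.Println("not possible to get a Kubernetes client:", err)
		} else {
			retrievers = append(retrievers, r)
		}
	}

	fmt.Println("active retrievers:", retrievers) // prints: active retrievers: [fixed]
}
```

With `disable_autodiscovery: true` set in the config map, only the retrievers built from the static `targets` list stay active, which matches the intent stated in the new YAML comments.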