From a322548c64bd46275eff1c1a0709cb31ea33692e Mon Sep 17 00:00:00 2001 From: Mike Blum Date: Wed, 5 Mar 2025 22:06:16 -0600 Subject: [PATCH 1/2] remove deprecated config module --- CHANGELOG.md | 5 + Makefile | 8 +- config/doc.go | 15 - config/go.mod | 55 - config/go.sum | 106 -- config/jsonschema_patch.sed | 4 - config/testdata/bad_cert.crt | 1 - config/testdata/ca.crt | 20 - config/testdata/invalid_bool.json | 1 - config/testdata/invalid_bool.yaml | 2 - config/testdata/invalid_nil_name.json | 49 - config/testdata/invalid_nil_name.yaml | 13 - config/testdata/invalid_nil_value.json | 49 - config/testdata/invalid_nil_value.yaml | 13 - config/testdata/v0.2.json | 243 ----- config/testdata/v0.2.yaml | 428 -------- config/testdata/v0.3.json | 419 ------- config/testdata/v0.3.yaml | 466 -------- config/testdata/valid_empty.json | 1 - config/testdata/valid_empty.yaml | 2 - config/v0.2.0/config.go | 156 --- config/v0.2.0/config_test.go | 383 ------- config/v0.2.0/generated_config.go | 780 ------------- config/v0.2.0/log.go | 155 --- config/v0.2.0/log_test.go | 412 ------- config/v0.2.0/metric.go | 507 --------- config/v0.2.0/metric_test.go | 1158 -------------------- config/v0.2.0/resource.go | 63 -- config/v0.2.0/resource_test.go | 116 -- config/v0.2.0/trace.go | 197 ---- config/v0.2.0/trace_test.go | 535 --------- config/v0.3.0/config.go | 209 ---- config/v0.3.0/config_json.go | 382 ------- config/v0.3.0/config_test.go | 666 ------------ config/v0.3.0/config_yaml.go | 70 -- config/v0.3.0/fuzz_test.go | 64 -- config/v0.3.0/generated_config.go | 672 ------------ config/v0.3.0/log.go | 222 ---- config/v0.3.0/log_test.go | 696 ------------ config/v0.3.0/metric.go | 565 ---------- config/v0.3.0/metric_test.go | 1390 ------------------------ config/v0.3.0/resource.go | 63 -- config/v0.3.0/resource_test.go | 113 -- config/v0.3.0/trace.go | 219 ---- config/v0.3.0/trace_test.go | 701 ------------ versions.yaml | 1 - 46 files changed, 9 insertions(+), 12386 deletions(-) delete 
mode 100644 config/doc.go delete mode 100644 config/go.mod delete mode 100644 config/go.sum delete mode 100644 config/jsonschema_patch.sed delete mode 100644 config/testdata/bad_cert.crt delete mode 100644 config/testdata/ca.crt delete mode 100644 config/testdata/invalid_bool.json delete mode 100644 config/testdata/invalid_bool.yaml delete mode 100644 config/testdata/invalid_nil_name.json delete mode 100644 config/testdata/invalid_nil_name.yaml delete mode 100644 config/testdata/invalid_nil_value.json delete mode 100644 config/testdata/invalid_nil_value.yaml delete mode 100644 config/testdata/v0.2.json delete mode 100644 config/testdata/v0.2.yaml delete mode 100644 config/testdata/v0.3.json delete mode 100644 config/testdata/v0.3.yaml delete mode 100644 config/testdata/valid_empty.json delete mode 100644 config/testdata/valid_empty.yaml delete mode 100644 config/v0.2.0/config.go delete mode 100644 config/v0.2.0/config_test.go delete mode 100644 config/v0.2.0/generated_config.go delete mode 100644 config/v0.2.0/log.go delete mode 100644 config/v0.2.0/log_test.go delete mode 100644 config/v0.2.0/metric.go delete mode 100644 config/v0.2.0/metric_test.go delete mode 100644 config/v0.2.0/resource.go delete mode 100644 config/v0.2.0/resource_test.go delete mode 100644 config/v0.2.0/trace.go delete mode 100644 config/v0.2.0/trace_test.go delete mode 100644 config/v0.3.0/config.go delete mode 100644 config/v0.3.0/config_json.go delete mode 100644 config/v0.3.0/config_test.go delete mode 100644 config/v0.3.0/config_yaml.go delete mode 100644 config/v0.3.0/fuzz_test.go delete mode 100644 config/v0.3.0/generated_config.go delete mode 100644 config/v0.3.0/log.go delete mode 100644 config/v0.3.0/log_test.go delete mode 100644 config/v0.3.0/metric.go delete mode 100644 config/v0.3.0/metric_test.go delete mode 100644 config/v0.3.0/resource.go delete mode 100644 config/v0.3.0/resource_test.go delete mode 100644 config/v0.3.0/trace.go delete mode 100644 config/v0.3.0/trace_test.go 
diff --git a/CHANGELOG.md b/CHANGELOG.md index 818e63f9ec2..8f758eb10e1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,7 +10,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Removed - Drop support for [Go 1.22]. (#6853) +- The deprecated `go.opentelemetry.io/contrib/config` package is removed. (#6894) diff --git a/Makefile b/Makefile index 923d0d5ea33..5dfbe011fae 100644 --- a/Makefile +++ b/Makefile @@ -329,23 +329,23 @@ OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION=v0.3.0 genjsonschema-cleanup: rm -Rf ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} -GENERATED_CONFIG=./config/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION}/generated_config.go +GENERATED_CONFIG=./otelconf/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION}/generated_config.go # Generate structs for configuration from opentelemetry-configuration schema genjsonschema: genjsonschema-cleanup $(GOJSONSCHEMA) mkdir -p ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} - mkdir -p ./config/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION} + mkdir -p ./otelconf/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION} curl -sSL https://api.github.com/repos/open-telemetry/opentelemetry-configuration/tarball/${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_VERSION} | tar xz --strip 1 -C ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR} $(GOJSONSCHEMA) \ --capitalization ID \ --capitalization OTLP \ --struct-name-from-title \ - --package config \ + --package otelconf \ --only-models \ --output ${GENERATED_CONFIG} \ ${OPENTELEMETRY_CONFIGURATION_JSONSCHEMA_SRC_DIR}/schema/opentelemetry_configuration.json @echo Modify jsonschema generated files. 
- sed -f ./config/jsonschema_patch.sed ${GENERATED_CONFIG} > ${GENERATED_CONFIG}.tmp + sed -f ./otelconf/jsonschema_patch.sed ${GENERATED_CONFIG} > ${GENERATED_CONFIG}.tmp mv ${GENERATED_CONFIG}.tmp ${GENERATED_CONFIG} $(MAKE) genjsonschema-cleanup diff --git a/config/doc.go b/config/doc.go deleted file mode 100644 index af19a8b6797..00000000000 --- a/config/doc.go +++ /dev/null @@ -1,15 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package config is deprecated. -// -// Deprecated: use [go.opentelemetry.io/contrib/otelconf] instead. -// This is the last release of this module. -// -// Package config can be used to parse a configuration file that follows -// the JSON Schema defined by the OpenTelemetry Configuration schema. Different -// versions of the schema are supported by the code in the directory that -// matches the version number of the schema. For example, the import -// go.opentelemetry.io/contrib/config/v0.3.0 includes code that supports the -// v0.3.0 release of the configuration schema. -package config // import "go.opentelemetry.io/contrib/config" diff --git a/config/go.mod b/config/go.mod deleted file mode 100644 index 59ffd60accc..00000000000 --- a/config/go.mod +++ /dev/null @@ -1,55 +0,0 @@ -// Deprecated: use go.opentelemetry.io/contrib/otelconf instead. -// This is the last release of this module. 
-module go.opentelemetry.io/contrib/config - -go 1.23.0 - -require ( - github.com/prometheus/client_golang v1.21.1 - github.com/stretchr/testify v1.10.0 - go.opentelemetry.io/otel v1.35.0 - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 - go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 - go.opentelemetry.io/otel/exporters/prometheus v0.57.0 - go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 - go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 - go.opentelemetry.io/otel/log v0.11.0 - go.opentelemetry.io/otel/metric v1.35.0 - go.opentelemetry.io/otel/sdk v1.35.0 - go.opentelemetry.io/otel/sdk/log v0.11.0 - go.opentelemetry.io/otel/sdk/metric v1.35.0 - go.opentelemetry.io/otel/trace v1.35.0 - google.golang.org/grpc v1.71.0 - gopkg.in/yaml.v3 v3.0.1 -) - -require ( - github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect - github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect - github.com/go-logr/logr v1.4.2 // indirect - github.com/go-logr/stdr v1.2.2 // indirect - github.com/google/uuid v1.6.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 // indirect - github.com/klauspost/compress v1.18.0 // indirect - github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/client_model v0.6.1 // indirect - github.com/prometheus/common v0.62.0 // indirect - github.com/prometheus/procfs v0.15.1 // indirect - go.opentelemetry.io/auto/sdk v1.1.0 // indirect - 
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect - golang.org/x/net v0.37.0 // indirect - golang.org/x/sys v0.31.0 // indirect - golang.org/x/text v0.23.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect - google.golang.org/protobuf v1.36.5 // indirect -) diff --git a/config/go.sum b/config/go.sum deleted file mode 100644 index bef5e189383..00000000000 --- a/config/go.sum +++ /dev/null @@ -1,106 +0,0 @@ -github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= -github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= -github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= -github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= -github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= 
-github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= -github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= -github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1 h1:e9Rjr40Z98/clHv5Yg79Is0NtosR5LXRvdr7o/6NwbA= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.26.1/go.mod h1:tIxuGz/9mpox++sgp9fJjHO0+q1X9/UOWd798aAm22M= -github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= -github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= -github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= -github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= -github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= -github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= -github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v1.21.1 h1:DOvXXTqVzvkIewV/CDPFdejpMCGeMcbGCQ8YOmu+Ibk= -github.com/prometheus/client_golang v1.21.1/go.mod h1:U9NM32ykUErtVBxdvD3zfi+EuFkkaBvMb09mIfe0Zgg= -github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p8ais2e9E= -github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY= -github.com/prometheus/common v0.62.0 
h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io= -github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I= -github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= -github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= -github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= -github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= -go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= -go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0 h1:HMUytBT3uGhPKYY/u/G5MR9itrlSO2SMOsSD3Tk3k7A= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc v0.11.0/go.mod h1:hdDXsiNLmdW/9BF2jQpnHHlhFajpWCEYfM6e5m2OAZg= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0 h1:C/Wi2F8wEmbxJ9Kuzw/nhP+Z9XaHYMkyDmXy6yR2cjw= -go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp v0.11.0/go.mod h1:0Lr9vmGKzadCTgsiBydxr6GEZ8SsZ7Ks53LzjWG5Ar4= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0 h1:QcFwRrZLc82r8wODjvyCbP7Ifp3UANaBSmhDSFjnqSc= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.35.0/go.mod h1:CXIWhUomyWBG/oY2/r/kLp6K/cmx9e/7DLpBuuGdLCA= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0 h1:0NIXxOCFx+SKbhCVxwl3ETG8ClLPAa0KuKV6p3yhxP8= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.35.0/go.mod 
h1:ChZSJbbfbl/DcRZNc9Gqh6DYGlfjw4PvO1pEOZH1ZsE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0 h1:1fTNlAIJZGWLP5FVu0fikVry1IsiUnXjf7QFvoNN3Xw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.35.0/go.mod h1:zjPK58DtkqQFn+YUMbx0M2XV3QgKU0gS9LeGohREyK4= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0 h1:xJ2qHD0C1BeYVTLLR9sX12+Qb95kfeD/byKj6Ky1pXg= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.35.0/go.mod h1:u5BF1xyjstDowA1R5QAO9JHzqK+ublenEW/dyqTjBVk= -go.opentelemetry.io/otel/exporters/prometheus v0.57.0 h1:AHh/lAP1BHrY5gBwk8ncc25FXWm/gmmY3BX258z5nuk= -go.opentelemetry.io/otel/exporters/prometheus v0.57.0/go.mod h1:QpFWz1QxqevfjwzYdbMb4Y1NnlJvqSGwyuU0B4iuc9c= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0 h1:k6KdfZk72tVW/QVZf60xlDziDvYAePj5QHwoQvrB2m8= -go.opentelemetry.io/otel/exporters/stdout/stdoutlog v0.11.0/go.mod h1:5Y3ZJLqzi/x/kYtrSrPSx7TFI/SGsL7q2kME027tH6I= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0 h1:PB3Zrjs1sG1GBX51SXyTSoOTqcDglmsk7nT6tkKPb/k= -go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.35.0/go.mod h1:U2R3XyVPzn0WX7wOIypPuptulsMcPDPs/oiSVOMVnHY= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0 h1:T0Ec2E+3YZf5bgTNQVet8iTDW7oIk03tXHq+wkwIDnE= -go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.35.0/go.mod h1:30v2gqH+vYGJsesLWFov8u47EpYTcIQcBjKpI6pJThg= -go.opentelemetry.io/otel/log v0.11.0 h1:c24Hrlk5WJ8JWcwbQxdBqxZdOK7PcP/LFtOtwpDTe3Y= -go.opentelemetry.io/otel/log v0.11.0/go.mod h1:U/sxQ83FPmT29trrifhQg+Zj2lo1/IPN1PF6RTFqdwc= -go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= -go.opentelemetry.io/otel/metric v1.35.0/go.mod 
h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= -go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= -go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= -go.opentelemetry.io/otel/sdk/log v0.11.0 h1:7bAOpjpGglWhdEzP8z0VXc4jObOiDEwr3IYbhBnjk2c= -go.opentelemetry.io/otel/sdk/log v0.11.0/go.mod h1:dndLTxZbwBstZoqsJB3kGsRPkpAgaJrWfQg3lhlHFFY= -go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= -go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= -go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= -go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= -go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= -go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= -golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c= -golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik= -golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= -golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= -golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950= -google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg= -google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4= 
-google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I= -google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg= -google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= -gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= -gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/config/jsonschema_patch.sed b/config/jsonschema_patch.sed deleted file mode 100644 index 04090d912d0..00000000000 --- a/config/jsonschema_patch.sed +++ /dev/null @@ -1,4 +0,0 @@ -# go-jsonschema always generates patternProperties as -# map[string]interface{}, for more specific types, they must -# be replaced here -s+type Headers.*+type Headers map[string]string+g \ No newline at end of file diff --git a/config/testdata/bad_cert.crt b/config/testdata/bad_cert.crt deleted file mode 100644 index dbbd2b8576f..00000000000 --- a/config/testdata/bad_cert.crt +++ /dev/null @@ -1 +0,0 @@ -This is intentionally not a PEM formatted cert file. 
diff --git a/config/testdata/ca.crt b/config/testdata/ca.crt deleted file mode 100644 index 2272f84e64d..00000000000 --- a/config/testdata/ca.crt +++ /dev/null @@ -1,20 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDNjCCAh4CCQC0I5IQT7eziDANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJB -VTESMBAGA1UECAwJQXVzdHJhbGlhMQ8wDQYDVQQHDAZTeWRuZXkxEjAQBgNVBAoM -CU15T3JnTmFtZTEVMBMGA1UEAwwMTXlDb21tb25OYW1lMB4XDTIyMDgwMzA0MTky -MVoXDTMyMDczMTA0MTkyMVowXTELMAkGA1UEBhMCQVUxEjAQBgNVBAgMCUF1c3Ry -YWxpYTEPMA0GA1UEBwwGU3lkbmV5MRIwEAYDVQQKDAlNeU9yZ05hbWUxFTATBgNV -BAMMDE15Q29tbW9uTmFtZTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB -AMhGP0dy3zvkdx9zI+/XVjPOWlER0OUp7Sgzidc3nLOk42+bH4ofIVNtOFVqlNKi -O1bImu238VdBhd6R5IZZ1ZdIMcCeDgSJYu2X9wA3m4PKz8IdXo5ly2OHghhmCvqG -WxgqDj5wPXiczQwuf1EcDMtRWbXJ6Z/XH1U68R/kRdNLkiZ2LwtjoQpis5XYckLL -CrdF+AL6GeDIe0Mh9QGs26Vux+2kvaOGNUWRPE6Wt4GkqyKqmzYfR9HbflJ4xHT2 -I+jE1lg+jMBeom7z8Z90RE4GGcHjO+Vens/88r5EAjTnFj1Kb5gL2deSHY1m/++R -Z/kRyg+zQJyw4fAzlAA4+VkCAwEAATANBgkqhkiG9w0BAQsFAAOCAQEAM3gRdTKX -eGwGYVmmKqA2vTxeigQYLHml7OSopcWj2wJfxfp49HXPRuvgpQn9iubxO3Zmhd83 -2X1E+T0A8oy5CfxgpAhHb3lY0jm3TjKXm6m+dSODwL3uND8tX+SqR8sRTFxPvPuo -pmvhdTZoRI3EzIiHLTgCuSU25JNP/vrVoKk0JvCkDYTU/WcVfj0v95DTMoWR4JGz -mtBwrgD0EM2XRw5ZMc7sMPli1gqmCbCQUrDZ+rPB78WDCBILBd8Cz75qYTUp98BY -akJyBckdJHAdyEQYDKa9HpmpexOO7IhSXCTEN1DEBgpZgEi/lBDRG/b0OzenUUgt -LUABtWt3pNQ9HA== ------END CERTIFICATE----- diff --git a/config/testdata/invalid_bool.json b/config/testdata/invalid_bool.json deleted file mode 100644 index 979233b54fc..00000000000 --- a/config/testdata/invalid_bool.json +++ /dev/null @@ -1 +0,0 @@ -{"file_format": "yaml", "disabled": "notabool"} \ No newline at end of file diff --git a/config/testdata/invalid_bool.yaml b/config/testdata/invalid_bool.yaml deleted file mode 100644 index 837b3e0c2cb..00000000000 --- a/config/testdata/invalid_bool.yaml +++ /dev/null @@ -1,2 +0,0 @@ -file_format: yaml -disabled: notabool \ No newline at end of file diff --git a/config/testdata/invalid_nil_name.json 
b/config/testdata/invalid_nil_name.json deleted file mode 100644 index 163aab02d03..00000000000 --- a/config/testdata/invalid_nil_name.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "file_format": "0.3", - "disabled": false, - "logger_provider": { - "processors": [ - { - "batch": { - "schedule_delay": 5000, - "export_timeout": 30000, - "max_queue_size": 2048, - "max_export_batch_size": 512, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - "endpoint": "http://localhost:4318/v1/logs", - "certificate": "/app/cert.pem", - "client_key": "/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": [ - { - "name": "api-key", - "value": "1234" - }, - { - "value": "nil-name" - } - ], - "headers_list": "api-key=1234", - "compression": "gzip", - "timeout": 10000, - "insecure": false - } - } - } - }, - { - "simple": { - "exporter": { - "console": {} - } - } - } - ], - "limits": { - "attribute_value_length_limit": 4096, - "attribute_count_limit": 128 - } - } -} \ No newline at end of file diff --git a/config/testdata/invalid_nil_name.yaml b/config/testdata/invalid_nil_name.yaml deleted file mode 100644 index 89d197dbb8e..00000000000 --- a/config/testdata/invalid_nil_name.yaml +++ /dev/null @@ -1,13 +0,0 @@ -file_format: "0.3" -disabled: false -logger_provider: - processors: - - batch: - exporter: - otlp: - protocol: http/protobuf - endpoint: http://localhost:4318/v1/logs - headers: - - name: api-key - value: "1234" - - value: nil-name \ No newline at end of file diff --git a/config/testdata/invalid_nil_value.json b/config/testdata/invalid_nil_value.json deleted file mode 100644 index a6c0c5f856d..00000000000 --- a/config/testdata/invalid_nil_value.json +++ /dev/null @@ -1,49 +0,0 @@ -{ - "file_format": "0.3", - "disabled": false, - "logger_provider": { - "processors": [ - { - "batch": { - "schedule_delay": 5000, - "export_timeout": 30000, - "max_queue_size": 2048, - "max_export_batch_size": 512, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - 
"endpoint": "http://localhost:4318/v1/logs", - "certificate": "/app/cert.pem", - "client_key": "/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": [ - { - "name": "api-key", - "value": "1234" - }, - { - "name": "nil-value" - } - ], - "headers_list": "api-key=1234", - "compression": "gzip", - "timeout": 10000, - "insecure": false - } - } - } - }, - { - "simple": { - "exporter": { - "console": {} - } - } - } - ], - "limits": { - "attribute_value_length_limit": 4096, - "attribute_count_limit": 128 - } - } -} \ No newline at end of file diff --git a/config/testdata/invalid_nil_value.yaml b/config/testdata/invalid_nil_value.yaml deleted file mode 100644 index 8df3f074282..00000000000 --- a/config/testdata/invalid_nil_value.yaml +++ /dev/null @@ -1,13 +0,0 @@ -file_format: "0.3" -disabled: false -logger_provider: - processors: - - batch: - exporter: - otlp: - protocol: http/protobuf - endpoint: http://localhost:4318/v1/logs - headers: - - name: api-key - value: "1234" - - name: nil-value \ No newline at end of file diff --git a/config/testdata/v0.2.json b/config/testdata/v0.2.json deleted file mode 100644 index d26ca8b5826..00000000000 --- a/config/testdata/v0.2.json +++ /dev/null @@ -1,243 +0,0 @@ -{ - "file_format": "0.2", - "disabled": false, - "attribute_limits": { - "attribute_value_length_limit": 4096, - "attribute_count_limit": 128 - }, - "logger_provider": { - "processors": [ - { - "batch": { - "schedule_delay": 5000, - "export_timeout": 30000, - "max_queue_size": 2048, - "max_export_batch_size": 512, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - "endpoint": "http://localhost:4318", - "certificate": "/app/cert.pem", - "client_key": "/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": { - "api-key": "1234" - }, - "compression": "gzip", - "timeout": 10000, - "insecure": false - } - } - } - }, - { - "simple": { - "exporter": { - "console": {} - } - } - } - ], - "limits": { - "attribute_value_length_limit": 4096, - 
"attribute_count_limit": 128 - } - }, - "meter_provider": { - "readers": [ - { - "pull": { - "exporter": { - "prometheus": { - "host": "localhost", - "port": 9464, - "without_units": false, - "without_type_suffix": false, - "without_scope_info": false, - "with_resource_constant_labels": { - "included": [ - "service*" - ], - "excluded": [ - "service.attr1" - ] - } - } - } - } - }, - { - "periodic": { - "interval": 5000, - "timeout": 30000, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - "endpoint": "http://localhost:4318", - "certificate": "/app/cert.pem", - "client_key": "/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": { - "api-key": "1234" - }, - "compression": "gzip", - "timeout": 10000, - "insecure": false, - "temporality_preference": "delta", - "default_histogram_aggregation": "base2_exponential_bucket_histogram" - } - } - } - }, - { - "periodic": { - "exporter": { - "console": {} - } - } - } - ], - "views": [ - { - "selector": { - "instrument_name": "my-instrument", - "instrument_type": "histogram", - "unit": "ms", - "meter_name": "my-meter", - "meter_version": "1.0.0", - "meter_schema_url": "https://opentelemetry.io/schemas/1.16.0" - }, - "stream": { - "name": "new_instrument_name", - "description": "new_description", - "aggregation": { - "explicit_bucket_histogram": { - "boundaries": [ - 0, - 5, - 10, - 25, - 50, - 75, - 100, - 250, - 500, - 750, - 1000, - 2500, - 5000, - 7500, - 10000 - ], - "record_min_max": true - } - }, - "attribute_keys": [ - "key1", - "key2" - ] - } - } - ] - }, - "propagator": { - "composite": [ - "tracecontext", - "baggage", - "b3", - "b3multi", - "jaeger", - "xray", - "ottrace" - ] - }, - "tracer_provider": { - "processors": [ - { - "batch": { - "schedule_delay": 5000, - "export_timeout": 30000, - "max_queue_size": 2048, - "max_export_batch_size": 512, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - "endpoint": "http://localhost:4318", - "certificate": "/app/cert.pem", - "client_key": 
"/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": { - "api-key": "1234" - }, - "compression": "gzip", - "timeout": 10000, - "insecure": false - } - } - } - }, - { - "batch": { - "exporter": { - "zipkin": { - "endpoint": "http://localhost:9411/api/v2/spans", - "timeout": 10000 - } - } - } - }, - { - "simple": { - "exporter": { - "console": {} - } - } - } - ], - "limits": { - "attribute_value_length_limit": 4096, - "attribute_count_limit": 128, - "event_count_limit": 128, - "link_count_limit": 128, - "event_attribute_count_limit": 128, - "link_attribute_count_limit": 128 - }, - "sampler": { - "parent_based": { - "root": { - "trace_id_ratio_based": { - "ratio": 0.0001 - } - }, - "remote_parent_sampled": { - "always_on": {} - }, - "remote_parent_not_sampled": { - "always_off": {} - }, - "local_parent_sampled": { - "always_on": {} - }, - "local_parent_not_sampled": { - "always_off": {} - } - } - } - }, - "resource": { - "attributes": { - "service.name": "unknown_service" - }, - "detectors": { - "attributes": { - "included": [ - "process.*" - ], - "excluded": [ - "process.command_args" - ] - } - }, - "schema_url": "https://opentelemetry.io/schemas/1.16.0" - } -} \ No newline at end of file diff --git a/config/testdata/v0.2.yaml b/config/testdata/v0.2.yaml deleted file mode 100644 index 0ef5c87f825..00000000000 --- a/config/testdata/v0.2.yaml +++ /dev/null @@ -1,428 +0,0 @@ -# kitchen-sink.yaml demonstrates all configurable surface area, including explanatory comments. -# -# It DOES NOT represent expected real world configuration, as it makes strange configuration -# choices in an effort to exercise the full surface area. -# -# Configuration values are set to their defaults when default values are defined. - -# The file format version -file_format: "0.2" - -# Configure if the SDK is disabled or not. This is not required to be provided -# to ensure the SDK isn't disabled, the default value when this is not provided -# is for the SDK to be enabled. 
-# -# Environment variable: OTEL_SDK_DISABLED -disabled: false - -# Configure general attribute limits. See also tracer_provider.limits, logger_provider.limits. -attribute_limits: - # Configure max attribute value size. - # - # Environment variable: OTEL_ATTRIBUTE_VALUE_LENGTH_LIMIT - attribute_value_length_limit: 4096 - # Configure max attribute count. - # - # Environment variable: OTEL_ATTRIBUTE_COUNT_LIMIT - attribute_count_limit: 128 - -# Configure logger provider. -logger_provider: - # Configure log record processors. - processors: - # Configure a batch log record processor. - - batch: - # Configure delay interval (in milliseconds) between two consecutive exports. - # - # Environment variable: OTEL_BLRP_SCHEDULE_DELAY - schedule_delay: 5000 - # Configure maximum allowed time (in milliseconds) to export data. - # - # Environment variable: OTEL_BLRP_EXPORT_TIMEOUT - export_timeout: 30000 - # Configure maximum queue size. - # - # Environment variable: OTEL_BLRP_MAX_QUEUE_SIZE - max_queue_size: 2048 - # Configure maximum batch size. - # - # Environment variable: OTEL_BLRP_MAX_EXPORT_BATCH_SIZE - max_export_batch_size: 512 - # Configure exporter. - # - # Environment variable: OTEL_LOGS_EXPORTER - exporter: - # Configure exporter to be OTLP. - otlp: - # Configure protocol. - # - # Environment variable: OTEL_EXPORTER_OTLP_PROTOCOL, OTEL_EXPORTER_OTLP_LOGS_PROTOCOL - protocol: http/protobuf - # Configure endpoint. - # - # Environment variable: OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_LOGS_ENDPOINT - endpoint: http://localhost:4318 - # Configure certificate. - # - # Environment variable: OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CERTIFICATE - certificate: /app/cert.pem - # Configure mTLS private client key. - # - # Environment variable: OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_LOGS_CLIENT_KEY - client_key: /app/cert.pem - # Configure mTLS client certificate. 
- # - # Environment variable: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_LOGS_CLIENT_CERTIFICATE - client_certificate: /app/cert.pem - # Configure headers. - # - # Environment variable: OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_LOGS_HEADERS - headers: - api-key: "1234" - # Configure compression. - # - # Environment variable: OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_LOGS_COMPRESSION - compression: gzip - # Configure max time (in milliseconds) to wait for each export. - # - # Environment variable: OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_LOGS_TIMEOUT - timeout: 10000 - # Configure client transport security for the exporter's connection. - # - # Environment variable: OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_LOGS_INSECURE - insecure: false - # Configure a simple span processor. - - simple: - # Configure exporter. - exporter: - # Configure exporter to be console. - console: {} - # Configure log record limits. See also attribute_limits. - limits: - # Configure max log record attribute value size. Overrides attribute_limits.attribute_value_length_limit. - # - # Environment variable: OTEL_LOGRECORD_ATTRIBUTE_VALUE_LENGTH_LIMIT - attribute_value_length_limit: 4096 - # Configure max log record attribute count. Overrides attribute_limits.attribute_count_limit. - # - # Environment variable: OTEL_LOGRECORD_ATTRIBUTE_COUNT_LIMIT - attribute_count_limit: 128 - -# Configure meter provider. -meter_provider: - # Configure metric readers. - readers: - # Configure a pull-based metric reader. - - pull: - # Configure exporter. - # - # Environment variable: OTEL_METRICS_EXPORTER - exporter: - # Configure exporter to be prometheus. - prometheus: - # Configure host. - # - # Environment variable: OTEL_EXPORTER_PROMETHEUS_HOST - host: localhost - # Configure port. - # - # Environment variable: OTEL_EXPORTER_PROMETHEUS_PORT - port: 9464 - # Configure Prometheus Exporter to produce metrics without a unit suffix or UNIT metadata. 
- without_units: false - # Configure Prometheus Exporter to produce metrics without a type suffix. - without_type_suffix: false - # Configure Prometheus Exporter to produce metrics without a scope info metric. - without_scope_info: false - # Configure Prometheus Exporter to add resource attributes as metrics attributes. - with_resource_constant_labels: - # Configure resource attributes to be included, in this example attributes starting with service. - included: - - "service*" - # Configure resource attributes to be excluded, in this example attribute service.attr1. - excluded: - - "service.attr1" - # Configure a periodic metric reader. - - periodic: - # Configure delay interval (in milliseconds) between start of two consecutive exports. - # - # Environment variable: OTEL_METRIC_EXPORT_INTERVAL - interval: 5000 - # Configure maximum allowed time (in milliseconds) to export data. - # - # Environment variable: OTEL_METRIC_EXPORT_TIMEOUT - timeout: 30000 - # Configure exporter. - # - # Environment variable: OTEL_METRICS_EXPORTER - exporter: - # Configure exporter to be OTLP. - otlp: - # Configure protocol. - # - # Environment variable: OTEL_EXPORTER_OTLP_PROTOCOL, OTEL_EXPORTER_OTLP_METRICS_PROTOCOL - protocol: http/protobuf - # Configure endpoint. - # - # Environment variable: OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_METRICS_ENDPOINT - endpoint: http://localhost:4318 - # Configure certificate. - # - # Environment variable: OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE - certificate: /app/cert.pem - # Configure mTLS private client key. - # - # Environment variable: OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_METRICS_CLIENT_KEY - client_key: /app/cert.pem - # Configure mTLS client certificate. - # - # Environment variable: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_METRICS_CLIENT_CERTIFICATE - client_certificate: /app/cert.pem - # Configure headers. 
- # - # Environment variable: OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_METRICS_HEADERS - headers: - api-key: !!str 1234 - # Configure compression. - # - # Environment variable: OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_METRICS_COMPRESSION - compression: gzip - # Configure max time (in milliseconds) to wait for each export. - # - # Environment variable: OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_METRICS_TIMEOUT - timeout: 10000 - # Configure client transport security for the exporter's connection. - # - # Environment variable: OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_METRICS_INSECURE - insecure: false - # Configure temporality preference. - # - # Environment variable: OTEL_EXPORTER_OTLP_METRICS_TEMPORALITY_PREFERENCE - temporality_preference: delta - # Configure default histogram aggregation. - # - # Environment variable: OTEL_EXPORTER_OTLP_METRICS_DEFAULT_HISTOGRAM_AGGREGATION - default_histogram_aggregation: base2_exponential_bucket_histogram - # Configure a periodic metric reader. - - periodic: - # Configure exporter. - exporter: - # Configure exporter to be console. - console: {} - # Configure views. Each view has a selector which determines the instrument(s) it applies to, and a configuration for the resulting stream(s). - views: - # Configure a view. - - selector: - # Configure instrument name selection criteria. - instrument_name: my-instrument - # Configure instrument type selection criteria. - instrument_type: histogram - # Configure the instrument unit selection criteria. - unit: ms - # Configure meter name selection criteria. - meter_name: my-meter - # Configure meter version selection criteria. - meter_version: 1.0.0 - # Configure meter schema url selection criteria. - meter_schema_url: https://opentelemetry.io/schemas/1.16.0 - # Configure stream. - stream: - # Configure metric name of the resulting stream(s). - name: new_instrument_name - # Configure metric description of the resulting stream(s). 
- description: new_description - # Configure aggregation of the resulting stream(s). Known values include: default, drop, explicit_bucket_histogram, base2_exponential_bucket_histogram, last_value, sum. - aggregation: - # Configure aggregation to be explicit_bucket_histogram. - explicit_bucket_histogram: - # Configure bucket boundaries. - boundaries: [ 0.0, 5.0, 10.0, 25.0, 50.0, 75.0, 100.0, 250.0, 500.0, 750.0, 1000.0, 2500.0, 5000.0, 7500.0, 10000.0 ] - # Configure record min and max. - record_min_max: true - # Configure attribute keys retained in the resulting stream(s). - attribute_keys: - - key1 - - key2 - -# Configure text map context propagators. -# -# Environment variable: OTEL_PROPAGATORS -propagator: - composite: [tracecontext, baggage, b3, b3multi, jaeger, xray, ottrace] - -# Configure tracer provider. -tracer_provider: - # Configure span processors. - processors: - # Configure a batch span processor. - - batch: - # Configure delay interval (in milliseconds) between two consecutive exports. - # - # Environment variable: OTEL_BSP_SCHEDULE_DELAY - schedule_delay: 5000 - # Configure maximum allowed time (in milliseconds) to export data. - # - # Environment variable: OTEL_BSP_EXPORT_TIMEOUT - export_timeout: 30000 - # Configure maximum queue size. - # - # Environment variable: OTEL_BSP_MAX_QUEUE_SIZE - max_queue_size: 2048 - # Configure maximum batch size. - # - # Environment variable: OTEL_BSP_MAX_EXPORT_BATCH_SIZE - max_export_batch_size: 512 - # Configure exporter. - # - # Environment variable: OTEL_TRACES_EXPORTER - exporter: - # Configure exporter to be OTLP. - otlp: - # Configure protocol. - # - # Environment variable: OTEL_EXPORTER_OTLP_PROTOCOL, OTEL_EXPORTER_OTLP_TRACES_PROTOCOL - protocol: http/protobuf - # Configure endpoint. - # - # Environment variable: OTEL_EXPORTER_OTLP_ENDPOINT, OTEL_EXPORTER_OTLP_TRACES_ENDPOINT - endpoint: http://localhost:4318 - # Configure certificate. 
- # - # Environment variable: OTEL_EXPORTER_OTLP_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE - certificate: /app/cert.pem - # Configure mTLS private client key. - # - # Environment variable: OTEL_EXPORTER_OTLP_CLIENT_KEY, OTEL_EXPORTER_OTLP_TRACES_CLIENT_KEY - client_key: /app/cert.pem - # Configure mTLS client certificate. - # - # Environment variable: OTEL_EXPORTER_OTLP_CLIENT_CERTIFICATE, OTEL_EXPORTER_OTLP_TRACES_CLIENT_CERTIFICATE - client_certificate: /app/cert.pem - # Configure headers. - # - # Environment variable: OTEL_EXPORTER_OTLP_HEADERS, OTEL_EXPORTER_OTLP_TRACES_HEADERS - headers: - api-key: !!str 1234 - # Configure compression. - # - # Environment variable: OTEL_EXPORTER_OTLP_COMPRESSION, OTEL_EXPORTER_OTLP_TRACES_COMPRESSION - compression: gzip - # Configure max time (in milliseconds) to wait for each export. - # - # Environment variable: OTEL_EXPORTER_OTLP_TIMEOUT, OTEL_EXPORTER_OTLP_TRACES_TIMEOUT - timeout: 10000 - # Configure client transport security for the exporter's connection. - # - # Environment variable: OTEL_EXPORTER_OTLP_INSECURE, OTEL_EXPORTER_OTLP_TRACES_INSECURE - insecure: false - # Configure a batch span processor. - - batch: - # Configure exporter. - # - # Environment variable: OTEL_TRACES_EXPORTER - exporter: - # Configure exporter to be zipkin. - zipkin: - # Configure endpoint. - # - # Environment variable: OTEL_EXPORTER_ZIPKIN_ENDPOINT - endpoint: http://localhost:9411/api/v2/spans - # Configure max time (in milliseconds) to wait for each export. - # - # Environment variable: OTEL_EXPORTER_ZIPKIN_TIMEOUT - timeout: 10000 - # Configure a simple span processor. - - simple: - # Configure exporter. - exporter: - # Configure exporter to be console. - console: {} - # Configure span limits. See also attribute_limits. - limits: - # Configure max span attribute value size. Overrides attribute_limits.attribute_value_length_limit. 
- # - # Environment variable: OTEL_SPAN_ATTRIBUTE_VALUE_LENGTH_LIMIT - attribute_value_length_limit: 4096 - # Configure max span attribute count. Overrides attribute_limits.attribute_count_limit. - # - # Environment variable: OTEL_SPAN_ATTRIBUTE_COUNT_LIMIT - attribute_count_limit: 128 - # Configure max span event count. - # - # Environment variable: OTEL_SPAN_EVENT_COUNT_LIMIT - event_count_limit: 128 - # Configure max span link count. - # - # Environment variable: OTEL_SPAN_LINK_COUNT_LIMIT - link_count_limit: 128 - # Configure max attributes per span event. - # - # Environment variable: OTEL_EVENT_ATTRIBUTE_COUNT_LIMIT - event_attribute_count_limit: 128 - # Configure max attributes per span link. - # - # Environment variable: OTEL_LINK_ATTRIBUTE_COUNT_LIMIT - link_attribute_count_limit: 128 - # Configure the sampler. - sampler: - # Configure sampler to be parent_based. Known values include: always_off, always_on, jaeger_remote, parent_based, trace_id_ratio_based. - # - # Environment variable: OTEL_TRACES_SAMPLER=parentbased_* - parent_based: - # Configure root sampler. - # - # Environment variable: OTEL_TRACES_SAMPLER=parentbased_traceidratio - root: - # Configure sampler to be trace_id_ratio_based. - trace_id_ratio_based: - # Configure trace_id_ratio. - # - # Environment variable: OTEL_TRACES_SAMPLER_ARG=traceidratio=0.0001 - ratio: 0.0001 - # Configure remote_parent_sampled sampler. - remote_parent_sampled: - # Configure sampler to be always_on. - always_on: {} - # Configure remote_parent_not_sampled sampler. - remote_parent_not_sampled: - # Configure sampler to be always_off. - always_off: {} - # Configure local_parent_sampled sampler. - local_parent_sampled: - # Configure sampler to be always_on. - always_on: {} - # Configure local_parent_not_sampled sampler. - local_parent_not_sampled: - # Configure sampler to be always_off. - always_off: {} - -# Configure resource for all signals. -resource: - # Configure resource attributes. 
- # - # Environment variable: OTEL_RESOURCE_ATTRIBUTES - attributes: - # Configure `service.name` resource attribute - # - # Environment variable: OTEL_SERVICE_NAME - service.name: !!str "unknown_service" - # Configure resource detectors. - detectors: - # Configure attributes provided by resource detectors. - attributes: - # Configure list of attribute key patterns to include from resource detectors. If not set, all attributes are included. - # - # Attribute keys from resource detectors are evaluated to match as follows: - # * If the value of the attribute key exactly matches. - # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. - included: - - process.* - # Configure list of attribute key patterns to exclude from resource detectors. Applies after .resource.detectors.attributes.included (i.e. excluded has higher priority than included). - # - # Attribute keys from resource detectors are evaluated to match as follows: - # * If the value of the attribute key exactly matches. - # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. - excluded: - - process.command_args - # Configure the resource schema URL. 
- schema_url: https://opentelemetry.io/schemas/1.16.0 diff --git a/config/testdata/v0.3.json b/config/testdata/v0.3.json deleted file mode 100644 index b789a9dfdb0..00000000000 --- a/config/testdata/v0.3.json +++ /dev/null @@ -1,419 +0,0 @@ -{ - "file_format": "0.3", - "disabled": false, - "attribute_limits": { - "attribute_value_length_limit": 4096, - "attribute_count_limit": 128 - }, - "logger_provider": { - "processors": [ - { - "batch": { - "schedule_delay": 5000, - "export_timeout": 30000, - "max_queue_size": 2048, - "max_export_batch_size": 512, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - "endpoint": "http://localhost:4318/v1/logs", - "certificate": "/app/cert.pem", - "client_key": "/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": [ - { - "name": "api-key", - "value": "1234" - } - ], - "headers_list": "api-key=1234", - "compression": "gzip", - "timeout": 10000, - "insecure": false - } - } - } - }, - { - "simple": { - "exporter": { - "console": {} - } - } - } - ], - "limits": { - "attribute_value_length_limit": 4096, - "attribute_count_limit": 128 - } - }, - "meter_provider": { - "readers": [ - { - "pull": { - "exporter": { - "prometheus": { - "host": "localhost", - "port": 9464, - "without_units": false, - "without_type_suffix": false, - "without_scope_info": false, - "with_resource_constant_labels": { - "included": [ - "service*" - ], - "excluded": [ - "service.attr1" - ] - } - } - } - }, - "producers": [ - { - "opencensus": {} - } - ] - }, - { - "periodic": { - "interval": 5000, - "timeout": 30000, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - "endpoint": "http://localhost:4318/v1/metrics", - "certificate": "/app/cert.pem", - "client_key": "/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": [ - { - "name": "api-key", - "value": "1234" - } - ], - "headers_list": "api-key=1234", - "compression": "gzip", - "timeout": 10000, - "insecure": false, - "temporality_preference": "delta", - 
"default_histogram_aggregation": "base2_exponential_bucket_histogram" - } - } - }, - "producers": [ - { - "prometheus": {} - } - ] - }, - { - "periodic": { - "exporter": { - "console": {} - } - } - } - ], - "views": [ - { - "selector": { - "instrument_name": "my-instrument", - "instrument_type": "histogram", - "unit": "ms", - "meter_name": "my-meter", - "meter_version": "1.0.0", - "meter_schema_url": "https://opentelemetry.io/schemas/1.16.0" - }, - "stream": { - "name": "new_instrument_name", - "description": "new_description", - "aggregation": { - "explicit_bucket_histogram": { - "boundaries": [ - 0, - 5, - 10, - 25, - 50, - 75, - 100, - 250, - 500, - 750, - 1000, - 2500, - 5000, - 7500, - 10000 - ], - "record_min_max": true - } - }, - "attribute_keys": { - "included": [ - "key1", - "key2" - ], - "excluded": [ - "key3" - ] - } - } - } - ] - }, - "propagator": { - "composite": [ - "tracecontext", - "baggage", - "b3", - "b3multi", - "jaeger", - "xray", - "ottrace" - ] - }, - "tracer_provider": { - "processors": [ - { - "batch": { - "schedule_delay": 5000, - "export_timeout": 30000, - "max_queue_size": 2048, - "max_export_batch_size": 512, - "exporter": { - "otlp": { - "protocol": "http/protobuf", - "endpoint": "http://localhost:4318/v1/traces", - "certificate": "/app/cert.pem", - "client_key": "/app/cert.pem", - "client_certificate": "/app/cert.pem", - "headers": [ - { - "name": "api-key", - "value": "1234" - } - ], - "headers_list": "api-key=1234", - "compression": "gzip", - "timeout": 10000, - "insecure": false - } - } - } - }, - { - "batch": { - "exporter": { - "zipkin": { - "endpoint": "http://localhost:9411/api/v2/spans", - "timeout": 10000 - } - } - } - }, - { - "simple": { - "exporter": { - "console": {} - } - } - } - ], - "limits": { - "attribute_value_length_limit": 4096, - "attribute_count_limit": 128, - "event_count_limit": 128, - "link_count_limit": 128, - "event_attribute_count_limit": 128, - "link_attribute_count_limit": 128 - }, - "sampler": { - 
"parent_based": { - "root": { - "trace_id_ratio_based": { - "ratio": 0.0001 - } - }, - "remote_parent_sampled": { - "always_on": {} - }, - "remote_parent_not_sampled": { - "always_off": {} - }, - "local_parent_sampled": { - "always_on": {} - }, - "local_parent_not_sampled": { - "always_off": {} - } - } - } - }, - "resource": { - "attributes": [ - { - "name": "service.name", - "value": "unknown_service" - }, - { - "name": "string_key", - "value": "value", - "type": "string" - }, - { - "name": "bool_key", - "value": true, - "type": "bool" - }, - { - "name": "int_key", - "value": 1, - "type": "int" - }, - { - "name": "double_key", - "value": 1.1, - "type": "double" - }, - { - "name": "string_array_key", - "value": [ - "value1", - "value2" - ], - "type": "string_array" - }, - { - "name": "bool_array_key", - "value": [ - true, - false - ], - "type": "bool_array" - }, - { - "name": "int_array_key", - "value": [ - 1, - 2 - ], - "type": "int_array" - }, - { - "name": "double_array_key", - "value": [ - 1.1, - 2.2 - ], - "type": "double_array" - } - ], - "attributes_list": "service.namespace=my-namespace,service.version=1.0.0", - "detectors": { - "attributes": { - "included": [ - "process.*" - ], - "excluded": [ - "process.command_args" - ] - } - }, - "schema_url": "https://opentelemetry.io/schemas/1.16.0" - }, - "instrumentation": { - "general": { - "peer": { - "service_mapping": [ - { - "peer": "1.2.3.4", - "service": "FooService" - }, - { - "peer": "2.3.4.5", - "service": "BarService" - } - ] - }, - "http": { - "client": { - "request_captured_headers": [ - "Content-Type", - "Accept" - ], - "response_captured_headers": [ - "Content-Type", - "Content-Encoding" - ] - }, - "server": { - "request_captured_headers": [ - "Content-Type", - "Accept" - ], - "response_captured_headers": [ - "Content-Type", - "Content-Encoding" - ] - } - } - }, - "cpp": { - "example": { - "property": "value" - } - }, - "dotnet": { - "example": { - "property": "value" - } - }, - "erlang": { - 
"example": { - "property": "value" - } - }, - "go": { - "example": { - "property": "value" - } - }, - "java": { - "example": { - "property": "value" - } - }, - "js": { - "example": { - "property": "value" - } - }, - "php": { - "example": { - "property": "value" - } - }, - "python": { - "example": { - "property": "value" - } - }, - "ruby": { - "example": { - "property": "value" - } - }, - "rust": { - "example": { - "property": "value" - } - }, - "swift": { - "example": { - "property": "value" - } - } - } -} \ No newline at end of file diff --git a/config/testdata/v0.3.yaml b/config/testdata/v0.3.yaml deleted file mode 100644 index 998e5c176a6..00000000000 --- a/config/testdata/v0.3.yaml +++ /dev/null @@ -1,466 +0,0 @@ -# kitchen-sink.yaml demonstrates all configurable surface area, including explanatory comments. -# -# It DOES NOT represent expected real world configuration, as it makes strange configuration -# choices in an effort to exercise the full surface area. -# -# Configuration values are set to their defaults when default values are defined. - -# The file format version. -file_format: "0.3" - -# Configure if the SDK is disabled or not. This is not required to be provided to ensure the SDK isn't disabled, the default value when this is not provided is for the SDK to be enabled. -disabled: false - -# Configure general attribute limits. See also tracer_provider.limits, logger_provider.limits. -attribute_limits: - # Configure max attribute value size. - attribute_value_length_limit: 4096 - # Configure max attribute count. - attribute_count_limit: 128 - -# Configure logger provider. -logger_provider: - # Configure log record processors. - processors: - - # Configure a batch log record processor. - batch: - # Configure delay interval (in milliseconds) between two consecutive exports. - schedule_delay: 5000 - # Configure maximum allowed time (in milliseconds) to export data. - export_timeout: 30000 - # Configure maximum queue size. 
- max_queue_size: 2048 - # Configure maximum batch size. - max_export_batch_size: 512 - # Configure exporter. - exporter: - # Configure exporter to be OTLP. - otlp: - # Configure protocol. - protocol: http/protobuf - # Configure endpoint. - endpoint: http://localhost:4318/v1/logs - # Configure certificate. - certificate: /app/cert.pem - # Configure mTLS private client key. - client_key: /app/cert.pem - # Configure mTLS client certificate. - client_certificate: /app/cert.pem - # Configure headers. Entries have higher priority than entries from .headers_list. - headers: - - name: api-key - value: "1234" - # Configure headers. Entries have lower priority than entries from .headers. - # The value is a list of comma separated key-value pairs matching the format of OTEL_EXPORTER_OTLP_HEADERS. See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options for details. - headers_list: "api-key=1234" - # Configure compression. - compression: gzip - # Configure max time (in milliseconds) to wait for each export. - timeout: 10000 - # Configure client transport security for the exporter's connection when http/https is not specified for gRPC connections. - insecure: false - - # Configure a simple log record processor. - simple: - # Configure exporter. - exporter: - # Configure exporter to be console. - console: {} - # Configure log record limits. See also attribute_limits. - limits: - # Configure max attribute value size. Overrides .attribute_limits.attribute_value_length_limit. - attribute_value_length_limit: 4096 - # Configure max attribute count. Overrides .attribute_limits.attribute_count_limit. - attribute_count_limit: 128 - -# Configure meter provider. -meter_provider: - # Configure metric readers. - readers: - - # Configure a pull based metric reader. - pull: - # Configure exporter. - exporter: - # Configure exporter to be prometheus. - prometheus: - # Configure host. 
- host: localhost - # Configure port. - port: 9464 - # Configure Prometheus Exporter to produce metrics without a unit suffix or UNIT metadata. - without_units: false - # Configure Prometheus Exporter to produce metrics without a type suffix. - without_type_suffix: false - # Configure Prometheus Exporter to produce metrics without a scope info metric. - without_scope_info: false - # Configure Prometheus Exporter to add resource attributes as metrics attributes. - with_resource_constant_labels: - # Configure resource attributes to be included. If not set, no resource attributes are included. - # Attribute keys from resources are evaluated to match as follows: - # * If the value of the attribute key exactly matches. - # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. - included: - - "service*" - # Configure resource attributes to be excluded. Applies after .with_resource_constant_labels.included (i.e. excluded has higher priority than included). - # Attribute keys from resources are evaluated to match as follows: - # * If the value of the attribute key exactly matches. - # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. - excluded: - - "service.attr1" - # Configure metric producers. - producers: - - # Configure metric producer to be opencensus. - opencensus: {} - - # Configure a periodic metric reader. - periodic: - # Configure delay interval (in milliseconds) between start of two consecutive exports. - interval: 5000 - # Configure maximum allowed time (in milliseconds) to export data. - timeout: 30000 - # Configure exporter. - exporter: - # Configure exporter to be OTLP. - otlp: - # Configure protocol. - protocol: http/protobuf - # Configure endpoint. - endpoint: http://localhost:4318/v1/metrics - # Configure certificate. 
- certificate: /app/cert.pem - # Configure mTLS private client key. - client_key: /app/cert.pem - # Configure mTLS client certificate. - client_certificate: /app/cert.pem - # Configure headers. Entries have higher priority than entries from .headers_list. - headers: - - name: api-key - value: "1234" - # Configure headers. Entries have lower priority than entries from .headers. - # The value is a list of comma separated key-value pairs matching the format of OTEL_EXPORTER_OTLP_HEADERS. See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options for details. - headers_list: "api-key=1234" - # Configure compression. - compression: gzip - # Configure max time (in milliseconds) to wait for each export. - timeout: 10000 - # Configure client transport security for the exporter's connection when http/https is not specified for gRPC connections. - insecure: false - # Configure temporality preference. - temporality_preference: delta - # Configure default histogram aggregation. - default_histogram_aggregation: base2_exponential_bucket_histogram - # Configure metric producers. - producers: - - # Configure metric producer to be prometheus. - prometheus: {} - - # Configure a periodic metric reader. - periodic: - # Configure exporter. - exporter: - # Configure exporter to be console. - console: {} - # Configure views. Each view has a selector which determines the instrument(s) it applies to, and a configuration for the resulting stream(s). - views: - - # Configure view selector. - selector: - # Configure instrument name selection criteria. - instrument_name: my-instrument - # Configure instrument type selection criteria. - instrument_type: histogram - # Configure the instrument unit selection criteria. - unit: ms - # Configure meter name selection criteria. - meter_name: my-meter - # Configure meter version selection criteria. - meter_version: 1.0.0 - # Configure meter schema url selection criteria. 
- meter_schema_url: https://opentelemetry.io/schemas/1.16.0 - # Configure view stream. - stream: - # Configure metric name of the resulting stream(s). - name: new_instrument_name - # Configure metric description of the resulting stream(s). - description: new_description - # Configure aggregation of the resulting stream(s). Known values include: default, drop, explicit_bucket_histogram, base2_exponential_bucket_histogram, last_value, sum. - aggregation: - # Configure aggregation to be explicit_bucket_histogram. - explicit_bucket_histogram: - # Configure bucket boundaries. - boundaries: - [ - 0.0, - 5.0, - 10.0, - 25.0, - 50.0, - 75.0, - 100.0, - 250.0, - 500.0, - 750.0, - 1000.0, - 2500.0, - 5000.0, - 7500.0, - 10000.0 - ] - # Configure record min and max. - record_min_max: true - # Configure attribute keys retained in the resulting stream(s). - attribute_keys: - # Configure list of attribute keys to include in the resulting stream(s). All other attributes are dropped. If not set, stream attributes are not configured. - included: - - key1 - - key2 - # Configure list of attribute keys to exclude from the resulting stream(s). Applies after .attribute_keys.included (i.e. excluded has higher priority than included). - excluded: - - key3 - -# Configure text map context propagators. -propagator: - # Configure the set of propagators to include in the composite text map propagator. - composite: [ tracecontext, baggage, b3, b3multi, jaeger, xray, ottrace ] - -# Configure tracer provider. -tracer_provider: - # Configure span processors. - processors: - - # Configure a batch span processor. - batch: - # Configure delay interval (in milliseconds) between two consecutive exports. - schedule_delay: 5000 - # Configure maximum allowed time (in milliseconds) to export data. - export_timeout: 30000 - # Configure maximum queue size. - max_queue_size: 2048 - # Configure maximum batch size. - max_export_batch_size: 512 - # Configure exporter. 
- exporter: - # Configure exporter to be OTLP. - otlp: - # Configure protocol. - protocol: http/protobuf - # Configure endpoint. - endpoint: http://localhost:4318/v1/traces - # Configure certificate. - certificate: /app/cert.pem - # Configure mTLS private client key. - client_key: /app/cert.pem - # Configure mTLS client certificate. - client_certificate: /app/cert.pem - # Configure headers. Entries have higher priority than entries from .headers_list. - headers: - - name: api-key - value: "1234" - # Configure headers. Entries have lower priority than entries from .headers. - # The value is a list of comma separated key-value pairs matching the format of OTEL_EXPORTER_OTLP_HEADERS. See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#configuration-options for details. - headers_list: "api-key=1234" - # Configure compression. - compression: gzip - # Configure max time (in milliseconds) to wait for each export. - timeout: 10000 - # Configure client transport security for the exporter's connection when http/https is not specified for gRPC connections. - insecure: false - - # Configure a batch span processor. - batch: - # Configure exporter. - exporter: - # Configure exporter to be zipkin. - zipkin: - # Configure endpoint. - endpoint: http://localhost:9411/api/v2/spans - # Configure max time (in milliseconds) to wait for each export. - timeout: 10000 - - # Configure a simple span processor. - simple: - # Configure exporter. - exporter: - # Configure exporter to be console. - console: {} - # Configure span limits. See also attribute_limits. - limits: - # Configure max attribute value size. Overrides .attribute_limits.attribute_value_length_limit. - attribute_value_length_limit: 4096 - # Configure max attribute count. Overrides .attribute_limits.attribute_count_limit. - attribute_count_limit: 128 - # Configure max span event count. - event_count_limit: 128 - # Configure max span link count. 
- link_count_limit: 128 - # Configure max attributes per span event. - event_attribute_count_limit: 128 - # Configure max attributes per span link. - link_attribute_count_limit: 128 - # Configure the sampler. - sampler: - # Configure sampler to be parent_based. - parent_based: - # Configure root sampler. - root: - # Configure sampler to be trace_id_ratio_based. - trace_id_ratio_based: - # Configure trace_id_ratio. - ratio: 0.0001 - # Configure remote_parent_sampled sampler. - remote_parent_sampled: - # Configure sampler to be always_on. - always_on: {} - # Configure remote_parent_not_sampled sampler. - remote_parent_not_sampled: - # Configure sampler to be always_off. - always_off: {} - # Configure local_parent_sampled sampler. - local_parent_sampled: - # Configure sampler to be always_on. - always_on: {} - # Configure local_parent_not_sampled sampler. - local_parent_not_sampled: - # Configure sampler to be always_off. - always_off: {} - -# Configure resource for all signals. -resource: - # Configure resource attributes. Entries have higher priority than entries from .resource.attributes_list. - # Entries must contain .name nand .value, and may optionally include .type, which defaults ot "string" if not set. The value must match the type. Values for .type include: string, bool, int, double, string_array, bool_array, int_array, double_array. - attributes: - - name: service.name - value: unknown_service - - name: string_key - value: value - type: string - - name: bool_key - value: true - type: bool - - name: int_key - value: 1 - type: int - - name: double_key - value: 1.1 - type: double - - name: string_array_key - value: [ "value1", "value2" ] - type: string_array - - name: bool_array_key - value: [ true, false ] - type: bool_array - - name: int_array_key - value: [ 1, 2 ] - type: int_array - - name: double_array_key - value: [ 1.1, 2.2 ] - type: double_array - # Configure resource attributes. Entries have lower priority than entries from .resource.attributes. 
- # The value is a list of comma separated key-value pairs matching the format of OTEL_RESOURCE_ATTRIBUTES. See https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/configuration/sdk-environment-variables.md#general-sdk-configuration for details. - attributes_list: "service.namespace=my-namespace,service.version=1.0.0" - # Configure resource detectors. - detectors: - # Configure attributes provided by resource detectors. - attributes: - # Configure list of attribute key patterns to include from resource detectors. If not set, all attributes are included. - # Attribute keys from resource detectors are evaluated to match as follows: - # * If the value of the attribute key exactly matches. - # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. - included: - - process.* - # Configure list of attribute key patterns to exclude from resource detectors. Applies after .resource.detectors.attributes.included (i.e. excluded has higher priority than included). - # Attribute keys from resource detectors are evaluated to match as follows: - # * If the value of the attribute key exactly matches. - # * If the value of the attribute key matches the wildcard pattern, where '?' matches any single character and '*' matches any number of characters including none. - excluded: - - process.command_args - # Configure resource schema URL. - schema_url: https://opentelemetry.io/schemas/1.16.0 - -# Configure instrumentation. -instrumentation: - # Configure general SemConv options that may apply to multiple languages and instrumentations. - # Instrumenation may merge general config options with the language specific configuration at .instrumentation.. - general: - # Configure instrumentations following the peer semantic conventions. 
- # See peer semantic conventions: https://opentelemetry.io/docs/specs/semconv/attributes-registry/peer/ - peer: - # Configure the service mapping for instrumentations following peer.service semantic conventions. - # Each entry is a key value pair where "peer" defines the IP address and "service" defines the corresponding logical name of the service. - # See peer.service semantic conventions: https://opentelemetry.io/docs/specs/semconv/general/attributes/#general-remote-service-attributes - service_mapping: - - peer: 1.2.3.4 - service: FooService - - peer: 2.3.4.5 - service: BarService - # Configure instrumentations following the http semantic conventions. - # See http semantic conventions: https://opentelemetry.io/docs/specs/semconv/http/ - http: - # Configure instrumentations following the http client semantic conventions. - client: - # Configure headers to capture for outbound http requests. - request_captured_headers: - - Content-Type - - Accept - # Configure headers to capture for outbound http responses. - response_captured_headers: - - Content-Type - - Content-Encoding - # Configure instrumentations following the http server semantic conventions. - server: - # Configure headers to capture for inbound http requests. - request_captured_headers: - - Content-Type - - Accept - # Configure headers to capture for outbound http responses. - response_captured_headers: - - Content-Type - - Content-Encoding - # Configure C++ language-specific instrumentation libraries. - cpp: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure .NET language-specific instrumentation libraries. - dotnet: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure Erlang language-specific instrumentation libraries. - erlang: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure Go language-specific instrumentation libraries. 
- go: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure Java language-specific instrumentation libraries. - java: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure JavaScript language-specific instrumentation libraries. - js: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure PHP language-specific instrumentation libraries. - php: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure Python language-specific instrumentation libraries. - python: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure Ruby language-specific instrumentation libraries. - ruby: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure Rust language-specific instrumentation libraries. - rust: - # Configure the instrumentation corresponding to key "example". - example: - property: "value" - # Configure Swift language-specific instrumentation libraries. - swift: - # Configure the instrumentation corresponding to key "example". 
- example: - property: "value" diff --git a/config/testdata/valid_empty.json b/config/testdata/valid_empty.json deleted file mode 100644 index 45e9de273b0..00000000000 --- a/config/testdata/valid_empty.json +++ /dev/null @@ -1 +0,0 @@ -{"file_format": "0.1", "disabled": false} \ No newline at end of file diff --git a/config/testdata/valid_empty.yaml b/config/testdata/valid_empty.yaml deleted file mode 100644 index dbcebaa0ca3..00000000000 --- a/config/testdata/valid_empty.yaml +++ /dev/null @@ -1,2 +0,0 @@ -file_format: 0.1 -disabled: false \ No newline at end of file diff --git a/config/v0.2.0/config.go b/config/v0.2.0/config.go deleted file mode 100644 index f5ad085452f..00000000000 --- a/config/v0.2.0/config.go +++ /dev/null @@ -1,156 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package config is deprecated. -// -// Deprecated: use [go.opentelemetry.io/contrib/otelconf/v0.2.0] instead. -// This is the last release of this module. -package config // import "go.opentelemetry.io/contrib/config/v0.2.0" - -import ( - "context" - "errors" - - "gopkg.in/yaml.v3" - - "go.opentelemetry.io/otel/log" - nooplog "go.opentelemetry.io/otel/log/noop" - "go.opentelemetry.io/otel/metric" - noopmetric "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/trace" - nooptrace "go.opentelemetry.io/otel/trace/noop" -) - -const ( - protocolProtobufHTTP = "http/protobuf" - protocolProtobufGRPC = "grpc/protobuf" - - compressionGzip = "gzip" - compressionNone = "none" -) - -type configOptions struct { - ctx context.Context - opentelemetryConfig OpenTelemetryConfiguration -} - -type shutdownFunc func(context.Context) error - -func noopShutdown(context.Context) error { - return nil -} - -// SDK is a struct that contains all the providers -// configured via the configuration model. 
-type SDK struct { - meterProvider metric.MeterProvider - tracerProvider trace.TracerProvider - loggerProvider log.LoggerProvider - shutdown shutdownFunc -} - -// TracerProvider returns a configured trace.TracerProvider. -func (s *SDK) TracerProvider() trace.TracerProvider { - return s.tracerProvider -} - -// MeterProvider returns a configured metric.MeterProvider. -func (s *SDK) MeterProvider() metric.MeterProvider { - return s.meterProvider -} - -// LoggerProvider returns a configured log.LoggerProvider. -func (s *SDK) LoggerProvider() log.LoggerProvider { - return s.loggerProvider -} - -// Shutdown calls shutdown on all configured providers. -func (s *SDK) Shutdown(ctx context.Context) error { - return s.shutdown(ctx) -} - -var noopSDK = SDK{ - loggerProvider: nooplog.LoggerProvider{}, - meterProvider: noopmetric.MeterProvider{}, - tracerProvider: nooptrace.TracerProvider{}, - shutdown: func(ctx context.Context) error { return nil }, -} - -// NewSDK creates SDK providers based on the configuration model. -func NewSDK(opts ...ConfigurationOption) (SDK, error) { - o := configOptions{} - for _, opt := range opts { - o = opt.apply(o) - } - if o.opentelemetryConfig.Disabled != nil && *o.opentelemetryConfig.Disabled { - return noopSDK, nil - } - - r, err := newResource(o.opentelemetryConfig.Resource) - if err != nil { - return noopSDK, err - } - - mp, mpShutdown, err := meterProvider(o, r) - if err != nil { - return noopSDK, err - } - - tp, tpShutdown, err := tracerProvider(o, r) - if err != nil { - return noopSDK, err - } - - lp, lpShutdown, err := loggerProvider(o, r) - if err != nil { - return noopSDK, err - } - - return SDK{ - meterProvider: mp, - tracerProvider: tp, - loggerProvider: lp, - shutdown: func(ctx context.Context) error { - return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) - }, - }, nil -} - -// ConfigurationOption configures options for providers. 
-type ConfigurationOption interface { - apply(configOptions) configOptions -} - -type configurationOptionFunc func(configOptions) configOptions - -func (fn configurationOptionFunc) apply(cfg configOptions) configOptions { - return fn(cfg) -} - -// WithContext sets the context.Context for the SDK. -func WithContext(ctx context.Context) ConfigurationOption { - return configurationOptionFunc(func(c configOptions) configOptions { - c.ctx = ctx - return c - }) -} - -// WithOpenTelemetryConfiguration sets the OpenTelemetryConfiguration used -// to produce the SDK. -func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) ConfigurationOption { - return configurationOptionFunc(func(c configOptions) configOptions { - c.opentelemetryConfig = cfg - return c - }) -} - -// ParseYAML parses a YAML configuration file into an OpenTelemetryConfiguration. -func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) { - var cfg OpenTelemetryConfiguration - err := yaml.Unmarshal(file, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} diff --git a/config/v0.2.0/config_test.go b/config/v0.2.0/config_test.go deleted file mode 100644 index 653c6e978fd..00000000000 --- a/config/v0.2.0/config_test.go +++ /dev/null @@ -1,383 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "encoding/json" - "errors" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - lognoop "go.opentelemetry.io/otel/log/noop" - metricnoop "go.opentelemetry.io/otel/metric/noop" - sdklog "go.opentelemetry.io/otel/sdk/log" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - tracenoop "go.opentelemetry.io/otel/trace/noop" -) - -func TestNewSDK(t *testing.T) { - tests := []struct { - name string - cfg []ConfigurationOption - wantTracerProvider any - wantMeterProvider any - wantLoggerProvider any 
- wantErr error - wantShutdownErr error - }{ - { - name: "no-configuration", - wantTracerProvider: tracenoop.NewTracerProvider(), - wantMeterProvider: metricnoop.NewMeterProvider(), - wantLoggerProvider: lognoop.NewLoggerProvider(), - }, - { - name: "with-configuration", - cfg: []ConfigurationOption{ - WithContext(context.Background()), - WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ - TracerProvider: &TracerProvider{}, - MeterProvider: &MeterProvider{}, - LoggerProvider: &LoggerProvider{}, - }), - }, - wantTracerProvider: &sdktrace.TracerProvider{}, - wantMeterProvider: &sdkmetric.MeterProvider{}, - wantLoggerProvider: &sdklog.LoggerProvider{}, - }, - { - name: "with-sdk-disabled", - cfg: []ConfigurationOption{ - WithContext(context.Background()), - WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ - Disabled: ptr(true), - TracerProvider: &TracerProvider{}, - MeterProvider: &MeterProvider{}, - LoggerProvider: &LoggerProvider{}, - }), - }, - wantTracerProvider: tracenoop.NewTracerProvider(), - wantMeterProvider: metricnoop.NewMeterProvider(), - wantLoggerProvider: lognoop.NewLoggerProvider(), - }, - } - for _, tt := range tests { - sdk, err := NewSDK(tt.cfg...) 
- require.Equal(t, tt.wantErr, err) - assert.IsType(t, tt.wantTracerProvider, sdk.TracerProvider()) - assert.IsType(t, tt.wantMeterProvider, sdk.MeterProvider()) - assert.IsType(t, tt.wantLoggerProvider, sdk.LoggerProvider()) - require.Equal(t, tt.wantShutdownErr, sdk.Shutdown(context.Background())) - } -} - -var v02OpenTelemetryConfig = OpenTelemetryConfiguration{ - Disabled: ptr(false), - FileFormat: "0.2", - AttributeLimits: &AttributeLimits{ - AttributeCountLimit: ptr(128), - AttributeValueLengthLimit: ptr(4096), - }, - LoggerProvider: &LoggerProvider{ - Limits: &LogRecordLimits{ - AttributeCountLimit: ptr(128), - AttributeValueLengthLimit: ptr(4096), - }, - Processors: []LogRecordProcessor{ - { - Batch: &BatchLogRecordProcessor{ - ExportTimeout: ptr(30000), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Certificate: ptr("/app/cert.pem"), - ClientCertificate: ptr("/app/cert.pem"), - ClientKey: ptr("/app/cert.pem"), - Compression: ptr("gzip"), - Endpoint: "http://localhost:4318", - Headers: Headers{ - "api-key": "1234", - }, - Insecure: ptr(false), - Protocol: "http/protobuf", - Timeout: ptr(10000), - }, - }, - MaxExportBatchSize: ptr(512), - MaxQueueSize: ptr(2048), - ScheduleDelay: ptr(5000), - }, - }, - { - Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{ - Console: Console{}, - }, - }, - }, - }, - }, - MeterProvider: &MeterProvider{ - Readers: []MetricReader{ - { - Pull: &PullMetricReader{ - Exporter: MetricExporter{ - Prometheus: &Prometheus{ - Host: ptr("localhost"), - Port: ptr(9464), - WithResourceConstantLabels: &IncludeExclude{ - Excluded: []string{"service.attr1"}, - Included: []string{"service*"}, - }, - WithoutScopeInfo: ptr(false), - WithoutTypeSuffix: ptr(false), - WithoutUnits: ptr(false), - }, - }, - }, - }, - { - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Certificate: ptr("/app/cert.pem"), - ClientCertificate: ptr("/app/cert.pem"), - ClientKey: ptr("/app/cert.pem"), - Compression: 
ptr("gzip"), - DefaultHistogramAggregation: ptr(OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram), - Endpoint: "http://localhost:4318", - Headers: Headers{ - "api-key": "1234", - }, - Insecure: ptr(false), - Protocol: "http/protobuf", - TemporalityPreference: ptr("delta"), - Timeout: ptr(10000), - }, - }, - Interval: ptr(5000), - Timeout: ptr(30000), - }, - }, - { - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - Console: Console{}, - }, - }, - }, - }, - Views: []View{ - { - Selector: &ViewSelector{ - InstrumentName: ptr("my-instrument"), - InstrumentType: ptr(ViewSelectorInstrumentTypeHistogram), - MeterName: ptr("my-meter"), - MeterSchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), - MeterVersion: ptr("1.0.0"), - Unit: ptr("ms"), - }, - Stream: &ViewStream{ - Aggregation: &ViewStreamAggregation{ - ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{ - Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, - RecordMinMax: ptr(true), - }, - }, - AttributeKeys: []string{"key1", "key2"}, - Description: ptr("new_description"), - Name: ptr("new_instrument_name"), - }, - }, - }, - }, - Propagator: &Propagator{ - Composite: []string{"tracecontext", "baggage", "b3", "b3multi", "jaeger", "xray", "ottrace"}, - }, - Resource: &Resource{ - Attributes: Attributes{ - "service.name": "unknown_service", - }, - Detectors: &Detectors{ - Attributes: &DetectorsAttributes{ - Excluded: []string{"process.command_args"}, - Included: []string{"process.*"}, - }, - }, - SchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), - }, - TracerProvider: &TracerProvider{ - Limits: &SpanLimits{ - AttributeCountLimit: ptr(128), - AttributeValueLengthLimit: ptr(4096), - EventCountLimit: ptr(128), - EventAttributeCountLimit: ptr(128), - LinkCountLimit: ptr(128), - LinkAttributeCountLimit: ptr(128), - }, - Processors: []SpanProcessor{ - { - Batch: &BatchSpanProcessor{ - ExportTimeout: ptr(30000), - 
Exporter: SpanExporter{ - OTLP: &OTLP{ - Certificate: ptr("/app/cert.pem"), - ClientCertificate: ptr("/app/cert.pem"), - ClientKey: ptr("/app/cert.pem"), - Compression: ptr("gzip"), - Endpoint: "http://localhost:4318", - Headers: Headers{ - "api-key": "1234", - }, - Insecure: ptr(false), - Protocol: "http/protobuf", - Timeout: ptr(10000), - }, - }, - MaxExportBatchSize: ptr(512), - MaxQueueSize: ptr(2048), - ScheduleDelay: ptr(5000), - }, - }, - { - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - Zipkin: &Zipkin{ - Endpoint: "http://localhost:9411/api/v2/spans", - Timeout: ptr(10000), - }, - }, - }, - }, - { - Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - }, - Sampler: &Sampler{ - ParentBased: &SamplerParentBased{ - LocalParentNotSampled: &Sampler{ - AlwaysOff: SamplerAlwaysOff{}, - }, - LocalParentSampled: &Sampler{ - AlwaysOn: SamplerAlwaysOn{}, - }, - RemoteParentNotSampled: &Sampler{ - AlwaysOff: SamplerAlwaysOff{}, - }, - RemoteParentSampled: &Sampler{ - AlwaysOn: SamplerAlwaysOn{}, - }, - Root: &Sampler{ - TraceIDRatioBased: &SamplerTraceIDRatioBased{ - Ratio: ptr(0.0001), - }, - }, - }, - }, - }, -} - -func TestParseYAML(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - wantType interface{} - }{ - { - name: "valid YAML config", - input: `valid_empty.yaml`, - wantErr: nil, - wantType: &OpenTelemetryConfiguration{ - Disabled: ptr(false), - FileFormat: "0.1", - }, - }, - { - name: "invalid config", - input: "invalid_bool.yaml", - wantErr: errors.New(`yaml: unmarshal errors: - line 2: cannot unmarshal !!str ` + "`notabool`" + ` into bool`), - }, - { - name: "valid v0.2 config", - input: "v0.2.yaml", - wantType: &v02OpenTelemetryConfig, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) - require.NoError(t, err) - - got, err := ParseYAML(b) - if tt.wantErr != nil { - 
require.Equal(t, tt.wantErr.Error(), err.Error()) - } else { - require.NoError(t, err) - assert.Equal(t, tt.wantType, got) - } - }) - } -} - -func TestSerializeJSON(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - wantType interface{} - }{ - { - name: "valid JSON config", - input: `valid_empty.json`, - wantErr: nil, - wantType: OpenTelemetryConfiguration{ - Disabled: ptr(false), - FileFormat: "0.1", - }, - }, - { - name: "invalid config", - input: "invalid_bool.json", - wantErr: errors.New(`json: cannot unmarshal string into Go struct field Plain.disabled of type bool`), - }, - { - name: "valid v0.2 config", - input: "v0.2.json", - wantType: v02OpenTelemetryConfig, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) - require.NoError(t, err) - - var got OpenTelemetryConfiguration - err = json.Unmarshal(b, &got) - - if tt.wantErr != nil { - require.Equal(t, tt.wantErr.Error(), err.Error()) - } else { - require.NoError(t, err) - assert.Equal(t, tt.wantType, got) - } - }) - } -} - -func ptr[T any](v T) *T { - return &v -} diff --git a/config/v0.2.0/generated_config.go b/config/v0.2.0/generated_config.go deleted file mode 100644 index 2315641db64..00000000000 --- a/config/v0.2.0/generated_config.go +++ /dev/null @@ -1,780 +0,0 @@ -// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. - -package config - -import "encoding/json" -import "fmt" -import "reflect" - -type AttributeLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". 
- AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` - - AdditionalProperties interface{} -} - -type Attributes map[string]interface{} - -type BatchLogRecordProcessor struct { - // ExportTimeout corresponds to the JSON schema field "export_timeout". - ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` - - // Exporter corresponds to the JSON schema field "exporter". - Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // MaxExportBatchSize corresponds to the JSON schema field - // "max_export_batch_size". - MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` - - // MaxQueueSize corresponds to the JSON schema field "max_queue_size". - MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` - - // ScheduleDelay corresponds to the JSON schema field "schedule_delay". - ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in BatchLogRecordProcessor: required") - } - type Plain BatchLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchLogRecordProcessor(plain) - return nil -} - -type BatchSpanProcessor struct { - // ExportTimeout corresponds to the JSON schema field "export_timeout". 
- ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` - - // Exporter corresponds to the JSON schema field "exporter". - Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // MaxExportBatchSize corresponds to the JSON schema field - // "max_export_batch_size". - MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` - - // MaxQueueSize corresponds to the JSON schema field "max_queue_size". - MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` - - // ScheduleDelay corresponds to the JSON schema field "schedule_delay". - ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in BatchSpanProcessor: required") - } - type Plain BatchSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchSpanProcessor(plain) - return nil -} - -type Common map[string]interface{} - -type Console map[string]interface{} - -type Detectors struct { - // Attributes corresponds to the JSON schema field "attributes". - Attributes *DetectorsAttributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` -} - -type DetectorsAttributes struct { - // Excluded corresponds to the JSON schema field "excluded". 
- Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` - - // Included corresponds to the JSON schema field "included". - Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` -} - -type Headers map[string]string - -type IncludeExclude struct { - // Excluded corresponds to the JSON schema field "excluded". - Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` - - // Included corresponds to the JSON schema field "included". - Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` -} - -type LogRecordExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - AdditionalProperties interface{} -} - -type LogRecordLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". - AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` -} - -type LogRecordProcessor struct { - // Batch corresponds to the JSON schema field "batch". - Batch *BatchLogRecordProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` - - // Simple corresponds to the JSON schema field "simple". 
- Simple *SimpleLogRecordProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` - - AdditionalProperties interface{} -} - -type LoggerProvider struct { - // Limits corresponds to the JSON schema field "limits". - Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Processors corresponds to the JSON schema field "processors". - Processors []LogRecordProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` -} - -type MeterProvider struct { - // Readers corresponds to the JSON schema field "readers". - Readers []MetricReader `json:"readers,omitempty" yaml:"readers,omitempty" mapstructure:"readers,omitempty"` - - // Views corresponds to the JSON schema field "views". - Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"` -} - -type MetricExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLPMetric `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *Prometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - AdditionalProperties interface{} -} - -type MetricReader struct { - // Periodic corresponds to the JSON schema field "periodic". - Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"` - - // Pull corresponds to the JSON schema field "pull". - Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"` -} - -type OTLP struct { - // Certificate corresponds to the JSON schema field "certificate". 
- Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` - - // ClientCertificate corresponds to the JSON schema field "client_certificate". - ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` - - // ClientKey corresponds to the JSON schema field "client_key". - ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` - - // Compression corresponds to the JSON schema field "compression". - Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` - - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Headers corresponds to the JSON schema field "headers". - Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` - - // Insecure corresponds to the JSON schema field "insecure". - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -type OTLPMetric struct { - // Certificate corresponds to the JSON schema field "certificate". - Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` - - // ClientCertificate corresponds to the JSON schema field "client_certificate". 
- ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` - - // ClientKey corresponds to the JSON schema field "client_key". - ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` - - // Compression corresponds to the JSON schema field "compression". - Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` - - // DefaultHistogramAggregation corresponds to the JSON schema field - // "default_histogram_aggregation". - DefaultHistogramAggregation *OTLPMetricDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` - - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Headers corresponds to the JSON schema field "headers". - Headers Headers `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` - - // Insecure corresponds to the JSON schema field "insecure". - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // TemporalityPreference corresponds to the JSON schema field - // "temporality_preference". - TemporalityPreference *string `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` - - // Timeout corresponds to the JSON schema field "timeout". 
- Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -type OTLPMetricDefaultHistogramAggregation string - -const OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OTLPMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram" -const OTLPMetricDefaultHistogramAggregationExplicitBucketHistogram OTLPMetricDefaultHistogramAggregation = "explicit_bucket_histogram" - -var enumValues_OTLPMetricDefaultHistogramAggregation = []interface{}{ - "explicit_bucket_histogram", - "base2_exponential_bucket_histogram", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_OTLPMetricDefaultHistogramAggregation { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_OTLPMetricDefaultHistogramAggregation, v) - } - *j = OTLPMetricDefaultHistogramAggregation(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetric) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in OTLPMetric: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return fmt.Errorf("field protocol in OTLPMetric: required") - } - type Plain OTLPMetric - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLPMetric(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *OTLP) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in OTLP: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return fmt.Errorf("field protocol in OTLP: required") - } - type Plain OTLP - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLP(plain) - return nil -} - -type OpenTelemetryConfiguration struct { - // AttributeLimits corresponds to the JSON schema field "attribute_limits". - AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"` - - // Disabled corresponds to the JSON schema field "disabled". - Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` - - // FileFormat corresponds to the JSON schema field "file_format". - FileFormat string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` - - // LoggerProvider corresponds to the JSON schema field "logger_provider". - LoggerProvider *LoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"` - - // MeterProvider corresponds to the JSON schema field "meter_provider". - MeterProvider *MeterProvider `json:"meter_provider,omitempty" yaml:"meter_provider,omitempty" mapstructure:"meter_provider,omitempty"` - - // Propagator corresponds to the JSON schema field "propagator". - Propagator *Propagator `json:"propagator,omitempty" yaml:"propagator,omitempty" mapstructure:"propagator,omitempty"` - - // Resource corresponds to the JSON schema field "resource". - Resource *Resource `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"` - - // TracerProvider corresponds to the JSON schema field "tracer_provider". 
- TracerProvider *TracerProvider `json:"tracer_provider,omitempty" yaml:"tracer_provider,omitempty" mapstructure:"tracer_provider,omitempty"` - - AdditionalProperties interface{} -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["file_format"]; raw != nil && !ok { - return fmt.Errorf("field file_format in OpenTelemetryConfiguration: required") - } - type Plain OpenTelemetryConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OpenTelemetryConfiguration(plain) - return nil -} - -type PeriodicMetricReader struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // Interval corresponds to the JSON schema field "interval". - Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in PeriodicMetricReader: required") - } - type Plain PeriodicMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PeriodicMetricReader(plain) - return nil -} - -type Prometheus struct { - // Host corresponds to the JSON schema field "host". - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // Port corresponds to the JSON schema field "port". 
- Port *int `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // WithResourceConstantLabels corresponds to the JSON schema field - // "with_resource_constant_labels". - WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"` - - // WithoutScopeInfo corresponds to the JSON schema field "without_scope_info". - WithoutScopeInfo *bool `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"` - - // WithoutTypeSuffix corresponds to the JSON schema field "without_type_suffix". - WithoutTypeSuffix *bool `json:"without_type_suffix,omitempty" yaml:"without_type_suffix,omitempty" mapstructure:"without_type_suffix,omitempty"` - - // WithoutUnits corresponds to the JSON schema field "without_units". - WithoutUnits *bool `json:"without_units,omitempty" yaml:"without_units,omitempty" mapstructure:"without_units,omitempty"` -} - -type Propagator struct { - // Composite corresponds to the JSON schema field "composite". - Composite []string `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"` - - AdditionalProperties interface{} -} - -type PullMetricReader struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter MetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *PullMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in PullMetricReader: required") - } - type Plain PullMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PullMetricReader(plain) - return nil -} - -type Resource struct { - // Attributes corresponds to the JSON schema field "attributes". - Attributes Attributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` - - // Detectors corresponds to the JSON schema field "detectors". - Detectors *Detectors `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` - - // SchemaUrl corresponds to the JSON schema field "schema_url". - SchemaUrl *string `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"` -} - -type Sampler struct { - // AlwaysOff corresponds to the JSON schema field "always_off". - AlwaysOff SamplerAlwaysOff `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` - - // AlwaysOn corresponds to the JSON schema field "always_on". - AlwaysOn SamplerAlwaysOn `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` - - // JaegerRemote corresponds to the JSON schema field "jaeger_remote". - JaegerRemote *SamplerJaegerRemote `json:"jaeger_remote,omitempty" yaml:"jaeger_remote,omitempty" mapstructure:"jaeger_remote,omitempty"` - - // ParentBased corresponds to the JSON schema field "parent_based". - ParentBased *SamplerParentBased `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"` - - // TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based". 
- TraceIDRatioBased *SamplerTraceIDRatioBased `json:"trace_id_ratio_based,omitempty" yaml:"trace_id_ratio_based,omitempty" mapstructure:"trace_id_ratio_based,omitempty"` - - AdditionalProperties interface{} -} - -type SamplerAlwaysOff map[string]interface{} - -type SamplerAlwaysOn map[string]interface{} - -type SamplerJaegerRemote struct { - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // InitialSampler corresponds to the JSON schema field "initial_sampler". - InitialSampler *Sampler `json:"initial_sampler,omitempty" yaml:"initial_sampler,omitempty" mapstructure:"initial_sampler,omitempty"` - - // Interval corresponds to the JSON schema field "interval". - Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` -} - -type SamplerParentBased struct { - // LocalParentNotSampled corresponds to the JSON schema field - // "local_parent_not_sampled". - LocalParentNotSampled *Sampler `json:"local_parent_not_sampled,omitempty" yaml:"local_parent_not_sampled,omitempty" mapstructure:"local_parent_not_sampled,omitempty"` - - // LocalParentSampled corresponds to the JSON schema field "local_parent_sampled". - LocalParentSampled *Sampler `json:"local_parent_sampled,omitempty" yaml:"local_parent_sampled,omitempty" mapstructure:"local_parent_sampled,omitempty"` - - // RemoteParentNotSampled corresponds to the JSON schema field - // "remote_parent_not_sampled". - RemoteParentNotSampled *Sampler `json:"remote_parent_not_sampled,omitempty" yaml:"remote_parent_not_sampled,omitempty" mapstructure:"remote_parent_not_sampled,omitempty"` - - // RemoteParentSampled corresponds to the JSON schema field - // "remote_parent_sampled". 
- RemoteParentSampled *Sampler `json:"remote_parent_sampled,omitempty" yaml:"remote_parent_sampled,omitempty" mapstructure:"remote_parent_sampled,omitempty"` - - // Root corresponds to the JSON schema field "root". - Root *Sampler `json:"root,omitempty" yaml:"root,omitempty" mapstructure:"root,omitempty"` -} - -type SamplerTraceIDRatioBased struct { - // Ratio corresponds to the JSON schema field "ratio". - Ratio *float64 `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` -} - -type SimpleLogRecordProcessor struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in SimpleLogRecordProcessor: required") - } - type Plain SimpleLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleLogRecordProcessor(plain) - return nil -} - -type SimpleSpanProcessor struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return fmt.Errorf("field exporter in SimpleSpanProcessor: required") - } - type Plain SimpleSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleSpanProcessor(plain) - return nil -} - -type SpanExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - // Zipkin corresponds to the JSON schema field "zipkin". - Zipkin *Zipkin `json:"zipkin,omitempty" yaml:"zipkin,omitempty" mapstructure:"zipkin,omitempty"` - - AdditionalProperties interface{} -} - -type SpanLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". - AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` - - // EventAttributeCountLimit corresponds to the JSON schema field - // "event_attribute_count_limit". - EventAttributeCountLimit *int `json:"event_attribute_count_limit,omitempty" yaml:"event_attribute_count_limit,omitempty" mapstructure:"event_attribute_count_limit,omitempty"` - - // EventCountLimit corresponds to the JSON schema field "event_count_limit". 
- EventCountLimit *int `json:"event_count_limit,omitempty" yaml:"event_count_limit,omitempty" mapstructure:"event_count_limit,omitempty"` - - // LinkAttributeCountLimit corresponds to the JSON schema field - // "link_attribute_count_limit". - LinkAttributeCountLimit *int `json:"link_attribute_count_limit,omitempty" yaml:"link_attribute_count_limit,omitempty" mapstructure:"link_attribute_count_limit,omitempty"` - - // LinkCountLimit corresponds to the JSON schema field "link_count_limit". - LinkCountLimit *int `json:"link_count_limit,omitempty" yaml:"link_count_limit,omitempty" mapstructure:"link_count_limit,omitempty"` -} - -type SpanProcessor struct { - // Batch corresponds to the JSON schema field "batch". - Batch *BatchSpanProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` - - // Simple corresponds to the JSON schema field "simple". - Simple *SimpleSpanProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` - - AdditionalProperties interface{} -} - -type TracerProvider struct { - // Limits corresponds to the JSON schema field "limits". - Limits *SpanLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Processors corresponds to the JSON schema field "processors". - Processors []SpanProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` - - // Sampler corresponds to the JSON schema field "sampler". - Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` -} - -type View struct { - // Selector corresponds to the JSON schema field "selector". - Selector *ViewSelector `json:"selector,omitempty" yaml:"selector,omitempty" mapstructure:"selector,omitempty"` - - // Stream corresponds to the JSON schema field "stream". 
- Stream *ViewStream `json:"stream,omitempty" yaml:"stream,omitempty" mapstructure:"stream,omitempty"` -} - -type ViewSelector struct { - // InstrumentName corresponds to the JSON schema field "instrument_name". - InstrumentName *string `json:"instrument_name,omitempty" yaml:"instrument_name,omitempty" mapstructure:"instrument_name,omitempty"` - - // InstrumentType corresponds to the JSON schema field "instrument_type". - InstrumentType *ViewSelectorInstrumentType `json:"instrument_type,omitempty" yaml:"instrument_type,omitempty" mapstructure:"instrument_type,omitempty"` - - // MeterName corresponds to the JSON schema field "meter_name". - MeterName *string `json:"meter_name,omitempty" yaml:"meter_name,omitempty" mapstructure:"meter_name,omitempty"` - - // MeterSchemaUrl corresponds to the JSON schema field "meter_schema_url". - MeterSchemaUrl *string `json:"meter_schema_url,omitempty" yaml:"meter_schema_url,omitempty" mapstructure:"meter_schema_url,omitempty"` - - // MeterVersion corresponds to the JSON schema field "meter_version". - MeterVersion *string `json:"meter_version,omitempty" yaml:"meter_version,omitempty" mapstructure:"meter_version,omitempty"` - - // Unit corresponds to the JSON schema field "unit". 
- Unit *string `json:"unit,omitempty" yaml:"unit,omitempty" mapstructure:"unit,omitempty"` -} - -type ViewSelectorInstrumentType string - -const ViewSelectorInstrumentTypeCounter ViewSelectorInstrumentType = "counter" -const ViewSelectorInstrumentTypeHistogram ViewSelectorInstrumentType = "histogram" -const ViewSelectorInstrumentTypeObservableCounter ViewSelectorInstrumentType = "observable_counter" -const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "observable_gauge" -const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter" -const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter" - -var enumValues_ViewSelectorInstrumentType = []interface{}{ - "counter", - "histogram", - "observable_counter", - "observable_gauge", - "observable_up_down_counter", - "up_down_counter", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValues_ViewSelectorInstrumentType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValues_ViewSelectorInstrumentType, v) - } - *j = ViewSelectorInstrumentType(v) - return nil -} - -type ViewStream struct { - // Aggregation corresponds to the JSON schema field "aggregation". - Aggregation *ViewStreamAggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"` - - // AttributeKeys corresponds to the JSON schema field "attribute_keys". - AttributeKeys []string `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` - - // Description corresponds to the JSON schema field "description". 
- Description *string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` - - // Name corresponds to the JSON schema field "name". - Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` -} - -type ViewStreamAggregation struct { - // Base2ExponentialBucketHistogram corresponds to the JSON schema field - // "base2_exponential_bucket_histogram". - Base2ExponentialBucketHistogram *ViewStreamAggregationBase2ExponentialBucketHistogram `json:"base2_exponential_bucket_histogram,omitempty" yaml:"base2_exponential_bucket_histogram,omitempty" mapstructure:"base2_exponential_bucket_histogram,omitempty"` - - // Default corresponds to the JSON schema field "default". - Default ViewStreamAggregationDefault `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"` - - // Drop corresponds to the JSON schema field "drop". - Drop ViewStreamAggregationDrop `json:"drop,omitempty" yaml:"drop,omitempty" mapstructure:"drop,omitempty"` - - // ExplicitBucketHistogram corresponds to the JSON schema field - // "explicit_bucket_histogram". - ExplicitBucketHistogram *ViewStreamAggregationExplicitBucketHistogram `json:"explicit_bucket_histogram,omitempty" yaml:"explicit_bucket_histogram,omitempty" mapstructure:"explicit_bucket_histogram,omitempty"` - - // LastValue corresponds to the JSON schema field "last_value". - LastValue ViewStreamAggregationLastValue `json:"last_value,omitempty" yaml:"last_value,omitempty" mapstructure:"last_value,omitempty"` - - // Sum corresponds to the JSON schema field "sum". - Sum ViewStreamAggregationSum `json:"sum,omitempty" yaml:"sum,omitempty" mapstructure:"sum,omitempty"` -} - -type ViewStreamAggregationBase2ExponentialBucketHistogram struct { - // MaxScale corresponds to the JSON schema field "max_scale". 
- MaxScale *int `json:"max_scale,omitempty" yaml:"max_scale,omitempty" mapstructure:"max_scale,omitempty"` - - // MaxSize corresponds to the JSON schema field "max_size". - MaxSize *int `json:"max_size,omitempty" yaml:"max_size,omitempty" mapstructure:"max_size,omitempty"` - - // RecordMinMax corresponds to the JSON schema field "record_min_max". - RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` -} - -type ViewStreamAggregationDefault map[string]interface{} - -type ViewStreamAggregationDrop map[string]interface{} - -type ViewStreamAggregationExplicitBucketHistogram struct { - // Boundaries corresponds to the JSON schema field "boundaries". - Boundaries []float64 `json:"boundaries,omitempty" yaml:"boundaries,omitempty" mapstructure:"boundaries,omitempty"` - - // RecordMinMax corresponds to the JSON schema field "record_min_max". - RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` -} - -type ViewStreamAggregationLastValue map[string]interface{} - -type ViewStreamAggregationSum map[string]interface{} - -type Zipkin struct { - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *Zipkin) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return fmt.Errorf("field endpoint in Zipkin: required") - } - type Plain Zipkin - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = Zipkin(plain) - return nil -} diff --git a/config/v0.2.0/log.go b/config/v0.2.0/log.go deleted file mode 100644 index bd302cfa75a..00000000000 --- a/config/v0.2.0/log.go +++ /dev/null @@ -1,155 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.2.0" - -import ( - "context" - "errors" - "fmt" - "net/url" - "time" - - "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" - "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/noop" - sdklog "go.opentelemetry.io/otel/sdk/log" - "go.opentelemetry.io/otel/sdk/resource" -) - -func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.LoggerProvider == nil { - return noop.NewLoggerProvider(), noopShutdown, nil - } - opts := []sdklog.LoggerProviderOption{ - sdklog.WithResource(res), - } - var errs []error - for _, processor := range cfg.opentelemetryConfig.LoggerProvider.Processors { - sp, err := logProcessor(cfg.ctx, processor) - if err == nil { - opts = append(opts, sdklog.WithProcessor(sp)) - } else { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...) - } - - lp := sdklog.NewLoggerProvider(opts...) 
- return lp, lp.Shutdown, nil -} - -func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) { - if processor.Batch != nil && processor.Simple != nil { - return nil, errors.New("must not specify multiple log processor type") - } - if processor.Batch != nil { - exp, err := logExporter(ctx, processor.Batch.Exporter) - if err != nil { - return nil, err - } - return batchLogProcessor(processor.Batch, exp) - } - if processor.Simple != nil { - exp, err := logExporter(ctx, processor.Simple.Exporter) - if err != nil { - return nil, err - } - return sdklog.NewSimpleProcessor(exp), nil - } - return nil, errors.New("unsupported log processor type, must be one of simple or batch") -} - -func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - - if exporter.Console != nil { - return stdoutlog.New( - stdoutlog.WithPrettyPrint(), - ) - } - - if exporter.OTLP != nil { - switch exporter.OTLP.Protocol { - case protocolProtobufHTTP: - return otlpHTTPLogExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) - } - } - return nil, errors.New("no valid log exporter") -} - -func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) { - var opts []sdklog.BatchProcessorOption - if blp.ExportTimeout != nil { - if *blp.ExportTimeout < 0 { - return nil, fmt.Errorf("invalid export timeout %d", *blp.ExportTimeout) - } - opts = append(opts, sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout))) - } - if blp.MaxExportBatchSize != nil { - if *blp.MaxExportBatchSize < 0 { - return nil, fmt.Errorf("invalid batch size %d", *blp.MaxExportBatchSize) - } - opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize)) - } - if blp.MaxQueueSize != nil { - if 
*blp.MaxQueueSize < 0 { - return nil, fmt.Errorf("invalid queue size %d", *blp.MaxQueueSize) - } - opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize)) - } - - if blp.ScheduleDelay != nil { - if *blp.ScheduleDelay < 0 { - return nil, fmt.Errorf("invalid schedule delay %d", *blp.ScheduleDelay) - } - opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay))) - } - - return sdklog.NewBatchProcessor(exp, opts...), nil -} - -func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { - var opts []otlploghttp.Option - - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlploghttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlploghttp.WithInsecure()) - } - if len(u.Path) > 0 { - opts = append(opts, otlploghttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlploghttp.WithHeaders(otlpConfig.Headers)) - } - - return otlploghttp.New(ctx, opts...) 
-} diff --git a/config/v0.2.0/log_test.go b/config/v0.2.0/log_test.go deleted file mode 100644 index 24effa7d283..00000000000 --- a/config/v0.2.0/log_test.go +++ /dev/null @@ -1,412 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "errors" - "net/url" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" - "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/noop" - sdklog "go.opentelemetry.io/otel/sdk/log" - "go.opentelemetry.io/otel/sdk/resource" -) - -func TestLoggerProvider(t *testing.T) { - tests := []struct { - name string - cfg configOptions - wantProvider log.LoggerProvider - wantErr error - }{ - { - name: "no-logger-provider-configured", - wantProvider: noop.NewLoggerProvider(), - }, - { - name: "error-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - LoggerProvider: &LoggerProvider{ - Processors: []LogRecordProcessor{ - { - Simple: &SimpleLogRecordProcessor{}, - Batch: &BatchLogRecordProcessor{}, - }, - }, - }, - }, - }, - wantProvider: noop.NewLoggerProvider(), - wantErr: errors.Join(errors.New("must not specify multiple log processor type")), - }, - } - for _, tt := range tests { - mp, shutdown, err := loggerProvider(tt.cfg, resource.Default()) - require.Equal(t, tt.wantProvider, mp) - assert.Equal(t, tt.wantErr, err) - require.NoError(t, shutdown(context.Background())) - } -} - -func TestLogProcessor(t *testing.T) { - ctx := context.Background() - - otlpHTTPExporter, err := otlploghttp.New(ctx) - require.NoError(t, err) - - consoleExporter, err := stdoutlog.New( - stdoutlog.WithPrettyPrint(), - ) - require.NoError(t, err) - - testCases := []struct { - name string - processor LogRecordProcessor - args any - wantErr error - wantProcessor sdklog.Processor - 
}{ - { - name: "no processor", - wantErr: errors.New("unsupported log processor type, must be one of simple or batch"), - }, - { - name: "multiple processor types", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{}, - }, - Simple: &SimpleLogRecordProcessor{}, - }, - wantErr: errors.New("must not specify multiple log processor type"), - }, - { - name: "batch processor invalid batch size otlphttp exporter", - - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(-1), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - }, - }, - }, - }, - wantErr: errors.New("invalid batch size -1"), - }, - { - name: "batch processor invalid export timeout otlphttp exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - ExportTimeout: ptr(-2), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - }, - }, - }, - }, - wantErr: errors.New("invalid export timeout -2"), - }, - { - name: "batch processor invalid queue size otlphttp exporter", - - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxQueueSize: ptr(-3), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - }, - }, - }, - }, - wantErr: errors.New("invalid queue size -3"), - }, - { - name: "batch processor invalid schedule delay console exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - ScheduleDelay: ptr(-4), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - }, - }, - }, - }, - wantErr: errors.New("invalid schedule delay -4"), - }, - { - name: "batch processor invalid exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{}, - }, - }, - wantErr: errors.New("no valid log exporter"), - }, - { - name: "batch/console", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - 
ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - Console: map[string]any{}, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(consoleExporter), - }, - { - name: "batch/otlp-http-exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-with-path", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318/path/123", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-endpoint", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-scheme", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: 
LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-protocol", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "invalid", - Endpoint: "https://10.0.0.0:443", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: errors.New("unsupported protocol \"invalid\""), - }, - { - name: "batch/otlp-http-invalid-endpoint", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: " ", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, - }, - { - name: "batch/otlp-http-none-compression", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-compression", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - 
ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: errors.New("unsupported compression \"invalid\""), - }, - { - name: "simple/no-exporter", - processor: LogRecordProcessor{ - Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{}, - }, - }, - wantErr: errors.New("no valid log exporter"), - }, - { - name: "simple/console", - processor: LogRecordProcessor{ - Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{ - Console: map[string]any{}, - }, - }, - }, - wantProcessor: sdklog.NewSimpleProcessor(consoleExporter), - }, - { - name: "simple/otlp-exporter", - processor: LogRecordProcessor{ - Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewSimpleProcessor(otlpHTTPExporter), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := logProcessor(context.Background(), tt.processor) - require.Equal(t, tt.wantErr, err) - if tt.wantProcessor == nil { - require.Nil(t, got) - } else { - require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) - wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName("exporter").Elem().Type() - gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName("exporter").Elem().Type() - require.Equal(t, wantExporterType.String(), gotExporterType.String()) - } - }) - } -} diff --git a/config/v0.2.0/metric.go b/config/v0.2.0/metric.go deleted file mode 100644 index c09212ad91c..00000000000 --- a/config/v0.2.0/metric.go +++ /dev/null @@ -1,507 
+0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.2.0" - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "math" - "net" - "net/http" - "net/url" - "os" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/metricdata" - "go.opentelemetry.io/otel/sdk/resource" -) - -var zeroScope instrumentation.Scope - -const instrumentKindUndefined = sdkmetric.InstrumentKind(0) - -func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.MeterProvider == nil { - return noop.NewMeterProvider(), noopShutdown, nil - } - opts := []sdkmetric.Option{ - sdkmetric.WithResource(res), - } - - var errs []error - for _, reader := range cfg.opentelemetryConfig.MeterProvider.Readers { - r, err := metricReader(cfg.ctx, reader) - if err == nil { - opts = append(opts, sdkmetric.WithReader(r)) - } else { - errs = append(errs, err) - } - } - for _, vw := range cfg.opentelemetryConfig.MeterProvider.Views { - v, err := view(vw) - if err == nil { - opts = append(opts, sdkmetric.WithView(v)) - } else { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) - } - - mp := sdkmetric.NewMeterProvider(opts...) 
- return mp, mp.Shutdown, nil -} - -func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) { - if r.Periodic != nil && r.Pull != nil { - return nil, errors.New("must not specify multiple metric reader type") - } - - if r.Periodic != nil { - var opts []sdkmetric.PeriodicReaderOption - if r.Periodic.Interval != nil { - opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond)) - } - - if r.Periodic.Timeout != nil { - opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond)) - } - return periodicExporter(ctx, r.Periodic.Exporter, opts...) - } - - if r.Pull != nil { - return pullReader(ctx, r.Pull.Exporter) - } - return nil, errors.New("no valid metric reader") -} - -func pullReader(ctx context.Context, exporter MetricExporter) (sdkmetric.Reader, error) { - if exporter.Prometheus != nil { - return prometheusReader(ctx, exporter.Prometheus) - } - return nil, errors.New("no valid metric exporter") -} - -func periodicExporter(ctx context.Context, exporter MetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - if exporter.Console != nil { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", " ") - - exp, err := stdoutmetric.New( - stdoutmetric.WithEncoder(enc), - ) - if err != nil { - return nil, err - } - return sdkmetric.NewPeriodicReader(exp, opts...), nil - } - if exporter.OTLP != nil { - var err error - var exp sdkmetric.Exporter - switch exporter.OTLP.Protocol { - case protocolProtobufHTTP: - exp, err = otlpHTTPMetricExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - exp, err = otlpGRPCMetricExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) - } - if err != nil { - return nil, err - } - return sdkmetric.NewPeriodicReader(exp, opts...), nil 
- } - return nil, errors.New("no valid metric exporter") -} - -func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { - opts := []otlpmetrichttp.Option{} - - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlpmetrichttp.WithInsecure()) - } - if len(u.Path) > 0 { - opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil { - opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetrichttp.WithHeaders(otlpConfig.Headers)) - } - if otlpConfig.TemporalityPreference != nil { - switch *otlpConfig.TemporalityPreference { - case "delta": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(deltaTemporality)) - case "cumulative": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(cumulativeTemporality)) - case "lowmemory": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(lowMemory)) - default: - return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) - } - } - - return otlpmetrichttp.New(ctx, opts...) 
-} - -func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { - var opts []otlpmetricgrpc.Option - - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - // ParseRequestURI leaves the Host field empty when no - // scheme is specified (i.e. localhost:4317). This check is - // here to support the case where a user may not specify a - // scheme. The code does its best effort here by using - // otlpConfig.Endpoint as-is in that case - if u.Host != "" { - opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) - } else { - opts = append(opts, otlpmetricgrpc.WithEndpoint(otlpConfig.Endpoint)) - } - if u.Scheme == "http" { - opts = append(opts, otlpmetricgrpc.WithInsecure()) - } - } - - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) - case compressionNone: - // none requires no options - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlpmetricgrpc.WithHeaders(otlpConfig.Headers)) - } - if otlpConfig.TemporalityPreference != nil { - switch *otlpConfig.TemporalityPreference { - case "delta": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(deltaTemporality)) - case "cumulative": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(cumulativeTemporality)) - case "lowmemory": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(lowMemory)) - default: - return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) - } - } - - return otlpmetricgrpc.New(ctx, opts...) 
-} - -func cumulativeTemporality(sdkmetric.InstrumentKind) metricdata.Temporality { - return metricdata.CumulativeTemporality -} - -func deltaTemporality(ik sdkmetric.InstrumentKind) metricdata.Temporality { - switch ik { - case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram, sdkmetric.InstrumentKindObservableCounter: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func lowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { - switch ik { - case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmetric.Reader, error) { - var opts []otelprom.Option - if prometheusConfig.Host == nil { - return nil, errors.New("host must be specified") - } - if prometheusConfig.Port == nil { - return nil, errors.New("port must be specified") - } - if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { - opts = append(opts, otelprom.WithoutScopeInfo()) - } - if prometheusConfig.WithoutTypeSuffix != nil && *prometheusConfig.WithoutTypeSuffix { - opts = append(opts, otelprom.WithoutCounterSuffixes()) - } - if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits { - opts = append(opts, otelprom.WithoutUnits()) - } - if prometheusConfig.WithResourceConstantLabels != nil { - if prometheusConfig.WithResourceConstantLabels.Included != nil { - var keys []attribute.Key - for _, val := range prometheusConfig.WithResourceConstantLabels.Included { - keys = append(keys, attribute.Key(val)) - } - otelprom.WithResourceAsConstantLabels(attribute.NewAllowKeysFilter(keys...)) - } - if prometheusConfig.WithResourceConstantLabels.Excluded != nil { - var keys []attribute.Key - for _, val := range prometheusConfig.WithResourceConstantLabels.Included { - keys = append(keys, attribute.Key(val)) 
- } - otelprom.WithResourceAsConstantLabels(attribute.NewDenyKeysFilter(keys...)) - } - } - - reg := prometheus.NewRegistry() - opts = append(opts, otelprom.WithRegisterer(reg)) - - reader, err := otelprom.New(opts...) - if err != nil { - return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) - } - - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) - server := http.Server{ - // Timeouts are necessary to make a server resilient to attacks, but ListenAndServe doesn't set any. - // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts - ReadTimeout: 5 * time.Second, - WriteTimeout: 10 * time.Second, - IdleTimeout: 120 * time.Second, - Handler: mux, - } - - // Remove surrounding "[]" from the host definition to allow users to define the host as "[::1]" or "::1". - host := *prometheusConfig.Host - if len(host) > 2 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - - addr := net.JoinHostPort(host, strconv.Itoa(*prometheusConfig.Port)) - lis, err := net.Listen("tcp", addr) - if err != nil { - return nil, errors.Join( - fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), - reader.Shutdown(ctx), - ) - } - - // Only for testing reasons, add the address to the http Server, will not be used. 
- server.Addr = lis.Addr().String() - - go func() { - if err := server.Serve(lis); err != nil && errors.Is(err, http.ErrServerClosed) { - otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) - } - }() - - return readerWithServer{reader, &server}, nil -} - -type readerWithServer struct { - sdkmetric.Reader - server *http.Server -} - -func (rws readerWithServer) Shutdown(ctx context.Context) error { - return errors.Join( - rws.Reader.Shutdown(ctx), - rws.server.Shutdown(ctx), - ) -} - -func view(v View) (sdkmetric.View, error) { - if v.Selector == nil { - return nil, errors.New("view: no selector provided") - } - - inst, err := instrument(*v.Selector) - if err != nil { - return nil, err - } - - return sdkmetric.NewView(inst, stream(v.Stream)), nil -} - -func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { - kind, err := instrumentKind(vs.InstrumentType) - if err != nil { - return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err) - } - inst := sdkmetric.Instrument{ - Name: strOrEmpty(vs.InstrumentName), - Unit: strOrEmpty(vs.Unit), - Kind: kind, - Scope: instrumentation.Scope{ - Name: strOrEmpty(vs.MeterName), - Version: strOrEmpty(vs.MeterVersion), - SchemaURL: strOrEmpty(vs.MeterSchemaUrl), - }, - } - - if instrumentIsEmpty(inst) { - return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supporter") - } - return inst, nil -} - -func stream(vs *ViewStream) sdkmetric.Stream { - if vs == nil { - return sdkmetric.Stream{} - } - - return sdkmetric.Stream{ - Name: strOrEmpty(vs.Name), - Description: strOrEmpty(vs.Description), - Aggregation: aggregation(vs.Aggregation), - AttributeFilter: attributeFilter(vs.AttributeKeys), - } -} - -func attributeFilter(attributeKeys []string) attribute.Filter { - var attrKeys []attribute.Key - for _, attrStr := range attributeKeys { - attrKeys = append(attrKeys, attribute.Key(attrStr)) - } - return attribute.NewAllowKeysFilter(attrKeys...) 
-} - -func aggregation(aggr *ViewStreamAggregation) sdkmetric.Aggregation { - if aggr == nil { - return nil - } - - if aggr.Base2ExponentialBucketHistogram != nil { - return sdkmetric.AggregationBase2ExponentialHistogram{ - MaxSize: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize), - MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale), - // Need to negate because config has the positive action RecordMinMax. - NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax), - } - } - if aggr.Default != nil { - // TODO: Understand what to set here. - return nil - } - if aggr.Drop != nil { - return sdkmetric.AggregationDrop{} - } - if aggr.ExplicitBucketHistogram != nil { - return sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: aggr.ExplicitBucketHistogram.Boundaries, - // Need to negate because config has the positive action RecordMinMax. - NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax), - } - } - if aggr.LastValue != nil { - return sdkmetric.AggregationLastValue{} - } - if aggr.Sum != nil { - return sdkmetric.AggregationSum{} - } - return nil -} - -func instrumentKind(vsit *ViewSelectorInstrumentType) (sdkmetric.InstrumentKind, error) { - if vsit == nil { - // Equivalent to instrumentKindUndefined. 
- return instrumentKindUndefined, nil - } - - switch *vsit { - case ViewSelectorInstrumentTypeCounter: - return sdkmetric.InstrumentKindCounter, nil - case ViewSelectorInstrumentTypeUpDownCounter: - return sdkmetric.InstrumentKindUpDownCounter, nil - case ViewSelectorInstrumentTypeHistogram: - return sdkmetric.InstrumentKindHistogram, nil - case ViewSelectorInstrumentTypeObservableCounter: - return sdkmetric.InstrumentKindObservableCounter, nil - case ViewSelectorInstrumentTypeObservableUpDownCounter: - return sdkmetric.InstrumentKindObservableUpDownCounter, nil - case ViewSelectorInstrumentTypeObservableGauge: - return sdkmetric.InstrumentKindObservableGauge, nil - } - - return instrumentKindUndefined, errors.New("instrument_type: invalid value") -} - -func instrumentIsEmpty(i sdkmetric.Instrument) bool { - return i.Name == "" && - i.Description == "" && - i.Kind == instrumentKindUndefined && - i.Unit == "" && - i.Scope == zeroScope -} - -func boolOrFalse(pBool *bool) bool { - if pBool == nil { - return false - } - return *pBool -} - -func int32OrZero(pInt *int) int32 { - if pInt == nil { - return 0 - } - i := *pInt - if i > math.MaxInt32 { - return math.MaxInt32 - } - if i < math.MinInt32 { - return math.MinInt32 - } - return int32(i) // nolint: gosec // Overflow and underflow checked above. 
-} - -func strOrEmpty(pStr *string) string { - if pStr == nil { - return "" - } - return *pStr -} diff --git a/config/v0.2.0/metric_test.go b/config/v0.2.0/metric_test.go deleted file mode 100644 index 8395bca12f7..00000000000 --- a/config/v0.2.0/metric_test.go +++ /dev/null @@ -1,1158 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "errors" - "net/http" - "net/url" - "reflect" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/resource" -) - -func TestMeterProvider(t *testing.T) { - tests := []struct { - name string - cfg configOptions - wantProvider metric.MeterProvider - wantErr error - }{ - { - name: "no-meter-provider-configured", - wantProvider: noop.NewMeterProvider(), - }, - { - name: "error-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - MeterProvider: &MeterProvider{ - Readers: []MetricReader{ - { - Periodic: &PeriodicMetricReader{}, - Pull: &PullMetricReader{}, - }, - }, - }, - }, - }, - wantProvider: noop.NewMeterProvider(), - wantErr: errors.Join(errors.New("must not specify multiple metric reader type")), - }, - { - name: "multiple-errors-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - MeterProvider: &MeterProvider{ - Readers: []MetricReader{ - { - Periodic: &PeriodicMetricReader{}, - Pull: &PullMetricReader{}, - }, - { - Periodic: 
&PeriodicMetricReader{ - Exporter: MetricExporter{ - Console: Console{}, - OTLP: &OTLPMetric{}, - }, - }, - }, - }, - }, - }, - }, - wantProvider: noop.NewMeterProvider(), - wantErr: errors.Join(errors.New("must not specify multiple metric reader type"), errors.New("must not specify multiple exporters")), - }, - } - for _, tt := range tests { - mp, shutdown, err := meterProvider(tt.cfg, resource.Default()) - require.Equal(t, tt.wantProvider, mp) - assert.Equal(t, tt.wantErr, err) - require.NoError(t, shutdown(context.Background())) - } -} - -func TestReader(t *testing.T) { - consoleExporter, err := stdoutmetric.New( - stdoutmetric.WithPrettyPrint(), - ) - require.NoError(t, err) - ctx := context.Background() - otlpGRPCExporter, err := otlpmetricgrpc.New(ctx) - require.NoError(t, err) - otlpHTTPExporter, err := otlpmetrichttp.New(ctx) - require.NoError(t, err) - promExporter, err := otelprom.New() - require.NoError(t, err) - testCases := []struct { - name string - reader MetricReader - args any - wantErr error - wantReader sdkmetric.Reader - }{ - { - name: "no reader", - wantErr: errors.New("no valid metric reader"), - }, - { - name: "pull/no-exporter", - reader: MetricReader{ - Pull: &PullMetricReader{}, - }, - wantErr: errors.New("no valid metric exporter"), - }, - { - name: "pull/prometheus-no-host", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: MetricExporter{ - Prometheus: &Prometheus{}, - }, - }, - }, - wantErr: errors.New("host must be specified"), - }, - { - name: "pull/prometheus-no-port", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: MetricExporter{ - Prometheus: &Prometheus{ - Host: ptr("localhost"), - }, - }, - }, - }, - wantErr: errors.New("port must be specified"), - }, - { - name: "pull/prometheus", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: MetricExporter{ - Prometheus: &Prometheus{ - Host: ptr("localhost"), - Port: ptr(0), - WithoutScopeInfo: ptr(true), - WithoutUnits: ptr(true), - 
WithoutTypeSuffix: ptr(true), - WithResourceConstantLabels: &IncludeExclude{ - Included: []string{"include"}, - Excluded: []string{"exclude"}, - }, - }, - }, - }, - }, - wantReader: readerWithServer{promExporter, nil}, - }, - { - name: "periodic/otlp-exporter-invalid-protocol", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/invalid", - }, - }, - }, - }, - wantErr: errors.New("unsupported protocol \"http/invalid\""), - }, - { - name: "periodic/otlp-grpc-exporter", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "http://localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-exporter-with-path", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "http://localhost:4318/path/123", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-exporter-no-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-exporter-no-scheme", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: 
map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-invalid-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: " ", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, - }, - { - name: "periodic/otlp-grpc-none-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-delta-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - TemporalityPreference: ptr("delta"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-cumulative-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - TemporalityPreference: ptr("cumulative"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-lowmemory-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: 
MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - TemporalityPreference: ptr("lowmemory"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-invalid-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - TemporalityPreference: ptr("invalid"), - }, - }, - }, - }, - wantErr: errors.New("unsupported temporality preference \"invalid\""), - }, - { - name: "periodic/otlp-grpc-invalid-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: errors.New("unsupported compression \"invalid\""), - }, - { - name: "periodic/otlp-http-exporter", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-exporter-with-path", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318/path/123", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: 
sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-exporter-no-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-exporter-no-scheme", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-invalid-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: " ", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, - }, - { - name: "periodic/otlp-http-none-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-cumulative-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", 
- }, - TemporalityPreference: ptr("cumulative"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-lowmemory-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - TemporalityPreference: ptr("lowmemory"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-delta-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - TemporalityPreference: ptr("delta"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-invalid-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - TemporalityPreference: ptr("invalid"), - }, - }, - }, - }, - wantErr: errors.New("unsupported temporality preference \"invalid\""), - }, - { - name: "periodic/otlp-http-invalid-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - OTLP: &OTLPMetric{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: errors.New("unsupported compression \"invalid\""), - }, - { - name: "periodic/no-exporter", - reader: MetricReader{ - Periodic: 
&PeriodicMetricReader{ - Exporter: MetricExporter{}, - }, - }, - wantErr: errors.New("no valid metric exporter"), - }, - { - name: "periodic/console-exporter", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: MetricExporter{ - Console: Console{}, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(consoleExporter), - }, - { - name: "periodic/console-exporter-with-extra-options", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Interval: ptr(30_000), - Timeout: ptr(5_000), - Exporter: MetricExporter{ - Console: Console{}, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader( - consoleExporter, - sdkmetric.WithInterval(30_000*time.Millisecond), - sdkmetric.WithTimeout(5_000*time.Millisecond), - ), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := metricReader(context.Background(), tt.reader) - require.Equal(t, tt.wantErr, err) - if tt.wantReader == nil { - require.Nil(t, got) - } else { - require.Equal(t, reflect.TypeOf(tt.wantReader), reflect.TypeOf(got)) - var fieldName string - switch reflect.TypeOf(tt.wantReader).String() { - case "*metric.PeriodicReader": - fieldName = "exporter" - case "config.readerWithServer": - fieldName = "Reader" - default: - fieldName = "e" - } - wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantReader)).FieldByName(fieldName).Elem().Type() - gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() - require.Equal(t, wantExporterType.String(), gotExporterType.String()) - require.NoError(t, got.Shutdown(context.Background())) - } - }) - } -} - -func TestView(t *testing.T) { - testCases := []struct { - name string - view View - args any - wantErr string - matchInstrument *sdkmetric.Instrument - wantStream sdkmetric.Stream - wantResult bool - }{ - { - name: "no selector", - wantErr: "view: no selector provided", - }, - { - name: "selector/invalid_type", - view: View{ - Selector: &ViewSelector{ - 
InstrumentType: (*ViewSelectorInstrumentType)(ptr("invalid_type")), - }, - }, - wantErr: "view_selector: instrument_type: invalid value", - }, - { - name: "selector/invalid_type", - view: View{ - Selector: &ViewSelector{}, - }, - wantErr: "view_selector: empty selector not supporter", - }, - { - name: "all selectors match", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{Name: "test_name", Unit: "test_unit"}, - wantResult: true, - }, - { - name: "all selectors no match name", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "not_match", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match unit", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - 
matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "not_match", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match kind", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("histogram")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match meter name", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "not_match", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match meter version", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - 
matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "not_match", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match meter schema url", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "not_match", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "with stream", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - Unit: ptr("test_unit"), - }, - Stream: &ViewStream{ - Name: ptr("new_name"), - Description: ptr("new_description"), - AttributeKeys: []string{"foo", "bar"}, - Aggregation: &ViewStreamAggregation{Sum: make(ViewStreamAggregationSum)}, - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Description: "test_description", - Unit: "test_unit", - }, - wantStream: sdkmetric.Stream{ - Name: "new_name", - Description: "new_description", - Unit: "test_unit", - Aggregation: sdkmetric.AggregationSum{}, - }, - wantResult: true, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := view(tt.view) - if tt.wantErr != "" { - require.EqualError(t, err, tt.wantErr) - require.Nil(t, got) - } else { - require.NoError(t, err) - gotStream, gotResult := got(*tt.matchInstrument) - // Remove filter, since it cannot be compared - gotStream.AttributeFilter = nil - 
require.Equal(t, tt.wantStream, gotStream) - require.Equal(t, tt.wantResult, gotResult) - } - }) - } -} - -func TestInstrumentType(t *testing.T) { - testCases := []struct { - name string - instType *ViewSelectorInstrumentType - wantErr error - wantKind sdkmetric.InstrumentKind - }{ - { - name: "nil", - wantKind: sdkmetric.InstrumentKind(0), - }, - { - name: "counter", - instType: (*ViewSelectorInstrumentType)(ptr("counter")), - wantKind: sdkmetric.InstrumentKindCounter, - }, - { - name: "up_down_counter", - instType: (*ViewSelectorInstrumentType)(ptr("up_down_counter")), - wantKind: sdkmetric.InstrumentKindUpDownCounter, - }, - { - name: "histogram", - instType: (*ViewSelectorInstrumentType)(ptr("histogram")), - wantKind: sdkmetric.InstrumentKindHistogram, - }, - { - name: "observable_counter", - instType: (*ViewSelectorInstrumentType)(ptr("observable_counter")), - wantKind: sdkmetric.InstrumentKindObservableCounter, - }, - { - name: "observable_up_down_counter", - instType: (*ViewSelectorInstrumentType)(ptr("observable_up_down_counter")), - wantKind: sdkmetric.InstrumentKindObservableUpDownCounter, - }, - { - name: "observable_gauge", - instType: (*ViewSelectorInstrumentType)(ptr("observable_gauge")), - wantKind: sdkmetric.InstrumentKindObservableGauge, - }, - { - name: "invalid", - instType: (*ViewSelectorInstrumentType)(ptr("invalid")), - wantErr: errors.New("instrument_type: invalid value"), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := instrumentKind(tt.instType) - if tt.wantErr != nil { - require.Equal(t, tt.wantErr, err) - require.Zero(t, got) - } else { - require.NoError(t, err) - require.Equal(t, tt.wantKind, got) - } - }) - } -} - -func TestAggregation(t *testing.T) { - testCases := []struct { - name string - aggregation *ViewStreamAggregation - wantAggregation sdkmetric.Aggregation - }{ - { - name: "nil", - wantAggregation: nil, - }, - { - name: "empty", - aggregation: &ViewStreamAggregation{}, - 
wantAggregation: nil, - }, - { - name: "Base2ExponentialBucketHistogram empty", - aggregation: &ViewStreamAggregation{ - Base2ExponentialBucketHistogram: &ViewStreamAggregationBase2ExponentialBucketHistogram{}, - }, - wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ - MaxSize: 0, - MaxScale: 0, - NoMinMax: true, - }, - }, - { - name: "Base2ExponentialBucketHistogram", - aggregation: &ViewStreamAggregation{ - Base2ExponentialBucketHistogram: &ViewStreamAggregationBase2ExponentialBucketHistogram{ - MaxSize: ptr(2), - MaxScale: ptr(3), - RecordMinMax: ptr(true), - }, - }, - wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ - MaxSize: 2, - MaxScale: 3, - NoMinMax: false, - }, - }, - { - name: "Default", - aggregation: &ViewStreamAggregation{ - Default: make(ViewStreamAggregationDefault), - }, - wantAggregation: nil, - }, - { - name: "Drop", - aggregation: &ViewStreamAggregation{ - Drop: make(ViewStreamAggregationDrop), - }, - wantAggregation: sdkmetric.AggregationDrop{}, - }, - { - name: "ExplicitBucketHistogram empty", - aggregation: &ViewStreamAggregation{ - ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{}, - }, - wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: nil, - NoMinMax: true, - }, - }, - { - name: "ExplicitBucketHistogram", - aggregation: &ViewStreamAggregation{ - ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{ - Boundaries: []float64{1, 2, 3}, - RecordMinMax: ptr(true), - }, - }, - wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: []float64{1, 2, 3}, - NoMinMax: false, - }, - }, - { - name: "LastValue", - aggregation: &ViewStreamAggregation{ - LastValue: make(ViewStreamAggregationLastValue), - }, - wantAggregation: sdkmetric.AggregationLastValue{}, - }, - { - name: "Sum", - aggregation: &ViewStreamAggregation{ - Sum: make(ViewStreamAggregationSum), - }, - wantAggregation: sdkmetric.AggregationSum{}, - }, - } - for _, tt := 
range testCases { - t.Run(tt.name, func(t *testing.T) { - got := aggregation(tt.aggregation) - require.Equal(t, tt.wantAggregation, got) - }) - } -} - -func TestAttributeFilter(t *testing.T) { - testCases := []struct { - name string - attributeKeys []string - wantPass []string - wantFail []string - }{ - { - name: "empty", - attributeKeys: []string{}, - wantPass: nil, - wantFail: []string{"foo", "bar"}, - }, - { - name: "filter", - attributeKeys: []string{"foo"}, - wantPass: []string{"foo"}, - wantFail: []string{"bar"}, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got := attributeFilter(tt.attributeKeys) - for _, pass := range tt.wantPass { - require.True(t, got(attribute.KeyValue{Key: attribute.Key(pass), Value: attribute.StringValue("")})) - } - for _, fail := range tt.wantFail { - require.False(t, got(attribute.KeyValue{Key: attribute.Key(fail), Value: attribute.StringValue("")})) - } - }) - } -} - -func TestPrometheusIPv6(t *testing.T) { - tests := []struct { - name string - host string - }{ - { - name: "IPv6", - host: "::1", - }, - { - name: "[IPv6]", - host: "[::1]", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - port := 0 - cfg := Prometheus{ - Host: &tt.host, - Port: &port, - WithoutScopeInfo: ptr(true), - WithoutTypeSuffix: ptr(true), - WithoutUnits: ptr(true), - WithResourceConstantLabels: &IncludeExclude{}, - } - - rs, err := prometheusReader(context.Background(), &cfg) - t.Cleanup(func() { - require.NoError(t, rs.Shutdown(context.Background())) - }) - require.NoError(t, err) - - hServ := rs.(readerWithServer).server - assert.True(t, strings.HasPrefix(hServ.Addr, "[::1]:")) - - resp, err := http.DefaultClient.Get("http://" + hServ.Addr + "/metrics") - t.Cleanup(func() { - require.NoError(t, resp.Body.Close()) - }) - require.NoError(t, err) - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - } -} diff --git a/config/v0.2.0/resource.go b/config/v0.2.0/resource.go deleted file mode 
100644 index 7c24e109f72..00000000000 --- a/config/v0.2.0/resource.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.2.0" - -import ( - "fmt" - "strconv" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" -) - -func keyVal(k string, v any) attribute.KeyValue { - switch val := v.(type) { - case bool: - return attribute.Bool(k, val) - case int64: - return attribute.Int64(k, val) - case uint64: - return attribute.String(k, strconv.FormatUint(val, 10)) - case float64: - return attribute.Float64(k, val) - case int8: - return attribute.Int64(k, int64(val)) - case uint8: - return attribute.Int64(k, int64(val)) - case int16: - return attribute.Int64(k, int64(val)) - case uint16: - return attribute.Int64(k, int64(val)) - case int32: - return attribute.Int64(k, int64(val)) - case uint32: - return attribute.Int64(k, int64(val)) - case float32: - return attribute.Float64(k, float64(val)) - case int: - return attribute.Int(k, val) - case uint: - return attribute.String(k, strconv.FormatUint(uint64(val), 10)) - case string: - return attribute.String(k, val) - default: - return attribute.String(k, fmt.Sprint(v)) - } -} - -func newResource(res *Resource) (*resource.Resource, error) { - if res == nil || res.Attributes == nil { - return resource.Default(), nil - } - var attrs []attribute.KeyValue - - for k, v := range res.Attributes { - attrs = append(attrs, keyVal(k, v)) - } - - return resource.Merge(resource.Default(), - resource.NewWithAttributes(*res.SchemaUrl, - attrs..., - )) -} diff --git a/config/v0.2.0/resource_test.go b/config/v0.2.0/resource_test.go deleted file mode 100644 index 12c15a843e5..00000000000 --- a/config/v0.2.0/resource_test.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "fmt" - "testing" - - 
"github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" -) - -type mockType struct{} - -func TestNewResource(t *testing.T) { - res, err := resource.Merge(resource.Default(), - resource.NewWithAttributes(semconv.SchemaURL, - semconv.ServiceName("service-a"), - )) - other := mockType{} - require.NoError(t, err) - resWithAttrs, err := resource.Merge(resource.Default(), - resource.NewWithAttributes(semconv.SchemaURL, - semconv.ServiceName("service-a"), - attribute.Bool("attr-bool", true), - attribute.String("attr-uint64", fmt.Sprintf("%d", 164)), - attribute.Int64("attr-int64", int64(-164)), - attribute.Float64("attr-float64", float64(64.0)), - attribute.Int64("attr-int8", int64(-18)), - attribute.Int64("attr-uint8", int64(18)), - attribute.Int64("attr-int16", int64(-116)), - attribute.Int64("attr-uint16", int64(116)), - attribute.Int64("attr-int32", int64(-132)), - attribute.Int64("attr-uint32", int64(132)), - attribute.Float64("attr-float32", float64(32.0)), - attribute.Int64("attr-int", int64(-1)), - attribute.String("attr-uint", fmt.Sprintf("%d", 1)), - attribute.String("attr-string", "string-val"), - attribute.String("attr-default", fmt.Sprintf("%v", other)), - )) - require.NoError(t, err) - tests := []struct { - name string - config *Resource - wantResource *resource.Resource - wantErr error - }{ - { - name: "no-resource-configuration", - wantResource: resource.Default(), - }, - { - name: "resource-no-attributes", - config: &Resource{}, - wantResource: resource.Default(), - }, - { - name: "resource-with-attributes-invalid-schema", - config: &Resource{ - SchemaUrl: ptr("https://opentelemetry.io/invalid-schema"), - Attributes: Attributes{ - "service.name": "service-a", - }, - }, - wantResource: resource.NewSchemaless(res.Attributes()...), - wantErr: resource.ErrSchemaURLConflict, - }, - { - name: 
"resource-with-attributes-and-schema", - config: &Resource{ - Attributes: Attributes{ - "service.name": "service-a", - }, - SchemaUrl: ptr(semconv.SchemaURL), - }, - wantResource: res, - }, - { - name: "resource-with-additional-attributes-and-schema", - config: &Resource{ - Attributes: Attributes{ - "service.name": "service-a", - "attr-bool": true, - "attr-int64": int64(-164), - "attr-uint64": uint64(164), - "attr-float64": float64(64.0), - "attr-int8": int8(-18), - "attr-uint8": uint8(18), - "attr-int16": int16(-116), - "attr-uint16": uint16(116), - "attr-int32": int32(-132), - "attr-uint32": uint32(132), - "attr-float32": float32(32.0), - "attr-int": int(-1), - "attr-uint": uint(1), - "attr-string": "string-val", - "attr-default": other, - }, - SchemaUrl: ptr(semconv.SchemaURL), - }, - wantResource: resWithAttrs, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := newResource(tt.config) - assert.ErrorIs(t, err, tt.wantErr) - assert.Equal(t, tt.wantResource, got) - }) - } -} diff --git a/config/v0.2.0/trace.go b/config/v0.2.0/trace.go deleted file mode 100644 index c639c174328..00000000000 --- a/config/v0.2.0/trace.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.2.0" - -import ( - "context" - "errors" - "fmt" - "net/url" - "time" - - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" -) - -func tracerProvider(cfg configOptions, res *resource.Resource) (trace.TracerProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.TracerProvider == nil { - return noop.NewTracerProvider(), 
noopShutdown, nil - } - opts := []sdktrace.TracerProviderOption{ - sdktrace.WithResource(res), - } - var errs []error - for _, processor := range cfg.opentelemetryConfig.TracerProvider.Processors { - sp, err := spanProcessor(cfg.ctx, processor) - if err == nil { - opts = append(opts, sdktrace.WithSpanProcessor(sp)) - } else { - errs = append(errs, err) - } - } - if len(errs) > 0 { - return noop.NewTracerProvider(), noopShutdown, errors.Join(errs...) - } - tp := sdktrace.NewTracerProvider(opts...) - return tp, tp.Shutdown, nil -} - -func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExporter, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - - if exporter.Console != nil { - return stdouttrace.New( - stdouttrace.WithPrettyPrint(), - ) - } - if exporter.OTLP != nil { - switch exporter.OTLP.Protocol { - case protocolProtobufHTTP: - return otlpHTTPSpanExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - return otlpGRPCSpanExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", exporter.OTLP.Protocol) - } - } - return nil, errors.New("no valid span exporter") -} - -func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanProcessor, error) { - if processor.Batch != nil && processor.Simple != nil { - return nil, errors.New("must not specify multiple span processor type") - } - if processor.Batch != nil { - exp, err := spanExporter(ctx, processor.Batch.Exporter) - if err != nil { - return nil, err - } - return batchSpanProcessor(processor.Batch, exp) - } - if processor.Simple != nil { - exp, err := spanExporter(ctx, processor.Simple.Exporter) - if err != nil { - return nil, err - } - return sdktrace.NewSimpleSpanProcessor(exp), nil - } - return nil, errors.New("unsupported span processor type, must be one of simple or batch") -} - -func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) 
(sdktrace.SpanExporter, error) { - var opts []otlptracegrpc.Option - - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - // ParseRequestURI leaves the Host field empty when no - // scheme is specified (i.e. localhost:4317). This check is - // here to support the case where a user may not specify a - // scheme. The code does its best effort here by using - // otlpConfig.Endpoint as-is in that case. - if u.Host != "" { - opts = append(opts, otlptracegrpc.WithEndpoint(u.Host)) - } else { - opts = append(opts, otlptracegrpc.WithEndpoint(otlpConfig.Endpoint)) - } - - if u.Scheme == "http" { - opts = append(opts, otlptracegrpc.WithInsecure()) - } - } - - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlptracegrpc.WithCompressor(*otlpConfig.Compression)) - case compressionNone: - // none requires no options - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlptracegrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlptracegrpc.WithHeaders(otlpConfig.Headers)) - } - - return otlptracegrpc.New(ctx, opts...) 
-} - -func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { - var opts []otlptracehttp.Option - - if len(otlpConfig.Endpoint) > 0 { - u, err := url.ParseRequestURI(otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlptracehttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlptracehttp.WithInsecure()) - } - if len(u.Path) > 0 { - opts = append(opts, otlptracehttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlptracehttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - if len(otlpConfig.Headers) > 0 { - opts = append(opts, otlptracehttp.WithHeaders(otlpConfig.Headers)) - } - - return otlptracehttp.New(ctx, opts...) 
-} - -func batchSpanProcessor(bsp *BatchSpanProcessor, exp sdktrace.SpanExporter) (sdktrace.SpanProcessor, error) { - var opts []sdktrace.BatchSpanProcessorOption - if bsp.ExportTimeout != nil { - if *bsp.ExportTimeout < 0 { - return nil, fmt.Errorf("invalid export timeout %d", *bsp.ExportTimeout) - } - opts = append(opts, sdktrace.WithExportTimeout(time.Millisecond*time.Duration(*bsp.ExportTimeout))) - } - if bsp.MaxExportBatchSize != nil { - if *bsp.MaxExportBatchSize < 0 { - return nil, fmt.Errorf("invalid batch size %d", *bsp.MaxExportBatchSize) - } - opts = append(opts, sdktrace.WithMaxExportBatchSize(*bsp.MaxExportBatchSize)) - } - if bsp.MaxQueueSize != nil { - if *bsp.MaxQueueSize < 0 { - return nil, fmt.Errorf("invalid queue size %d", *bsp.MaxQueueSize) - } - opts = append(opts, sdktrace.WithMaxQueueSize(*bsp.MaxQueueSize)) - } - if bsp.ScheduleDelay != nil { - if *bsp.ScheduleDelay < 0 { - return nil, fmt.Errorf("invalid schedule delay %d", *bsp.ScheduleDelay) - } - opts = append(opts, sdktrace.WithBatchTimeout(time.Millisecond*time.Duration(*bsp.ScheduleDelay))) - } - return sdktrace.NewBatchSpanProcessor(exp, opts...), nil -} diff --git a/config/v0.2.0/trace_test.go b/config/v0.2.0/trace_test.go deleted file mode 100644 index 4f4a197770e..00000000000 --- a/config/v0.2.0/trace_test.go +++ /dev/null @@ -1,535 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "errors" - "net/url" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" -) - -func TestTracerPovider(t 
*testing.T) { - tests := []struct { - name string - cfg configOptions - wantProvider trace.TracerProvider - wantErr error - }{ - { - name: "no-tracer-provider-configured", - wantProvider: noop.NewTracerProvider(), - }, - { - name: "error-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - TracerProvider: &TracerProvider{ - Processors: []SpanProcessor{ - { - Batch: &BatchSpanProcessor{}, - Simple: &SimpleSpanProcessor{}, - }, - }, - }, - }, - }, - wantProvider: noop.NewTracerProvider(), - wantErr: errors.Join(errors.New("must not specify multiple span processor type")), - }, - { - name: "multiple-errors-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - TracerProvider: &TracerProvider{ - Processors: []SpanProcessor{ - { - Batch: &BatchSpanProcessor{}, - Simple: &SimpleSpanProcessor{}, - }, - { - Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - OTLP: &OTLP{}, - }, - }, - }, - }, - }, - }, - }, - wantProvider: noop.NewTracerProvider(), - wantErr: errors.Join(errors.New("must not specify multiple span processor type"), errors.New("must not specify multiple exporters")), - }, - } - for _, tt := range tests { - tp, shutdown, err := tracerProvider(tt.cfg, resource.Default()) - require.Equal(t, tt.wantProvider, tp) - assert.Equal(t, tt.wantErr, err) - require.NoError(t, shutdown(context.Background())) - } -} - -func TestSpanProcessor(t *testing.T) { - consoleExporter, err := stdouttrace.New( - stdouttrace.WithPrettyPrint(), - ) - require.NoError(t, err) - ctx := context.Background() - otlpGRPCExporter, err := otlptracegrpc.New(ctx) - require.NoError(t, err) - otlpHTTPExporter, err := otlptracehttp.New(ctx) - require.NoError(t, err) - testCases := []struct { - name string - processor SpanProcessor - args any - wantErr error - wantProcessor sdktrace.SpanProcessor - }{ - { - name: "no processor", - wantErr: errors.New("unsupported span processor type, must be one of simple or 
batch"), - }, - { - name: "multiple processor types", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{}, - }, - Simple: &SimpleSpanProcessor{}, - }, - wantErr: errors.New("must not specify multiple span processor type"), - }, - { - name: "batch processor invalid exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{}, - }, - }, - wantErr: errors.New("no valid span exporter"), - }, - { - name: "batch processor invalid batch size console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(-1), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: errors.New("invalid batch size -1"), - }, - { - name: "batch processor invalid export timeout console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - ExportTimeout: ptr(-2), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: errors.New("invalid export timeout -2"), - }, - { - name: "batch processor invalid queue size console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxQueueSize: ptr(-3), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: errors.New("invalid queue size -3"), - }, - { - name: "batch processor invalid schedule delay console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - ScheduleDelay: ptr(-4), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: errors.New("invalid schedule delay -4"), - }, - { - name: "batch processor with multiple exporters", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - OTLP: &OTLP{}, - }, - }, - }, - wantErr: errors.New("must not specify multiple exporters"), - }, - { - name: "batch processor console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - 
ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(consoleExporter), - }, - { - name: "batch/otlp-exporter-invalid-protocol", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/invalid", - }, - }, - }, - }, - wantErr: errors.New("unsupported protocol \"http/invalid\""), - }, - { - name: "batch/otlp-grpc-exporter-no-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: "http://localhost:4317", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-exporter-no-scheme", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4317", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: 
sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-invalid-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: " ", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, - }, - { - name: "batch/otlp-grpc-invalid-compression", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "grpc/protobuf", - Endpoint: "localhost:4317", - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: errors.New("unsupported compression \"invalid\""), - }, - { - name: "batch/otlp-http-exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-with-path", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "http://localhost:4318/path/123", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: map[string]string{ - 
"test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-scheme", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: " ", - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: &url.Error{Op: "parse", URL: " ", Err: errors.New("invalid URI for request")}, - }, - { - name: "batch/otlp-http-none-compression", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: 
map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-compression", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: "http/protobuf", - Endpoint: "localhost:4318", - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: map[string]string{ - "test": "test1", - }, - }, - }, - }, - }, - wantErr: errors.New("unsupported compression \"invalid\""), - }, - { - name: "simple/no-exporter", - processor: SpanProcessor{ - Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{}, - }, - }, - wantErr: errors.New("no valid span exporter"), - }, - { - name: "simple/console-exporter", - processor: SpanProcessor{ - Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantProcessor: sdktrace.NewSimpleSpanProcessor(consoleExporter), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := spanProcessor(context.Background(), tt.processor) - require.Equal(t, tt.wantErr, err) - if tt.wantProcessor == nil { - require.Nil(t, got) - } else { - require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) - var fieldName string - switch reflect.TypeOf(tt.wantProcessor).String() { - case "*trace.simpleSpanProcessor": - fieldName = "exporter" - default: - fieldName = "e" - } - wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName(fieldName).Elem().Type() - gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() - require.Equal(t, wantExporterType.String(), gotExporterType.String()) - } - }) - } -} diff --git a/config/v0.3.0/config.go b/config/v0.3.0/config.go deleted file mode 100644 index 2754a0303d9..00000000000 --- a/config/v0.3.0/config.go +++ 
/dev/null @@ -1,209 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -// Package config is deprecated. -// -// Deprecated: use [go.opentelemetry.io/contrib/otelconf/v0.3.0] instead. -// This is the last release of this module. -package config // import "go.opentelemetry.io/contrib/config/v0.3.0" - -import ( - "context" - "crypto/tls" - "crypto/x509" - "errors" - "fmt" - "os" - - "gopkg.in/yaml.v3" - - "go.opentelemetry.io/otel/baggage" - "go.opentelemetry.io/otel/log" - nooplog "go.opentelemetry.io/otel/log/noop" - "go.opentelemetry.io/otel/metric" - noopmetric "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/trace" - nooptrace "go.opentelemetry.io/otel/trace/noop" -) - -const ( - protocolProtobufHTTP = "http/protobuf" - protocolProtobufGRPC = "grpc" - - compressionGzip = "gzip" - compressionNone = "none" -) - -type configOptions struct { - ctx context.Context - opentelemetryConfig OpenTelemetryConfiguration -} - -type shutdownFunc func(context.Context) error - -func noopShutdown(context.Context) error { - return nil -} - -// SDK is a struct that contains all the providers -// configured via the configuration model. -type SDK struct { - meterProvider metric.MeterProvider - tracerProvider trace.TracerProvider - loggerProvider log.LoggerProvider - shutdown shutdownFunc -} - -// TracerProvider returns a configured trace.TracerProvider. -func (s *SDK) TracerProvider() trace.TracerProvider { - return s.tracerProvider -} - -// MeterProvider returns a configured metric.MeterProvider. -func (s *SDK) MeterProvider() metric.MeterProvider { - return s.meterProvider -} - -// LoggerProvider returns a configured log.LoggerProvider. -func (s *SDK) LoggerProvider() log.LoggerProvider { - return s.loggerProvider -} - -// Shutdown calls shutdown on all configured providers. 
-func (s *SDK) Shutdown(ctx context.Context) error { - return s.shutdown(ctx) -} - -var noopSDK = SDK{ - loggerProvider: nooplog.LoggerProvider{}, - meterProvider: noopmetric.MeterProvider{}, - tracerProvider: nooptrace.TracerProvider{}, - shutdown: func(ctx context.Context) error { return nil }, -} - -// NewSDK creates SDK providers based on the configuration model. -func NewSDK(opts ...ConfigurationOption) (SDK, error) { - o := configOptions{} - for _, opt := range opts { - o = opt.apply(o) - } - if o.opentelemetryConfig.Disabled != nil && *o.opentelemetryConfig.Disabled { - return noopSDK, nil - } - - r := newResource(o.opentelemetryConfig.Resource) - - mp, mpShutdown, err := meterProvider(o, r) - if err != nil { - return noopSDK, err - } - - tp, tpShutdown, err := tracerProvider(o, r) - if err != nil { - return noopSDK, err - } - - lp, lpShutdown, err := loggerProvider(o, r) - if err != nil { - return noopSDK, err - } - - return SDK{ - meterProvider: mp, - tracerProvider: tp, - loggerProvider: lp, - shutdown: func(ctx context.Context) error { - return errors.Join(mpShutdown(ctx), tpShutdown(ctx), lpShutdown(ctx)) - }, - }, nil -} - -// ConfigurationOption configures options for providers. -type ConfigurationOption interface { - apply(configOptions) configOptions -} - -type configurationOptionFunc func(configOptions) configOptions - -func (fn configurationOptionFunc) apply(cfg configOptions) configOptions { - return fn(cfg) -} - -// WithContext sets the context.Context for the SDK. -func WithContext(ctx context.Context) ConfigurationOption { - return configurationOptionFunc(func(c configOptions) configOptions { - c.ctx = ctx - return c - }) -} - -// WithOpenTelemetryConfiguration sets the OpenTelemetryConfiguration used -// to produce the SDK. 
-func WithOpenTelemetryConfiguration(cfg OpenTelemetryConfiguration) ConfigurationOption { - return configurationOptionFunc(func(c configOptions) configOptions { - c.opentelemetryConfig = cfg - return c - }) -} - -// ParseYAML parses a YAML configuration file into an OpenTelemetryConfiguration. -func ParseYAML(file []byte) (*OpenTelemetryConfiguration, error) { - var cfg OpenTelemetryConfiguration - err := yaml.Unmarshal(file, &cfg) - if err != nil { - return nil, err - } - - return &cfg, nil -} - -// createTLSConfig creates a tls.Config from certificate files. -func createTLSConfig(caCertFile *string, clientCertFile *string, clientKeyFile *string) (*tls.Config, error) { - tlsConfig := &tls.Config{} - if caCertFile != nil { - caText, err := os.ReadFile(*caCertFile) - if err != nil { - return nil, err - } - certPool := x509.NewCertPool() - if !certPool.AppendCertsFromPEM(caText) { - return nil, errors.New("could not create certificate authority chain from certificate") - } - tlsConfig.RootCAs = certPool - } - if clientCertFile != nil { - if clientKeyFile == nil { - return nil, errors.New("client certificate was provided but no client key was provided") - } - clientCert, err := tls.LoadX509KeyPair(*clientCertFile, *clientKeyFile) - if err != nil { - return nil, fmt.Errorf("could not use client certificate: %w", err) - } - tlsConfig.Certificates = []tls.Certificate{clientCert} - } - return tlsConfig, nil -} - -// createHeadersConfig combines the two header config fields. Headers take precedence over headersList. -func createHeadersConfig(headers []NameStringValuePair, headersList *string) (map[string]string, error) { - result := make(map[string]string) - if headersList != nil { - // Parsing follows https://github.com/open-telemetry/opentelemetry-configuration/blob/568e5080816d40d75792eb754fc96bde09654159/schema/type_descriptions.yaml#L584. 
- headerslist, err := baggage.Parse(*headersList) - if err != nil { - return nil, fmt.Errorf("invalid headers list: %w", err) - } - for _, kv := range headerslist.Members() { - result[kv.Key()] = kv.Value() - } - } - // Headers take precedence over HeadersList, so this has to be after HeadersList is processed - if len(headers) > 0 { - for _, kv := range headers { - if kv.Value != nil { - result[kv.Name] = *kv.Value - } - } - } - return result, nil -} diff --git a/config/v0.3.0/config_json.go b/config/v0.3.0/config_json.go deleted file mode 100644 index 2d5a7f33125..00000000000 --- a/config/v0.3.0/config_json.go +++ /dev/null @@ -1,382 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.3.0" - -import ( - "encoding/json" - "errors" - "fmt" - "reflect" -) - -// MarshalJSON implements json.Marshaler. -func (j *AttributeNameValueType) MarshalJSON() ([]byte, error) { - return json.Marshal(j.Value) -} - -var enumValuesAttributeNameValueType = []interface{}{ - nil, - "string", - "bool", - "int", - "double", - "string_array", - "bool_array", - "int_array", - "double_array", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *AttributeNameValueType) UnmarshalJSON(b []byte) error { - var v struct { - Value interface{} - } - if err := json.Unmarshal(b, &v.Value); err != nil { - return err - } - var ok bool - for _, expected := range enumValuesAttributeNameValueType { - if reflect.DeepEqual(v.Value, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesAttributeNameValueType, v.Value) - } - *j = AttributeNameValueType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *BatchLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return errors.New("field exporter in BatchLogRecordProcessor: required") - } - type Plain BatchLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchLogRecordProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *BatchSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return errors.New("field exporter in BatchSpanProcessor: required") - } - type Plain BatchSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = BatchSpanProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *GeneralInstrumentationPeerServiceMappingElem) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["peer"]; raw != nil && !ok { - return errors.New("field peer in GeneralInstrumentationPeerServiceMappingElem: required") - } - if _, ok := raw["service"]; raw != nil && !ok { - return errors.New("field service in GeneralInstrumentationPeerServiceMappingElem: required") - } - type Plain GeneralInstrumentationPeerServiceMappingElem - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = GeneralInstrumentationPeerServiceMappingElem(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *NameStringValuePair) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["name"]; !ok { - return errors.New("json: cannot unmarshal field name in NameStringValuePair required") - } - if _, ok := raw["value"]; !ok { - return errors.New("json: cannot unmarshal field value in NameStringValuePair required") - } - var name, value string - var ok bool - if name, ok = raw["name"].(string); !ok { - return errors.New("yaml: cannot unmarshal field name in NameStringValuePair must be string") - } - if value, ok = raw["value"].(string); !ok { - return errors.New("yaml: cannot unmarshal field value in NameStringValuePair must be string") - } - - *j = NameStringValuePair{ - Name: name, - Value: &value, - } - return nil -} - -var enumValuesOTLPMetricDefaultHistogramAggregation = []interface{}{ - "explicit_bucket_histogram", - "base2_exponential_bucket_histogram", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLPMetricDefaultHistogramAggregation) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValuesOTLPMetricDefaultHistogramAggregation { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesOTLPMetricDefaultHistogramAggregation, v) - } - *j = OTLPMetricDefaultHistogramAggregation(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *OTLPMetric) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return errors.New("field endpoint in OTLPMetric: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return errors.New("field protocol in OTLPMetric: required") - } - type Plain OTLPMetric - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLPMetric(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OTLP) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return errors.New("field endpoint in OTLP: required") - } - if _, ok := raw["protocol"]; raw != nil && !ok { - return errors.New("field protocol in OTLP: required") - } - type Plain OTLP - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OTLP(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *OpenTelemetryConfiguration) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["file_format"]; raw != nil && !ok { - return errors.New("field file_format in OpenTelemetryConfiguration: required") - } - type Plain OpenTelemetryConfiguration - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = OpenTelemetryConfiguration(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *PeriodicMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return errors.New("field exporter in PeriodicMetricReader: required") - } - type Plain PeriodicMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PeriodicMetricReader(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *PullMetricReader) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return errors.New("field exporter in PullMetricReader: required") - } - type Plain PullMetricReader - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = PullMetricReader(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *SimpleLogRecordProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return errors.New("field exporter in SimpleLogRecordProcessor: required") - } - type Plain SimpleLogRecordProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleLogRecordProcessor(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *SimpleSpanProcessor) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["exporter"]; raw != nil && !ok { - return errors.New("field exporter in SimpleSpanProcessor: required") - } - type Plain SimpleSpanProcessor - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = SimpleSpanProcessor(plain) - return nil -} - -var enumValuesViewSelectorInstrumentType = []interface{}{ - "counter", - "histogram", - "observable_counter", - "observable_gauge", - "observable_up_down_counter", - "up_down_counter", -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *ViewSelectorInstrumentType) UnmarshalJSON(b []byte) error { - var v string - if err := json.Unmarshal(b, &v); err != nil { - return err - } - var ok bool - for _, expected := range enumValuesViewSelectorInstrumentType { - if reflect.DeepEqual(v, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesViewSelectorInstrumentType, v) - } - *j = ViewSelectorInstrumentType(v) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. -func (j *Zipkin) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["endpoint"]; raw != nil && !ok { - return errors.New("field endpoint in Zipkin: required") - } - type Plain Zipkin - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - *j = Zipkin(plain) - return nil -} - -// UnmarshalJSON implements json.Unmarshaler. 
-func (j *AttributeNameValue) UnmarshalJSON(b []byte) error { - var raw map[string]interface{} - if err := json.Unmarshal(b, &raw); err != nil { - return err - } - if _, ok := raw["name"]; raw != nil && !ok { - return errors.New("field name in AttributeNameValue: required") - } - if _, ok := raw["value"]; raw != nil && !ok { - return errors.New("field value in AttributeNameValue: required") - } - type Plain AttributeNameValue - var plain Plain - if err := json.Unmarshal(b, &plain); err != nil { - return err - } - if plain.Type != nil && plain.Type.Value == "int" { - val, ok := plain.Value.(float64) - if ok { - plain.Value = int(val) - } - } - if plain.Type != nil && plain.Type.Value == "int_array" { - m, ok := plain.Value.([]interface{}) - if ok { - var vals []interface{} - for _, v := range m { - val, ok := v.(float64) - if ok { - vals = append(vals, int(val)) - } else { - vals = append(vals, val) - } - } - plain.Value = vals - } - } - - *j = AttributeNameValue(plain) - return nil -} diff --git a/config/v0.3.0/config_test.go b/config/v0.3.0/config_test.go deleted file mode 100644 index 4ab519c84bb..00000000000 --- a/config/v0.3.0/config_test.go +++ /dev/null @@ -1,666 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "crypto/tls" - "encoding/json" - "errors" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - lognoop "go.opentelemetry.io/otel/log/noop" - metricnoop "go.opentelemetry.io/otel/metric/noop" - sdklog "go.opentelemetry.io/otel/sdk/log" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - tracenoop "go.opentelemetry.io/otel/trace/noop" -) - -func TestNewSDK(t *testing.T) { - tests := []struct { - name string - cfg []ConfigurationOption - wantTracerProvider any - wantMeterProvider any - wantLoggerProvider any - wantErr error - wantShutdownErr error - }{ - { - 
name: "no-configuration", - wantTracerProvider: tracenoop.NewTracerProvider(), - wantMeterProvider: metricnoop.NewMeterProvider(), - wantLoggerProvider: lognoop.NewLoggerProvider(), - }, - { - name: "with-configuration", - cfg: []ConfigurationOption{ - WithContext(context.Background()), - WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ - TracerProvider: &TracerProvider{}, - MeterProvider: &MeterProvider{}, - LoggerProvider: &LoggerProvider{}, - }), - }, - wantTracerProvider: &sdktrace.TracerProvider{}, - wantMeterProvider: &sdkmetric.MeterProvider{}, - wantLoggerProvider: &sdklog.LoggerProvider{}, - }, - { - name: "with-sdk-disabled", - cfg: []ConfigurationOption{ - WithContext(context.Background()), - WithOpenTelemetryConfiguration(OpenTelemetryConfiguration{ - Disabled: ptr(true), - TracerProvider: &TracerProvider{}, - MeterProvider: &MeterProvider{}, - LoggerProvider: &LoggerProvider{}, - }), - }, - wantTracerProvider: tracenoop.NewTracerProvider(), - wantMeterProvider: metricnoop.NewMeterProvider(), - wantLoggerProvider: lognoop.NewLoggerProvider(), - }, - } - for _, tt := range tests { - sdk, err := NewSDK(tt.cfg...) 
- require.Equal(t, tt.wantErr, err) - assert.IsType(t, tt.wantTracerProvider, sdk.TracerProvider()) - assert.IsType(t, tt.wantMeterProvider, sdk.MeterProvider()) - assert.IsType(t, tt.wantLoggerProvider, sdk.LoggerProvider()) - require.Equal(t, tt.wantShutdownErr, sdk.Shutdown(context.Background())) - } -} - -var v03OpenTelemetryConfig = OpenTelemetryConfiguration{ - Disabled: ptr(false), - FileFormat: ptr("0.3"), - AttributeLimits: &AttributeLimits{ - AttributeCountLimit: ptr(128), - AttributeValueLengthLimit: ptr(4096), - }, - Instrumentation: &Instrumentation{ - Cpp: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Dotnet: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Erlang: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - General: &GeneralInstrumentation{ - Http: &GeneralInstrumentationHttp{ - Client: &GeneralInstrumentationHttpClient{ - RequestCapturedHeaders: []string{"Content-Type", "Accept"}, - ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, - }, - Server: &GeneralInstrumentationHttpServer{ - RequestCapturedHeaders: []string{"Content-Type", "Accept"}, - ResponseCapturedHeaders: []string{"Content-Type", "Content-Encoding"}, - }, - }, - Peer: &GeneralInstrumentationPeer{ - ServiceMapping: []GeneralInstrumentationPeerServiceMappingElem{ - {Peer: "1.2.3.4", Service: "FooService"}, - {Peer: "2.3.4.5", Service: "BarService"}, - }, - }, - }, - Go: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Java: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Js: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Php: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, 
- Python: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Ruby: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Rust: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - Swift: LanguageSpecificInstrumentation{ - "example": map[string]interface{}{ - "property": "value", - }, - }, - }, - LoggerProvider: &LoggerProvider{ - Limits: &LogRecordLimits{ - AttributeCountLimit: ptr(128), - AttributeValueLengthLimit: ptr(4096), - }, - Processors: []LogRecordProcessor{ - { - Batch: &BatchLogRecordProcessor{ - ExportTimeout: ptr(30000), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Certificate: ptr("/app/cert.pem"), - ClientCertificate: ptr("/app/cert.pem"), - ClientKey: ptr("/app/cert.pem"), - Compression: ptr("gzip"), - Endpoint: ptr("http://localhost:4318/v1/logs"), - Headers: []NameStringValuePair{ - {Name: "api-key", Value: ptr("1234")}, - }, - HeadersList: ptr("api-key=1234"), - Insecure: ptr(false), - Protocol: ptr("http/protobuf"), - Timeout: ptr(10000), - }, - }, - MaxExportBatchSize: ptr(512), - MaxQueueSize: ptr(2048), - ScheduleDelay: ptr(5000), - }, - }, - { - Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{ - Console: Console{}, - }, - }, - }, - }, - }, - MeterProvider: &MeterProvider{ - Readers: []MetricReader{ - { - Producers: []MetricProducer{ - {Opencensus: MetricProducerOpencensus{}}, - }, - Pull: &PullMetricReader{ - Exporter: PullMetricExporter{ - Prometheus: &Prometheus{ - Host: ptr("localhost"), - Port: ptr(9464), - WithResourceConstantLabels: &IncludeExclude{ - Excluded: []string{"service.attr1"}, - Included: []string{"service*"}, - }, - WithoutScopeInfo: ptr(false), - WithoutTypeSuffix: ptr(false), - WithoutUnits: ptr(false), - }, - }, - }, - }, - { - Producers: []MetricProducer{ - {}, - }, - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: 
&OTLPMetric{ - Certificate: ptr("/app/cert.pem"), - ClientCertificate: ptr("/app/cert.pem"), - ClientKey: ptr("/app/cert.pem"), - Compression: ptr("gzip"), - DefaultHistogramAggregation: ptr(OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram), - Endpoint: ptr("http://localhost:4318/v1/metrics"), - Headers: []NameStringValuePair{ - {Name: "api-key", Value: ptr("1234")}, - }, - HeadersList: ptr("api-key=1234"), - Insecure: ptr(false), - Protocol: ptr("http/protobuf"), - TemporalityPreference: ptr("delta"), - Timeout: ptr(10000), - }, - }, - Interval: ptr(5000), - Timeout: ptr(30000), - }, - }, - { - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - Console: Console{}, - }, - }, - }, - }, - Views: []View{ - { - Selector: &ViewSelector{ - InstrumentName: ptr("my-instrument"), - InstrumentType: ptr(ViewSelectorInstrumentTypeHistogram), - MeterName: ptr("my-meter"), - MeterSchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), - MeterVersion: ptr("1.0.0"), - Unit: ptr("ms"), - }, - Stream: &ViewStream{ - Aggregation: &ViewStreamAggregation{ - ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{ - Boundaries: []float64{0, 5, 10, 25, 50, 75, 100, 250, 500, 750, 1000, 2500, 5000, 7500, 10000}, - RecordMinMax: ptr(true), - }, - }, - AttributeKeys: &IncludeExclude{ - Included: []string{"key1", "key2"}, - Excluded: []string{"key3"}, - }, - Description: ptr("new_description"), - Name: ptr("new_instrument_name"), - }, - }, - }, - }, - Propagator: &Propagator{ - Composite: []*string{ptr("tracecontext"), ptr("baggage"), ptr("b3"), ptr("b3multi"), ptr("jaeger"), ptr("xray"), ptr("ottrace")}, - }, - Resource: &Resource{ - Attributes: []AttributeNameValue{ - {Name: "service.name", Value: "unknown_service"}, - {Name: "string_key", Type: &AttributeNameValueType{Value: "string"}, Value: "value"}, - {Name: "bool_key", Type: &AttributeNameValueType{Value: "bool"}, Value: true}, - {Name: "int_key", Type: 
&AttributeNameValueType{Value: "int"}, Value: 1}, - {Name: "double_key", Type: &AttributeNameValueType{Value: "double"}, Value: 1.1}, - {Name: "string_array_key", Type: &AttributeNameValueType{Value: "string_array"}, Value: []interface{}{"value1", "value2"}}, - {Name: "bool_array_key", Type: &AttributeNameValueType{Value: "bool_array"}, Value: []interface{}{true, false}}, - {Name: "int_array_key", Type: &AttributeNameValueType{Value: "int_array"}, Value: []interface{}{1, 2}}, - {Name: "double_array_key", Type: &AttributeNameValueType{Value: "double_array"}, Value: []interface{}{1.1, 2.2}}, - }, - AttributesList: ptr("service.namespace=my-namespace,service.version=1.0.0"), - Detectors: &Detectors{ - Attributes: &DetectorsAttributes{ - Excluded: []string{"process.command_args"}, - Included: []string{"process.*"}, - }, - }, - SchemaUrl: ptr("https://opentelemetry.io/schemas/1.16.0"), - }, - TracerProvider: &TracerProvider{ - Limits: &SpanLimits{ - AttributeCountLimit: ptr(128), - AttributeValueLengthLimit: ptr(4096), - EventCountLimit: ptr(128), - EventAttributeCountLimit: ptr(128), - LinkCountLimit: ptr(128), - LinkAttributeCountLimit: ptr(128), - }, - Processors: []SpanProcessor{ - { - Batch: &BatchSpanProcessor{ - ExportTimeout: ptr(30000), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Certificate: ptr("/app/cert.pem"), - ClientCertificate: ptr("/app/cert.pem"), - ClientKey: ptr("/app/cert.pem"), - Compression: ptr("gzip"), - Endpoint: ptr("http://localhost:4318/v1/traces"), - Headers: []NameStringValuePair{ - {Name: "api-key", Value: ptr("1234")}, - }, - HeadersList: ptr("api-key=1234"), - Insecure: ptr(false), - Protocol: ptr("http/protobuf"), - Timeout: ptr(10000), - }, - }, - MaxExportBatchSize: ptr(512), - MaxQueueSize: ptr(2048), - ScheduleDelay: ptr(5000), - }, - }, - { - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - Zipkin: &Zipkin{ - Endpoint: ptr("http://localhost:9411/api/v2/spans"), - Timeout: ptr(10000), - }, - }, - }, - }, - { - Simple: 
&SimpleSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - }, - Sampler: &Sampler{ - ParentBased: &SamplerParentBased{ - LocalParentNotSampled: &Sampler{ - AlwaysOff: SamplerAlwaysOff{}, - }, - LocalParentSampled: &Sampler{ - AlwaysOn: SamplerAlwaysOn{}, - }, - RemoteParentNotSampled: &Sampler{ - AlwaysOff: SamplerAlwaysOff{}, - }, - RemoteParentSampled: &Sampler{ - AlwaysOn: SamplerAlwaysOn{}, - }, - Root: &Sampler{ - TraceIDRatioBased: &SamplerTraceIDRatioBased{ - Ratio: ptr(0.0001), - }, - }, - }, - }, - }, -} - -func TestParseYAML(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - wantType interface{} - }{ - { - name: "valid YAML config", - input: `valid_empty.yaml`, - wantErr: nil, - wantType: &OpenTelemetryConfiguration{ - Disabled: ptr(false), - FileFormat: ptr("0.1"), - }, - }, - { - name: "invalid config", - input: "invalid_bool.yaml", - wantErr: errors.New(`yaml: unmarshal errors: - line 2: cannot unmarshal !!str ` + "`notabool`" + ` into bool`), - }, - { - name: "invalid nil name", - input: "invalid_nil_name.yaml", - wantErr: errors.New(`yaml: cannot unmarshal field name in NameStringValuePair required`), - }, - { - name: "invalid nil value", - input: "invalid_nil_value.yaml", - wantErr: errors.New(`yaml: cannot unmarshal field value in NameStringValuePair required`), - }, - { - name: "valid v0.2 config", - input: "v0.2.yaml", - wantErr: errors.New(`yaml: unmarshal errors: - line 81: cannot unmarshal !!map into []config.NameStringValuePair - line 185: cannot unmarshal !!map into []config.NameStringValuePair - line 244: cannot unmarshal !!seq into config.IncludeExclude - line 305: cannot unmarshal !!map into []config.NameStringValuePair - line 408: cannot unmarshal !!map into []config.AttributeNameValue`), - }, - { - name: "valid v0.3 config", - input: "v0.3.yaml", - wantType: &v03OpenTelemetryConfig, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, err := 
os.ReadFile(filepath.Join("..", "testdata", tt.input)) - require.NoError(t, err) - - got, err := ParseYAML(b) - if tt.wantErr != nil { - require.Error(t, err) - require.Equal(t, tt.wantErr.Error(), err.Error()) - } else { - require.NoError(t, err) - assert.Equal(t, tt.wantType, got) - } - }) - } -} - -func TestSerializeJSON(t *testing.T) { - tests := []struct { - name string - input string - wantErr error - wantType interface{} - }{ - { - name: "valid JSON config", - input: `valid_empty.json`, - wantErr: nil, - wantType: OpenTelemetryConfiguration{ - Disabled: ptr(false), - FileFormat: ptr("0.1"), - }, - }, - { - name: "invalid config", - input: "invalid_bool.json", - wantErr: errors.New(`json: cannot unmarshal string into Go struct field Plain.disabled of type bool`), - }, - { - name: "invalid nil name", - input: "invalid_nil_name.json", - wantErr: errors.New(`json: cannot unmarshal field name in NameStringValuePair required`), - }, - { - name: "invalid nil value", - input: "invalid_nil_value.json", - wantErr: errors.New(`json: cannot unmarshal field value in NameStringValuePair required`), - }, - { - name: "valid v0.2 config", - input: "v0.2.json", - wantErr: errors.New(`json: cannot unmarshal object into Go struct field LogRecordProcessor.logger_provider.processors.batch`), - }, - { - name: "valid v0.3 config", - input: "v0.3.json", - wantType: v03OpenTelemetryConfig, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - b, err := os.ReadFile(filepath.Join("..", "testdata", tt.input)) - require.NoError(t, err) - - var got OpenTelemetryConfiguration - err = json.Unmarshal(b, &got) - - if tt.wantErr != nil { - require.Error(t, err) - require.ErrorContains(t, err, tt.wantErr.Error()) - } else { - require.NoError(t, err) - assert.Equal(t, tt.wantType, got) - } - }) - } -} - -func TestCreateTLSConfig(t *testing.T) { - tests := []struct { - name string - caCertFile *string - clientCertFile *string - clientKeyFile *string - wantErrContains 
string - want func(*tls.Config, *testing.T) - }{ - { - name: "no-input", - want: func(result *tls.Config, t *testing.T) { - require.Nil(t, result.Certificates) - require.Nil(t, result.RootCAs) - }, - }, - { - name: "only-cacert-provided", - caCertFile: ptr(filepath.Join("..", "testdata", "ca.crt")), - want: func(result *tls.Config, t *testing.T) { - require.Nil(t, result.Certificates) - require.NotNil(t, result.RootCAs) - }, - }, - { - name: "nonexistent-cacert-file", - caCertFile: ptr("nowhere.crt"), - wantErrContains: "open nowhere.crt:", - }, - { - name: "nonexistent-clientcert-file", - clientCertFile: ptr("nowhere.crt"), - clientKeyFile: ptr("nowhere.crt"), - wantErrContains: "could not use client certificate: open nowhere.crt:", - }, - { - name: "bad-cacert-file", - caCertFile: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - wantErrContains: "could not create certificate authority chain from certificate", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := createTLSConfig(tt.caCertFile, tt.clientCertFile, tt.clientKeyFile) - - if tt.wantErrContains != "" { - require.Contains(t, err.Error(), tt.wantErrContains) - } else { - require.NoError(t, err) - tt.want(got, t) - } - }) - } -} - -func TestCreateHeadersConfig(t *testing.T) { - tests := []struct { - name string - headers []NameStringValuePair - headersList *string - wantHeaders map[string]string - wantErr string - }{ - { - name: "no headers", - headers: []NameStringValuePair{}, - headersList: nil, - wantHeaders: map[string]string{}, - }, - { - name: "headerslist only", - headers: []NameStringValuePair{}, - headersList: ptr("a=b,c=d"), - wantHeaders: map[string]string{ - "a": "b", - "c": "d", - }, - }, - { - name: "headers only", - headers: []NameStringValuePair{ - { - Name: "a", - Value: ptr("b"), - }, - { - Name: "c", - Value: ptr("d"), - }, - }, - headersList: nil, - wantHeaders: map[string]string{ - "a": "b", - "c": "d", - }, - }, - { - name: "both headers 
and headerslist", - headers: []NameStringValuePair{ - { - Name: "a", - Value: ptr("b"), - }, - }, - headersList: ptr("c=d"), - wantHeaders: map[string]string{ - "a": "b", - "c": "d", - }, - }, - { - name: "headers supersedes headerslist", - headers: []NameStringValuePair{ - { - Name: "a", - Value: ptr("b"), - }, - { - Name: "c", - Value: ptr("override"), - }, - }, - headersList: ptr("c=d"), - wantHeaders: map[string]string{ - "a": "b", - "c": "override", - }, - }, - { - name: "invalid headerslist", - headersList: ptr("==="), - wantErr: "invalid headers list: invalid key: \"\"", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - headersMap, err := createHeadersConfig(tt.headers, tt.headersList) - if tt.wantErr != "" { - require.Error(t, err) - require.Equal(t, tt.wantErr, err.Error()) - } else { - require.NoError(t, err) - } - require.Equal(t, tt.wantHeaders, headersMap) - }) - } -} - -func ptr[T any](v T) *T { - return &v -} diff --git a/config/v0.3.0/config_yaml.go b/config/v0.3.0/config_yaml.go deleted file mode 100644 index 48a8ca640f5..00000000000 --- a/config/v0.3.0/config_yaml.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.3.0" - -import ( - "errors" - "fmt" - "reflect" -) - -// UnmarshalYAML implements yaml.Unmarshaler. -func (j *AttributeNameValueType) UnmarshalYAML(unmarshal func(interface{}) error) error { - var v struct { - Value interface{} - } - if err := unmarshal(&v.Value); err != nil { - return err - } - var ok bool - for _, expected := range enumValuesAttributeNameValueType { - if reflect.DeepEqual(v.Value, expected) { - ok = true - break - } - } - if !ok { - return fmt.Errorf("invalid value (expected one of %#v): %#v", enumValuesAttributeNameValueType, v.Value) - } - *j = AttributeNameValueType(v) - return nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. 
-func (j *NameStringValuePair) UnmarshalYAML(unmarshal func(interface{}) error) error { - var raw map[string]interface{} - if err := unmarshal(&raw); err != nil { - return err - } - if _, ok := raw["name"]; !ok { - return errors.New("yaml: cannot unmarshal field name in NameStringValuePair required") - } - if _, ok := raw["value"]; !ok { - return errors.New("yaml: cannot unmarshal field value in NameStringValuePair required") - } - var name, value string - var ok bool - if name, ok = raw["name"].(string); !ok { - return errors.New("yaml: cannot unmarshal field name in NameStringValuePair must be string") - } - if value, ok = raw["value"].(string); !ok { - return errors.New("yaml: cannot unmarshal field value in NameStringValuePair must be string") - } - *j = NameStringValuePair{ - Name: name, - Value: &value, - } - return nil -} - -// UnmarshalYAML implements yaml.Unmarshaler. -func (j *LanguageSpecificInstrumentation) UnmarshalYAML(unmarshal func(interface{}) error) error { - var raw map[string]interface{} - if err := unmarshal(&raw); err != nil { - return err - } - - *j = raw - return nil -} diff --git a/config/v0.3.0/fuzz_test.go b/config/v0.3.0/fuzz_test.go deleted file mode 100644 index 54244d14d89..00000000000 --- a/config/v0.3.0/fuzz_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "encoding/json" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -func FuzzJSON(f *testing.F) { - b, err := os.ReadFile(filepath.Join("..", "testdata", "v0.3.json")) - require.NoError(f, err) - f.Add(b) - - f.Fuzz(func(t *testing.T, data []byte) { - t.Log("JSON:\n" + string(data)) - - var cfg OpenTelemetryConfiguration - err := json.Unmarshal(b, &cfg) - if err != nil { - return - } - - sdk, err := NewSDK(WithOpenTelemetryConfiguration(cfg)) - if err != nil { - return - } - - ctx, cancel := 
context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - _ = sdk.Shutdown(ctx) - }) -} - -func FuzzYAML(f *testing.F) { - b, err := os.ReadFile(filepath.Join("..", "testdata", "v0.3.yaml")) - require.NoError(f, err) - f.Add(b) - - f.Fuzz(func(t *testing.T, data []byte) { - t.Log("YAML:\n" + string(data)) - - cfg, err := ParseYAML(data) - if err != nil { - return - } - - sdk, err := NewSDK(WithOpenTelemetryConfiguration(*cfg)) - if err != nil { - return - } - - ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond) - defer cancel() - _ = sdk.Shutdown(ctx) - }) -} diff --git a/config/v0.3.0/generated_config.go b/config/v0.3.0/generated_config.go deleted file mode 100644 index 8fe7310e9de..00000000000 --- a/config/v0.3.0/generated_config.go +++ /dev/null @@ -1,672 +0,0 @@ -// Code generated by github.com/atombender/go-jsonschema, DO NOT EDIT. - -package config - -type AttributeLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". - AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type AttributeNameValue struct { - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Type corresponds to the JSON schema field "type". - Type *AttributeNameValueType `json:"type,omitempty" yaml:"type,omitempty" mapstructure:"type,omitempty"` - - // Value corresponds to the JSON schema field "value". 
- Value interface{} `json:"value" yaml:"value" mapstructure:"value"` -} - -type AttributeNameValueType struct { - Value interface{} -} - -type BatchLogRecordProcessor struct { - // ExportTimeout corresponds to the JSON schema field "export_timeout". - ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` - - // Exporter corresponds to the JSON schema field "exporter". - Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // MaxExportBatchSize corresponds to the JSON schema field - // "max_export_batch_size". - MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` - - // MaxQueueSize corresponds to the JSON schema field "max_queue_size". - MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` - - // ScheduleDelay corresponds to the JSON schema field "schedule_delay". - ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` -} - -type BatchSpanProcessor struct { - // ExportTimeout corresponds to the JSON schema field "export_timeout". - ExportTimeout *int `json:"export_timeout,omitempty" yaml:"export_timeout,omitempty" mapstructure:"export_timeout,omitempty"` - - // Exporter corresponds to the JSON schema field "exporter". - Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // MaxExportBatchSize corresponds to the JSON schema field - // "max_export_batch_size". - MaxExportBatchSize *int `json:"max_export_batch_size,omitempty" yaml:"max_export_batch_size,omitempty" mapstructure:"max_export_batch_size,omitempty"` - - // MaxQueueSize corresponds to the JSON schema field "max_queue_size". 
- MaxQueueSize *int `json:"max_queue_size,omitempty" yaml:"max_queue_size,omitempty" mapstructure:"max_queue_size,omitempty"` - - // ScheduleDelay corresponds to the JSON schema field "schedule_delay". - ScheduleDelay *int `json:"schedule_delay,omitempty" yaml:"schedule_delay,omitempty" mapstructure:"schedule_delay,omitempty"` -} - -type Common map[string]interface{} - -type Console map[string]interface{} - -type Detectors struct { - // Attributes corresponds to the JSON schema field "attributes". - Attributes *DetectorsAttributes `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` -} - -type DetectorsAttributes struct { - // Excluded corresponds to the JSON schema field "excluded". - Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` - - // Included corresponds to the JSON schema field "included". - Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` -} - -type GeneralInstrumentation struct { - // Http corresponds to the JSON schema field "http". - Http *GeneralInstrumentationHttp `json:"http,omitempty" yaml:"http,omitempty" mapstructure:"http,omitempty"` - - // Peer corresponds to the JSON schema field "peer". - Peer *GeneralInstrumentationPeer `json:"peer,omitempty" yaml:"peer,omitempty" mapstructure:"peer,omitempty"` -} - -type GeneralInstrumentationHttp struct { - // Client corresponds to the JSON schema field "client". - Client *GeneralInstrumentationHttpClient `json:"client,omitempty" yaml:"client,omitempty" mapstructure:"client,omitempty"` - - // Server corresponds to the JSON schema field "server". - Server *GeneralInstrumentationHttpServer `json:"server,omitempty" yaml:"server,omitempty" mapstructure:"server,omitempty"` -} - -type GeneralInstrumentationHttpClient struct { - // RequestCapturedHeaders corresponds to the JSON schema field - // "request_captured_headers". 
- RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` - - // ResponseCapturedHeaders corresponds to the JSON schema field - // "response_captured_headers". - ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` -} - -type GeneralInstrumentationHttpServer struct { - // RequestCapturedHeaders corresponds to the JSON schema field - // "request_captured_headers". - RequestCapturedHeaders []string `json:"request_captured_headers,omitempty" yaml:"request_captured_headers,omitempty" mapstructure:"request_captured_headers,omitempty"` - - // ResponseCapturedHeaders corresponds to the JSON schema field - // "response_captured_headers". - ResponseCapturedHeaders []string `json:"response_captured_headers,omitempty" yaml:"response_captured_headers,omitempty" mapstructure:"response_captured_headers,omitempty"` -} - -type GeneralInstrumentationPeer struct { - // ServiceMapping corresponds to the JSON schema field "service_mapping". - ServiceMapping []GeneralInstrumentationPeerServiceMappingElem `json:"service_mapping,omitempty" yaml:"service_mapping,omitempty" mapstructure:"service_mapping,omitempty"` -} - -type GeneralInstrumentationPeerServiceMappingElem struct { - // Peer corresponds to the JSON schema field "peer". - Peer string `json:"peer" yaml:"peer" mapstructure:"peer"` - - // Service corresponds to the JSON schema field "service". - Service string `json:"service" yaml:"service" mapstructure:"service"` -} - -type IncludeExclude struct { - // Excluded corresponds to the JSON schema field "excluded". - Excluded []string `json:"excluded,omitempty" yaml:"excluded,omitempty" mapstructure:"excluded,omitempty"` - - // Included corresponds to the JSON schema field "included". 
- Included []string `json:"included,omitempty" yaml:"included,omitempty" mapstructure:"included,omitempty"` -} - -type Instrumentation struct { - // Cpp corresponds to the JSON schema field "cpp". - Cpp LanguageSpecificInstrumentation `json:"cpp,omitempty" yaml:"cpp,omitempty" mapstructure:"cpp,omitempty"` - - // Dotnet corresponds to the JSON schema field "dotnet". - Dotnet LanguageSpecificInstrumentation `json:"dotnet,omitempty" yaml:"dotnet,omitempty" mapstructure:"dotnet,omitempty"` - - // Erlang corresponds to the JSON schema field "erlang". - Erlang LanguageSpecificInstrumentation `json:"erlang,omitempty" yaml:"erlang,omitempty" mapstructure:"erlang,omitempty"` - - // General corresponds to the JSON schema field "general". - General *GeneralInstrumentation `json:"general,omitempty" yaml:"general,omitempty" mapstructure:"general,omitempty"` - - // Go corresponds to the JSON schema field "go". - Go LanguageSpecificInstrumentation `json:"go,omitempty" yaml:"go,omitempty" mapstructure:"go,omitempty"` - - // Java corresponds to the JSON schema field "java". - Java LanguageSpecificInstrumentation `json:"java,omitempty" yaml:"java,omitempty" mapstructure:"java,omitempty"` - - // Js corresponds to the JSON schema field "js". - Js LanguageSpecificInstrumentation `json:"js,omitempty" yaml:"js,omitempty" mapstructure:"js,omitempty"` - - // Php corresponds to the JSON schema field "php". - Php LanguageSpecificInstrumentation `json:"php,omitempty" yaml:"php,omitempty" mapstructure:"php,omitempty"` - - // Python corresponds to the JSON schema field "python". - Python LanguageSpecificInstrumentation `json:"python,omitempty" yaml:"python,omitempty" mapstructure:"python,omitempty"` - - // Ruby corresponds to the JSON schema field "ruby". - Ruby LanguageSpecificInstrumentation `json:"ruby,omitempty" yaml:"ruby,omitempty" mapstructure:"ruby,omitempty"` - - // Rust corresponds to the JSON schema field "rust". 
- Rust LanguageSpecificInstrumentation `json:"rust,omitempty" yaml:"rust,omitempty" mapstructure:"rust,omitempty"` - - // Swift corresponds to the JSON schema field "swift". - Swift LanguageSpecificInstrumentation `json:"swift,omitempty" yaml:"swift,omitempty" mapstructure:"swift,omitempty"` -} - -type LanguageSpecificInstrumentation map[string]interface{} - -type LogRecordExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type LogRecordLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". - AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` -} - -type LogRecordProcessor struct { - // Batch corresponds to the JSON schema field "batch". - Batch *BatchLogRecordProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` - - // Simple corresponds to the JSON schema field "simple". - Simple *SimpleLogRecordProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type LoggerProvider struct { - // Limits corresponds to the JSON schema field "limits". 
- Limits *LogRecordLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Processors corresponds to the JSON schema field "processors". - Processors []LogRecordProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` -} - -type MeterProvider struct { - // Readers corresponds to the JSON schema field "readers". - Readers []MetricReader `json:"readers,omitempty" yaml:"readers,omitempty" mapstructure:"readers,omitempty"` - - // Views corresponds to the JSON schema field "views". - Views []View `json:"views,omitempty" yaml:"views,omitempty" mapstructure:"views,omitempty"` -} - -type MetricProducer struct { - // Opencensus corresponds to the JSON schema field "opencensus". - Opencensus MetricProducerOpencensus `json:"opencensus,omitempty" yaml:"opencensus,omitempty" mapstructure:"opencensus,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type MetricProducerOpencensus map[string]interface{} - -type MetricReader struct { - // Periodic corresponds to the JSON schema field "periodic". - Periodic *PeriodicMetricReader `json:"periodic,omitempty" yaml:"periodic,omitempty" mapstructure:"periodic,omitempty"` - - // Producers corresponds to the JSON schema field "producers". - Producers []MetricProducer `json:"producers,omitempty" yaml:"producers,omitempty" mapstructure:"producers,omitempty"` - - // Pull corresponds to the JSON schema field "pull". - Pull *PullMetricReader `json:"pull,omitempty" yaml:"pull,omitempty" mapstructure:"pull,omitempty"` -} - -type NameStringValuePair struct { - // Name corresponds to the JSON schema field "name". - Name string `json:"name" yaml:"name" mapstructure:"name"` - - // Value corresponds to the JSON schema field "value". - Value *string `json:"value" yaml:"value" mapstructure:"value"` -} - -type OTLP struct { - // Certificate corresponds to the JSON schema field "certificate". 
- Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` - - // ClientCertificate corresponds to the JSON schema field "client_certificate". - ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` - - // ClientKey corresponds to the JSON schema field "client_key". - ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` - - // Compression corresponds to the JSON schema field "compression". - Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` - - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint *string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Headers corresponds to the JSON schema field "headers". - Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` - - // HeadersList corresponds to the JSON schema field "headers_list". - HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` - - // Insecure corresponds to the JSON schema field "insecure". - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol *string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -type OTLPMetric struct { - // Certificate corresponds to the JSON schema field "certificate". 
- Certificate *string `json:"certificate,omitempty" yaml:"certificate,omitempty" mapstructure:"certificate,omitempty"` - - // ClientCertificate corresponds to the JSON schema field "client_certificate". - ClientCertificate *string `json:"client_certificate,omitempty" yaml:"client_certificate,omitempty" mapstructure:"client_certificate,omitempty"` - - // ClientKey corresponds to the JSON schema field "client_key". - ClientKey *string `json:"client_key,omitempty" yaml:"client_key,omitempty" mapstructure:"client_key,omitempty"` - - // Compression corresponds to the JSON schema field "compression". - Compression *string `json:"compression,omitempty" yaml:"compression,omitempty" mapstructure:"compression,omitempty"` - - // DefaultHistogramAggregation corresponds to the JSON schema field - // "default_histogram_aggregation". - DefaultHistogramAggregation *OTLPMetricDefaultHistogramAggregation `json:"default_histogram_aggregation,omitempty" yaml:"default_histogram_aggregation,omitempty" mapstructure:"default_histogram_aggregation,omitempty"` - - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint *string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Headers corresponds to the JSON schema field "headers". - Headers []NameStringValuePair `json:"headers,omitempty" yaml:"headers,omitempty" mapstructure:"headers,omitempty"` - - // HeadersList corresponds to the JSON schema field "headers_list". - HeadersList *string `json:"headers_list,omitempty" yaml:"headers_list,omitempty" mapstructure:"headers_list,omitempty"` - - // Insecure corresponds to the JSON schema field "insecure". - Insecure *bool `json:"insecure,omitempty" yaml:"insecure,omitempty" mapstructure:"insecure,omitempty"` - - // Protocol corresponds to the JSON schema field "protocol". - Protocol *string `json:"protocol" yaml:"protocol" mapstructure:"protocol"` - - // TemporalityPreference corresponds to the JSON schema field - // "temporality_preference". 
- TemporalityPreference *string `json:"temporality_preference,omitempty" yaml:"temporality_preference,omitempty" mapstructure:"temporality_preference,omitempty"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -type OTLPMetricDefaultHistogramAggregation string - -const OTLPMetricDefaultHistogramAggregationBase2ExponentialBucketHistogram OTLPMetricDefaultHistogramAggregation = "base2_exponential_bucket_histogram" -const OTLPMetricDefaultHistogramAggregationExplicitBucketHistogram OTLPMetricDefaultHistogramAggregation = "explicit_bucket_histogram" - -type OpenTelemetryConfiguration struct { - // AttributeLimits corresponds to the JSON schema field "attribute_limits". - AttributeLimits *AttributeLimits `json:"attribute_limits,omitempty" yaml:"attribute_limits,omitempty" mapstructure:"attribute_limits,omitempty"` - - // Disabled corresponds to the JSON schema field "disabled". - Disabled *bool `json:"disabled,omitempty" yaml:"disabled,omitempty" mapstructure:"disabled,omitempty"` - - // FileFormat corresponds to the JSON schema field "file_format". - FileFormat *string `json:"file_format" yaml:"file_format" mapstructure:"file_format"` - - // Instrumentation corresponds to the JSON schema field "instrumentation". - Instrumentation *Instrumentation `json:"instrumentation,omitempty" yaml:"instrumentation,omitempty" mapstructure:"instrumentation,omitempty"` - - // LoggerProvider corresponds to the JSON schema field "logger_provider". - LoggerProvider *LoggerProvider `json:"logger_provider,omitempty" yaml:"logger_provider,omitempty" mapstructure:"logger_provider,omitempty"` - - // MeterProvider corresponds to the JSON schema field "meter_provider". - MeterProvider *MeterProvider `json:"meter_provider,omitempty" yaml:"meter_provider,omitempty" mapstructure:"meter_provider,omitempty"` - - // Propagator corresponds to the JSON schema field "propagator". 
- Propagator *Propagator `json:"propagator,omitempty" yaml:"propagator,omitempty" mapstructure:"propagator,omitempty"` - - // Resource corresponds to the JSON schema field "resource". - Resource *Resource `json:"resource,omitempty" yaml:"resource,omitempty" mapstructure:"resource,omitempty"` - - // TracerProvider corresponds to the JSON schema field "tracer_provider". - TracerProvider *TracerProvider `json:"tracer_provider,omitempty" yaml:"tracer_provider,omitempty" mapstructure:"tracer_provider,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type PeriodicMetricReader struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter PushMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` - - // Interval corresponds to the JSON schema field "interval". - Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} - -type Prometheus struct { - // Host corresponds to the JSON schema field "host". - Host *string `json:"host,omitempty" yaml:"host,omitempty" mapstructure:"host,omitempty"` - - // Port corresponds to the JSON schema field "port". - Port *int `json:"port,omitempty" yaml:"port,omitempty" mapstructure:"port,omitempty"` - - // WithResourceConstantLabels corresponds to the JSON schema field - // "with_resource_constant_labels". - WithResourceConstantLabels *IncludeExclude `json:"with_resource_constant_labels,omitempty" yaml:"with_resource_constant_labels,omitempty" mapstructure:"with_resource_constant_labels,omitempty"` - - // WithoutScopeInfo corresponds to the JSON schema field "without_scope_info". 
- WithoutScopeInfo *bool `json:"without_scope_info,omitempty" yaml:"without_scope_info,omitempty" mapstructure:"without_scope_info,omitempty"` - - // WithoutTypeSuffix corresponds to the JSON schema field "without_type_suffix". - WithoutTypeSuffix *bool `json:"without_type_suffix,omitempty" yaml:"without_type_suffix,omitempty" mapstructure:"without_type_suffix,omitempty"` - - // WithoutUnits corresponds to the JSON schema field "without_units". - WithoutUnits *bool `json:"without_units,omitempty" yaml:"without_units,omitempty" mapstructure:"without_units,omitempty"` -} - -type Propagator struct { - // Composite corresponds to the JSON schema field "composite". - Composite []*string `json:"composite,omitempty" yaml:"composite,omitempty" mapstructure:"composite,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type PullMetricExporter struct { - // Prometheus corresponds to the JSON schema field "prometheus". - Prometheus *Prometheus `json:"prometheus,omitempty" yaml:"prometheus,omitempty" mapstructure:"prometheus,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type PullMetricReader struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter PullMetricExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -type PushMetricExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLPMetric `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type Resource struct { - // Attributes corresponds to the JSON schema field "attributes". 
- Attributes []AttributeNameValue `json:"attributes,omitempty" yaml:"attributes,omitempty" mapstructure:"attributes,omitempty"` - - // AttributesList corresponds to the JSON schema field "attributes_list". - AttributesList *string `json:"attributes_list,omitempty" yaml:"attributes_list,omitempty" mapstructure:"attributes_list,omitempty"` - - // Detectors corresponds to the JSON schema field "detectors". - Detectors *Detectors `json:"detectors,omitempty" yaml:"detectors,omitempty" mapstructure:"detectors,omitempty"` - - // SchemaUrl corresponds to the JSON schema field "schema_url". - SchemaUrl *string `json:"schema_url,omitempty" yaml:"schema_url,omitempty" mapstructure:"schema_url,omitempty"` -} - -type Sampler struct { - // AlwaysOff corresponds to the JSON schema field "always_off". - AlwaysOff SamplerAlwaysOff `json:"always_off,omitempty" yaml:"always_off,omitempty" mapstructure:"always_off,omitempty"` - - // AlwaysOn corresponds to the JSON schema field "always_on". - AlwaysOn SamplerAlwaysOn `json:"always_on,omitempty" yaml:"always_on,omitempty" mapstructure:"always_on,omitempty"` - - // JaegerRemote corresponds to the JSON schema field "jaeger_remote". - JaegerRemote *SamplerJaegerRemote `json:"jaeger_remote,omitempty" yaml:"jaeger_remote,omitempty" mapstructure:"jaeger_remote,omitempty"` - - // ParentBased corresponds to the JSON schema field "parent_based". - ParentBased *SamplerParentBased `json:"parent_based,omitempty" yaml:"parent_based,omitempty" mapstructure:"parent_based,omitempty"` - - // TraceIDRatioBased corresponds to the JSON schema field "trace_id_ratio_based". 
- TraceIDRatioBased *SamplerTraceIDRatioBased `json:"trace_id_ratio_based,omitempty" yaml:"trace_id_ratio_based,omitempty" mapstructure:"trace_id_ratio_based,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type SamplerAlwaysOff map[string]interface{} - -type SamplerAlwaysOn map[string]interface{} - -type SamplerJaegerRemote struct { - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint *string `json:"endpoint,omitempty" yaml:"endpoint,omitempty" mapstructure:"endpoint,omitempty"` - - // InitialSampler corresponds to the JSON schema field "initial_sampler". - InitialSampler *Sampler `json:"initial_sampler,omitempty" yaml:"initial_sampler,omitempty" mapstructure:"initial_sampler,omitempty"` - - // Interval corresponds to the JSON schema field "interval". - Interval *int `json:"interval,omitempty" yaml:"interval,omitempty" mapstructure:"interval,omitempty"` -} - -type SamplerParentBased struct { - // LocalParentNotSampled corresponds to the JSON schema field - // "local_parent_not_sampled". - LocalParentNotSampled *Sampler `json:"local_parent_not_sampled,omitempty" yaml:"local_parent_not_sampled,omitempty" mapstructure:"local_parent_not_sampled,omitempty"` - - // LocalParentSampled corresponds to the JSON schema field "local_parent_sampled". - LocalParentSampled *Sampler `json:"local_parent_sampled,omitempty" yaml:"local_parent_sampled,omitempty" mapstructure:"local_parent_sampled,omitempty"` - - // RemoteParentNotSampled corresponds to the JSON schema field - // "remote_parent_not_sampled". - RemoteParentNotSampled *Sampler `json:"remote_parent_not_sampled,omitempty" yaml:"remote_parent_not_sampled,omitempty" mapstructure:"remote_parent_not_sampled,omitempty"` - - // RemoteParentSampled corresponds to the JSON schema field - // "remote_parent_sampled". 
- RemoteParentSampled *Sampler `json:"remote_parent_sampled,omitempty" yaml:"remote_parent_sampled,omitempty" mapstructure:"remote_parent_sampled,omitempty"` - - // Root corresponds to the JSON schema field "root". - Root *Sampler `json:"root,omitempty" yaml:"root,omitempty" mapstructure:"root,omitempty"` -} - -type SamplerTraceIDRatioBased struct { - // Ratio corresponds to the JSON schema field "ratio". - Ratio *float64 `json:"ratio,omitempty" yaml:"ratio,omitempty" mapstructure:"ratio,omitempty"` -} - -type SimpleLogRecordProcessor struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter LogRecordExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -type SimpleSpanProcessor struct { - // Exporter corresponds to the JSON schema field "exporter". - Exporter SpanExporter `json:"exporter" yaml:"exporter" mapstructure:"exporter"` -} - -type SpanExporter struct { - // Console corresponds to the JSON schema field "console". - Console Console `json:"console,omitempty" yaml:"console,omitempty" mapstructure:"console,omitempty"` - - // OTLP corresponds to the JSON schema field "otlp". - OTLP *OTLP `json:"otlp,omitempty" yaml:"otlp,omitempty" mapstructure:"otlp,omitempty"` - - // Zipkin corresponds to the JSON schema field "zipkin". - Zipkin *Zipkin `json:"zipkin,omitempty" yaml:"zipkin,omitempty" mapstructure:"zipkin,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type SpanLimits struct { - // AttributeCountLimit corresponds to the JSON schema field - // "attribute_count_limit". - AttributeCountLimit *int `json:"attribute_count_limit,omitempty" yaml:"attribute_count_limit,omitempty" mapstructure:"attribute_count_limit,omitempty"` - - // AttributeValueLengthLimit corresponds to the JSON schema field - // "attribute_value_length_limit". 
- AttributeValueLengthLimit *int `json:"attribute_value_length_limit,omitempty" yaml:"attribute_value_length_limit,omitempty" mapstructure:"attribute_value_length_limit,omitempty"` - - // EventAttributeCountLimit corresponds to the JSON schema field - // "event_attribute_count_limit". - EventAttributeCountLimit *int `json:"event_attribute_count_limit,omitempty" yaml:"event_attribute_count_limit,omitempty" mapstructure:"event_attribute_count_limit,omitempty"` - - // EventCountLimit corresponds to the JSON schema field "event_count_limit". - EventCountLimit *int `json:"event_count_limit,omitempty" yaml:"event_count_limit,omitempty" mapstructure:"event_count_limit,omitempty"` - - // LinkAttributeCountLimit corresponds to the JSON schema field - // "link_attribute_count_limit". - LinkAttributeCountLimit *int `json:"link_attribute_count_limit,omitempty" yaml:"link_attribute_count_limit,omitempty" mapstructure:"link_attribute_count_limit,omitempty"` - - // LinkCountLimit corresponds to the JSON schema field "link_count_limit". - LinkCountLimit *int `json:"link_count_limit,omitempty" yaml:"link_count_limit,omitempty" mapstructure:"link_count_limit,omitempty"` -} - -type SpanProcessor struct { - // Batch corresponds to the JSON schema field "batch". - Batch *BatchSpanProcessor `json:"batch,omitempty" yaml:"batch,omitempty" mapstructure:"batch,omitempty"` - - // Simple corresponds to the JSON schema field "simple". - Simple *SimpleSpanProcessor `json:"simple,omitempty" yaml:"simple,omitempty" mapstructure:"simple,omitempty"` - - AdditionalProperties interface{} `mapstructure:",remain"` -} - -type TracerProvider struct { - // Limits corresponds to the JSON schema field "limits". - Limits *SpanLimits `json:"limits,omitempty" yaml:"limits,omitempty" mapstructure:"limits,omitempty"` - - // Processors corresponds to the JSON schema field "processors". 
- Processors []SpanProcessor `json:"processors,omitempty" yaml:"processors,omitempty" mapstructure:"processors,omitempty"` - - // Sampler corresponds to the JSON schema field "sampler". - Sampler *Sampler `json:"sampler,omitempty" yaml:"sampler,omitempty" mapstructure:"sampler,omitempty"` -} - -type View struct { - // Selector corresponds to the JSON schema field "selector". - Selector *ViewSelector `json:"selector,omitempty" yaml:"selector,omitempty" mapstructure:"selector,omitempty"` - - // Stream corresponds to the JSON schema field "stream". - Stream *ViewStream `json:"stream,omitempty" yaml:"stream,omitempty" mapstructure:"stream,omitempty"` -} - -type ViewSelector struct { - // InstrumentName corresponds to the JSON schema field "instrument_name". - InstrumentName *string `json:"instrument_name,omitempty" yaml:"instrument_name,omitempty" mapstructure:"instrument_name,omitempty"` - - // InstrumentType corresponds to the JSON schema field "instrument_type". - InstrumentType *ViewSelectorInstrumentType `json:"instrument_type,omitempty" yaml:"instrument_type,omitempty" mapstructure:"instrument_type,omitempty"` - - // MeterName corresponds to the JSON schema field "meter_name". - MeterName *string `json:"meter_name,omitempty" yaml:"meter_name,omitempty" mapstructure:"meter_name,omitempty"` - - // MeterSchemaUrl corresponds to the JSON schema field "meter_schema_url". - MeterSchemaUrl *string `json:"meter_schema_url,omitempty" yaml:"meter_schema_url,omitempty" mapstructure:"meter_schema_url,omitempty"` - - // MeterVersion corresponds to the JSON schema field "meter_version". - MeterVersion *string `json:"meter_version,omitempty" yaml:"meter_version,omitempty" mapstructure:"meter_version,omitempty"` - - // Unit corresponds to the JSON schema field "unit". 
- Unit *string `json:"unit,omitempty" yaml:"unit,omitempty" mapstructure:"unit,omitempty"` -} - -type ViewSelectorInstrumentType string - -const ViewSelectorInstrumentTypeCounter ViewSelectorInstrumentType = "counter" -const ViewSelectorInstrumentTypeHistogram ViewSelectorInstrumentType = "histogram" -const ViewSelectorInstrumentTypeObservableCounter ViewSelectorInstrumentType = "observable_counter" -const ViewSelectorInstrumentTypeObservableGauge ViewSelectorInstrumentType = "observable_gauge" -const ViewSelectorInstrumentTypeObservableUpDownCounter ViewSelectorInstrumentType = "observable_up_down_counter" -const ViewSelectorInstrumentTypeUpDownCounter ViewSelectorInstrumentType = "up_down_counter" - -type ViewStream struct { - // Aggregation corresponds to the JSON schema field "aggregation". - Aggregation *ViewStreamAggregation `json:"aggregation,omitempty" yaml:"aggregation,omitempty" mapstructure:"aggregation,omitempty"` - - // AttributeKeys corresponds to the JSON schema field "attribute_keys". - AttributeKeys *IncludeExclude `json:"attribute_keys,omitempty" yaml:"attribute_keys,omitempty" mapstructure:"attribute_keys,omitempty"` - - // Description corresponds to the JSON schema field "description". - Description *string `json:"description,omitempty" yaml:"description,omitempty" mapstructure:"description,omitempty"` - - // Name corresponds to the JSON schema field "name". - Name *string `json:"name,omitempty" yaml:"name,omitempty" mapstructure:"name,omitempty"` -} - -type ViewStreamAggregation struct { - // Base2ExponentialBucketHistogram corresponds to the JSON schema field - // "base2_exponential_bucket_histogram". - Base2ExponentialBucketHistogram *ViewStreamAggregationBase2ExponentialBucketHistogram `json:"base2_exponential_bucket_histogram,omitempty" yaml:"base2_exponential_bucket_histogram,omitempty" mapstructure:"base2_exponential_bucket_histogram,omitempty"` - - // Default corresponds to the JSON schema field "default". 
- Default ViewStreamAggregationDefault `json:"default,omitempty" yaml:"default,omitempty" mapstructure:"default,omitempty"` - - // Drop corresponds to the JSON schema field "drop". - Drop ViewStreamAggregationDrop `json:"drop,omitempty" yaml:"drop,omitempty" mapstructure:"drop,omitempty"` - - // ExplicitBucketHistogram corresponds to the JSON schema field - // "explicit_bucket_histogram". - ExplicitBucketHistogram *ViewStreamAggregationExplicitBucketHistogram `json:"explicit_bucket_histogram,omitempty" yaml:"explicit_bucket_histogram,omitempty" mapstructure:"explicit_bucket_histogram,omitempty"` - - // LastValue corresponds to the JSON schema field "last_value". - LastValue ViewStreamAggregationLastValue `json:"last_value,omitempty" yaml:"last_value,omitempty" mapstructure:"last_value,omitempty"` - - // Sum corresponds to the JSON schema field "sum". - Sum ViewStreamAggregationSum `json:"sum,omitempty" yaml:"sum,omitempty" mapstructure:"sum,omitempty"` -} - -type ViewStreamAggregationBase2ExponentialBucketHistogram struct { - // MaxScale corresponds to the JSON schema field "max_scale". - MaxScale *int `json:"max_scale,omitempty" yaml:"max_scale,omitempty" mapstructure:"max_scale,omitempty"` - - // MaxSize corresponds to the JSON schema field "max_size". - MaxSize *int `json:"max_size,omitempty" yaml:"max_size,omitempty" mapstructure:"max_size,omitempty"` - - // RecordMinMax corresponds to the JSON schema field "record_min_max". - RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` -} - -type ViewStreamAggregationDefault map[string]interface{} - -type ViewStreamAggregationDrop map[string]interface{} - -type ViewStreamAggregationExplicitBucketHistogram struct { - // Boundaries corresponds to the JSON schema field "boundaries". 
- Boundaries []float64 `json:"boundaries,omitempty" yaml:"boundaries,omitempty" mapstructure:"boundaries,omitempty"` - - // RecordMinMax corresponds to the JSON schema field "record_min_max". - RecordMinMax *bool `json:"record_min_max,omitempty" yaml:"record_min_max,omitempty" mapstructure:"record_min_max,omitempty"` -} - -type ViewStreamAggregationLastValue map[string]interface{} - -type ViewStreamAggregationSum map[string]interface{} - -type Zipkin struct { - // Endpoint corresponds to the JSON schema field "endpoint". - Endpoint *string `json:"endpoint" yaml:"endpoint" mapstructure:"endpoint"` - - // Timeout corresponds to the JSON schema field "timeout". - Timeout *int `json:"timeout,omitempty" yaml:"timeout,omitempty" mapstructure:"timeout,omitempty"` -} diff --git a/config/v0.3.0/log.go b/config/v0.3.0/log.go deleted file mode 100644 index 714ed4faebd..00000000000 --- a/config/v0.3.0/log.go +++ /dev/null @@ -1,222 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.3.0" - -import ( - "context" - "errors" - "fmt" - "net/url" - "time" - - "google.golang.org/grpc/credentials" - - "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" - "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/noop" - sdklog "go.opentelemetry.io/otel/sdk/log" - "go.opentelemetry.io/otel/sdk/resource" -) - -func loggerProvider(cfg configOptions, res *resource.Resource) (log.LoggerProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.LoggerProvider == nil { - return noop.NewLoggerProvider(), noopShutdown, nil - } - opts := []sdklog.LoggerProviderOption{ - sdklog.WithResource(res), - } - var errs []error - for _, processor := range cfg.opentelemetryConfig.LoggerProvider.Processors { - sp, err := logProcessor(cfg.ctx, processor) - if err == nil 
{ - opts = append(opts, sdklog.WithProcessor(sp)) - } else { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return noop.NewLoggerProvider(), noopShutdown, errors.Join(errs...) - } - - lp := sdklog.NewLoggerProvider(opts...) - return lp, lp.Shutdown, nil -} - -func logProcessor(ctx context.Context, processor LogRecordProcessor) (sdklog.Processor, error) { - if processor.Batch != nil && processor.Simple != nil { - return nil, errors.New("must not specify multiple log processor type") - } - if processor.Batch != nil { - exp, err := logExporter(ctx, processor.Batch.Exporter) - if err != nil { - return nil, err - } - return batchLogProcessor(processor.Batch, exp) - } - if processor.Simple != nil { - exp, err := logExporter(ctx, processor.Simple.Exporter) - if err != nil { - return nil, err - } - return sdklog.NewSimpleProcessor(exp), nil - } - return nil, errors.New("unsupported log processor type, must be one of simple or batch") -} - -func logExporter(ctx context.Context, exporter LogRecordExporter) (sdklog.Exporter, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - - if exporter.Console != nil { - return stdoutlog.New( - stdoutlog.WithPrettyPrint(), - ) - } - - if exporter.OTLP != nil && exporter.OTLP.Protocol != nil { - switch *exporter.OTLP.Protocol { - case protocolProtobufHTTP: - return otlpHTTPLogExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - return otlpGRPCLogExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", *exporter.OTLP.Protocol) - } - } - return nil, errors.New("no valid log exporter") -} - -func batchLogProcessor(blp *BatchLogRecordProcessor, exp sdklog.Exporter) (*sdklog.BatchProcessor, error) { - var opts []sdklog.BatchProcessorOption - if blp.ExportTimeout != nil { - if *blp.ExportTimeout < 0 { - return nil, fmt.Errorf("invalid export timeout %d", *blp.ExportTimeout) - } - opts = append(opts, 
sdklog.WithExportTimeout(time.Millisecond*time.Duration(*blp.ExportTimeout))) - } - if blp.MaxExportBatchSize != nil { - if *blp.MaxExportBatchSize < 0 { - return nil, fmt.Errorf("invalid batch size %d", *blp.MaxExportBatchSize) - } - opts = append(opts, sdklog.WithExportMaxBatchSize(*blp.MaxExportBatchSize)) - } - if blp.MaxQueueSize != nil { - if *blp.MaxQueueSize < 0 { - return nil, fmt.Errorf("invalid queue size %d", *blp.MaxQueueSize) - } - opts = append(opts, sdklog.WithMaxQueueSize(*blp.MaxQueueSize)) - } - - if blp.ScheduleDelay != nil { - if *blp.ScheduleDelay < 0 { - return nil, fmt.Errorf("invalid schedule delay %d", *blp.ScheduleDelay) - } - opts = append(opts, sdklog.WithExportInterval(time.Millisecond*time.Duration(*blp.ScheduleDelay))) - } - - return sdklog.NewBatchProcessor(exp, opts...), nil -} - -func otlpHTTPLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { - var opts []otlploghttp.Option - - if otlpConfig.Endpoint != nil { - u, err := url.ParseRequestURI(*otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlploghttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlploghttp.WithInsecure()) - } - if len(u.Path) > 0 { - opts = append(opts, otlploghttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlploghttp.WithCompression(otlploghttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlploghttp.WithCompression(otlploghttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlploghttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) - if err != nil { - return nil, err - } - if len(headersConfig) > 0 
{ - opts = append(opts, otlploghttp.WithHeaders(headersConfig)) - } - - tlsConfig, err := createTLSConfig(otlpConfig.Certificate, otlpConfig.ClientCertificate, otlpConfig.ClientKey) - if err != nil { - return nil, err - } - opts = append(opts, otlploghttp.WithTLSClientConfig(tlsConfig)) - - return otlploghttp.New(ctx, opts...) -} - -func otlpGRPCLogExporter(ctx context.Context, otlpConfig *OTLP) (sdklog.Exporter, error) { - var opts []otlploggrpc.Option - - if otlpConfig.Endpoint != nil { - u, err := url.ParseRequestURI(*otlpConfig.Endpoint) - if err != nil { - return nil, err - } - // ParseRequestURI leaves the Host field empty when no - // scheme is specified (i.e. localhost:4317). This check is - // here to support the case where a user may not specify a - // scheme. The code does its best effort here by using - // otlpConfig.Endpoint as-is in that case - if u.Host != "" { - opts = append(opts, otlploggrpc.WithEndpoint(u.Host)) - } else { - opts = append(opts, otlploggrpc.WithEndpoint(*otlpConfig.Endpoint)) - } - if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Insecure != nil && *otlpConfig.Insecure) { - opts = append(opts, otlploggrpc.WithInsecure()) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlploggrpc.WithCompressor(*otlpConfig.Compression)) - case compressionNone: - // none requires no options - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlploggrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) - if err != nil { - return nil, err - } - if len(headersConfig) > 0 { - opts = append(opts, otlploggrpc.WithHeaders(headersConfig)) - } - - tlsConfig, err := createTLSConfig(otlpConfig.Certificate, otlpConfig.ClientCertificate, 
otlpConfig.ClientKey) - if err != nil { - return nil, err - } - opts = append(opts, otlploggrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))) - - return otlploggrpc.New(ctx, opts...) -} diff --git a/config/v0.3.0/log_test.go b/config/v0.3.0/log_test.go deleted file mode 100644 index 4f73c289eb7..00000000000 --- a/config/v0.3.0/log_test.go +++ /dev/null @@ -1,696 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "errors" - "path/filepath" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp" - "go.opentelemetry.io/otel/exporters/stdout/stdoutlog" - "go.opentelemetry.io/otel/log" - "go.opentelemetry.io/otel/log/noop" - sdklog "go.opentelemetry.io/otel/sdk/log" - "go.opentelemetry.io/otel/sdk/resource" -) - -func TestLoggerProvider(t *testing.T) { - tests := []struct { - name string - cfg configOptions - wantProvider log.LoggerProvider - wantErr error - }{ - { - name: "no-logger-provider-configured", - wantProvider: noop.NewLoggerProvider(), - }, - { - name: "error-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - LoggerProvider: &LoggerProvider{ - Processors: []LogRecordProcessor{ - { - Simple: &SimpleLogRecordProcessor{}, - Batch: &BatchLogRecordProcessor{}, - }, - }, - }, - }, - }, - wantProvider: noop.NewLoggerProvider(), - wantErr: errors.Join(errors.New("must not specify multiple log processor type")), - }, - } - for _, tt := range tests { - mp, shutdown, err := loggerProvider(tt.cfg, resource.Default()) - require.Equal(t, tt.wantProvider, mp) - assert.Equal(t, tt.wantErr, err) - require.NoError(t, shutdown(context.Background())) - } -} - -func TestLogProcessor(t *testing.T) { - ctx := context.Background() - - otlpHTTPExporter, err := otlploghttp.New(ctx) - 
require.NoError(t, err) - - otlpGRPCExporter, err := otlploggrpc.New(ctx) - require.NoError(t, err) - - consoleExporter, err := stdoutlog.New( - stdoutlog.WithPrettyPrint(), - ) - require.NoError(t, err) - - testCases := []struct { - name string - processor LogRecordProcessor - args any - wantErr string - wantProcessor sdklog.Processor - }{ - { - name: "no processor", - wantErr: "unsupported log processor type, must be one of simple or batch", - }, - { - name: "multiple processor types", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{}, - }, - Simple: &SimpleLogRecordProcessor{}, - }, - wantErr: "must not specify multiple log processor type", - }, - { - name: "batch processor invalid batch size otlphttp exporter", - - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(-1), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - }, - }, - }, - }, - wantErr: "invalid batch size -1", - }, - { - name: "batch processor invalid export timeout otlphttp exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - ExportTimeout: ptr(-2), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - }, - }, - }, - }, - wantErr: "invalid export timeout -2", - }, - { - name: "batch processor invalid queue size otlphttp exporter", - - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxQueueSize: ptr(-3), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - }, - }, - }, - }, - wantErr: "invalid queue size -3", - }, - { - name: "batch processor invalid schedule delay console exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - ScheduleDelay: ptr(-4), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - }, - }, - }, - }, - wantErr: "invalid schedule delay -4", - }, - { - name: "batch processor invalid exporter", - 
processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{}, - }, - }, - wantErr: "no valid log exporter", - }, - { - name: "batch/console", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - Console: map[string]any{}, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(consoleExporter), - }, - { - name: "batch/otlp-grpc-exporter-no-endpoint", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("http://localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-exporter-socket-endpoint", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("unix:collector.sock"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: 
ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-good-ca-certificate", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "ca.crt")), - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-bad-ca-certificate", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not create certificate authority chain from certificate", - }, - { - name: "batch/otlp-grpc-bad-headerslist", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - HeadersList: ptr("==="), - }, - }, - }, - }, - wantErr: "invalid headers list: invalid key: \"\"", - }, - { - name: "batch/otlp-grpc-bad-client-certificate", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - ClientCertificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - ClientKey: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", - }, - { - name: "batch/otlp-grpc-exporter-no-scheme", - processor: LogRecordProcessor{ - Batch: 
&BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-invalid-endpoint", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr(" "), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "parse \" \": invalid URI for request", - }, - { - name: "batch/otlp-grpc-invalid-compression", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "unsupported compression \"invalid\"", - }, - { - name: "batch/otlp-http-exporter", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("http://localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - 
wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-good-ca-certificate", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "ca.crt")), - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-bad-ca-certificate", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not create certificate authority chain from certificate", - }, - { - name: "batch/otlp-http-bad-client-certificate", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - ClientCertificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - ClientKey: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", - }, - { - name: "batch/otlp-http-bad-headerslist", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - HeadersList: ptr("==="), - }, - }, - }, - }, - wantErr: "invalid headers list: invalid key: \"\"", - }, - { - name: "batch/otlp-http-exporter-with-path", - processor: LogRecordProcessor{ - Batch: 
&BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("http://localhost:4318/path/123"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-endpoint", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-scheme", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-protocol", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("invalid"), - Endpoint: ptr("https://10.0.0.0:443"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: 
ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "unsupported protocol \"invalid\"", - }, - { - name: "batch/otlp-http-invalid-endpoint", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr(" "), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "parse \" \": invalid URI for request", - }, - { - name: "batch/otlp-http-none-compression", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewBatchProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-compression", - processor: LogRecordProcessor{ - Batch: &BatchLogRecordProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "unsupported compression \"invalid\"", - }, - { - name: "simple/no-exporter", - processor: LogRecordProcessor{ - Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{}, - }, - }, - wantErr: "no valid log exporter", - }, - { - name: "simple/console", - processor: LogRecordProcessor{ - Simple: &SimpleLogRecordProcessor{ 
- Exporter: LogRecordExporter{ - Console: map[string]any{}, - }, - }, - }, - wantProcessor: sdklog.NewSimpleProcessor(consoleExporter), - }, - { - name: "simple/otlp-exporter", - processor: LogRecordProcessor{ - Simple: &SimpleLogRecordProcessor{ - Exporter: LogRecordExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdklog.NewSimpleProcessor(otlpHTTPExporter), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := logProcessor(context.Background(), tt.processor) - if tt.wantErr != "" { - require.Error(t, err) - require.Equal(t, tt.wantErr, err.Error()) - } else { - require.NoError(t, err) - } - if tt.wantProcessor == nil { - require.Nil(t, got) - } else { - require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) - wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName("exporter").Elem().Type() - gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName("exporter").Elem().Type() - require.Equal(t, wantExporterType.String(), gotExporterType.String()) - } - }) - } -} diff --git a/config/v0.3.0/metric.go b/config/v0.3.0/metric.go deleted file mode 100644 index 3454de0e83e..00000000000 --- a/config/v0.3.0/metric.go +++ /dev/null @@ -1,565 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.3.0" - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "math" - "net" - "net/http" - "net/url" - "os" - "strconv" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "google.golang.org/grpc/credentials" - - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/attribute" - 
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/metric/metricdata" - "go.opentelemetry.io/otel/sdk/resource" -) - -var zeroScope instrumentation.Scope - -const instrumentKindUndefined = sdkmetric.InstrumentKind(0) - -func meterProvider(cfg configOptions, res *resource.Resource) (metric.MeterProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.MeterProvider == nil { - return noop.NewMeterProvider(), noopShutdown, nil - } - opts := []sdkmetric.Option{ - sdkmetric.WithResource(res), - } - - var errs []error - for _, reader := range cfg.opentelemetryConfig.MeterProvider.Readers { - r, err := metricReader(cfg.ctx, reader) - if err == nil { - opts = append(opts, sdkmetric.WithReader(r)) - } else { - errs = append(errs, err) - } - } - for _, vw := range cfg.opentelemetryConfig.MeterProvider.Views { - v, err := view(vw) - if err == nil { - opts = append(opts, sdkmetric.WithView(v)) - } else { - errs = append(errs, err) - } - } - - if len(errs) > 0 { - return noop.NewMeterProvider(), noopShutdown, errors.Join(errs...) - } - - mp := sdkmetric.NewMeterProvider(opts...) 
- return mp, mp.Shutdown, nil -} - -func metricReader(ctx context.Context, r MetricReader) (sdkmetric.Reader, error) { - if r.Periodic != nil && r.Pull != nil { - return nil, errors.New("must not specify multiple metric reader type") - } - - if r.Periodic != nil { - var opts []sdkmetric.PeriodicReaderOption - if r.Periodic.Interval != nil { - opts = append(opts, sdkmetric.WithInterval(time.Duration(*r.Periodic.Interval)*time.Millisecond)) - } - - if r.Periodic.Timeout != nil { - opts = append(opts, sdkmetric.WithTimeout(time.Duration(*r.Periodic.Timeout)*time.Millisecond)) - } - return periodicExporter(ctx, r.Periodic.Exporter, opts...) - } - - if r.Pull != nil { - return pullReader(ctx, r.Pull.Exporter) - } - return nil, errors.New("no valid metric reader") -} - -func pullReader(ctx context.Context, exporter PullMetricExporter) (sdkmetric.Reader, error) { - if exporter.Prometheus != nil { - return prometheusReader(ctx, exporter.Prometheus) - } - return nil, errors.New("no valid metric exporter") -} - -func periodicExporter(ctx context.Context, exporter PushMetricExporter, opts ...sdkmetric.PeriodicReaderOption) (sdkmetric.Reader, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - if exporter.Console != nil { - enc := json.NewEncoder(os.Stdout) - enc.SetIndent("", " ") - - exp, err := stdoutmetric.New( - stdoutmetric.WithEncoder(enc), - ) - if err != nil { - return nil, err - } - return sdkmetric.NewPeriodicReader(exp, opts...), nil - } - if exporter.OTLP != nil && exporter.OTLP.Protocol != nil { - var err error - var exp sdkmetric.Exporter - switch *exporter.OTLP.Protocol { - case protocolProtobufHTTP: - exp, err = otlpHTTPMetricExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - exp, err = otlpGRPCMetricExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", *exporter.OTLP.Protocol) - } - if err != nil { - return nil, err - } - return 
sdkmetric.NewPeriodicReader(exp, opts...), nil - } - return nil, errors.New("no valid metric exporter") -} - -func otlpHTTPMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { - opts := []otlpmetrichttp.Option{} - - if otlpConfig.Endpoint != nil { - u, err := url.ParseRequestURI(*otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlpmetrichttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlpmetrichttp.WithInsecure()) - } - if len(u.Path) > 0 { - opts = append(opts, otlpmetrichttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlpmetrichttp.WithCompression(otlpmetrichttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil { - opts = append(opts, otlpmetrichttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) - if err != nil { - return nil, err - } - if len(headersConfig) > 0 { - opts = append(opts, otlpmetrichttp.WithHeaders(headersConfig)) - } - if otlpConfig.TemporalityPreference != nil { - switch *otlpConfig.TemporalityPreference { - case "delta": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(deltaTemporality)) - case "cumulative": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(cumulativeTemporality)) - case "lowmemory": - opts = append(opts, otlpmetrichttp.WithTemporalitySelector(lowMemory)) - default: - return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) - } - } - - tlsConfig, err := createTLSConfig(otlpConfig.Certificate, otlpConfig.ClientCertificate, otlpConfig.ClientKey) - if err 
!= nil { - return nil, err - } - opts = append(opts, otlpmetrichttp.WithTLSClientConfig(tlsConfig)) - - return otlpmetrichttp.New(ctx, opts...) -} - -func otlpGRPCMetricExporter(ctx context.Context, otlpConfig *OTLPMetric) (sdkmetric.Exporter, error) { - var opts []otlpmetricgrpc.Option - - if otlpConfig.Endpoint != nil { - u, err := url.ParseRequestURI(*otlpConfig.Endpoint) - if err != nil { - return nil, err - } - // ParseRequestURI leaves the Host field empty when no - // scheme is specified (i.e. localhost:4317). This check is - // here to support the case where a user may not specify a - // scheme. The code does its best effort here by using - // otlpConfig.Endpoint as-is in that case - if u.Host != "" { - opts = append(opts, otlpmetricgrpc.WithEndpoint(u.Host)) - } else { - opts = append(opts, otlpmetricgrpc.WithEndpoint(*otlpConfig.Endpoint)) - } - if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Insecure != nil && *otlpConfig.Insecure) { - opts = append(opts, otlpmetricgrpc.WithInsecure()) - } - } - - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlpmetricgrpc.WithCompressor(*otlpConfig.Compression)) - case compressionNone: - // none requires no options - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlpmetricgrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) - if err != nil { - return nil, err - } - if len(headersConfig) > 0 { - opts = append(opts, otlpmetricgrpc.WithHeaders(headersConfig)) - } - if otlpConfig.TemporalityPreference != nil { - switch *otlpConfig.TemporalityPreference { - case "delta": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(deltaTemporality)) - case "cumulative": - opts = append(opts, 
otlpmetricgrpc.WithTemporalitySelector(cumulativeTemporality)) - case "lowmemory": - opts = append(opts, otlpmetricgrpc.WithTemporalitySelector(lowMemory)) - default: - return nil, fmt.Errorf("unsupported temporality preference %q", *otlpConfig.TemporalityPreference) - } - } - - tlsConfig, err := createTLSConfig(otlpConfig.Certificate, otlpConfig.ClientCertificate, otlpConfig.ClientKey) - if err != nil { - return nil, err - } - opts = append(opts, otlpmetricgrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))) - - return otlpmetricgrpc.New(ctx, opts...) -} - -func cumulativeTemporality(sdkmetric.InstrumentKind) metricdata.Temporality { - return metricdata.CumulativeTemporality -} - -func deltaTemporality(ik sdkmetric.InstrumentKind) metricdata.Temporality { - switch ik { - case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram, sdkmetric.InstrumentKindObservableCounter: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -func lowMemory(ik sdkmetric.InstrumentKind) metricdata.Temporality { - switch ik { - case sdkmetric.InstrumentKindCounter, sdkmetric.InstrumentKindHistogram: - return metricdata.DeltaTemporality - default: - return metricdata.CumulativeTemporality - } -} - -// newIncludeExcludeFilter returns a Filter that includes attributes -// in the include list and excludes attributes in the excludes list. -// It returns an error if an attribute is in both lists -// -// If IncludeExclude is empty a include-all filter is returned. 
-func newIncludeExcludeFilter(lists *IncludeExclude) (attribute.Filter, error) { - if lists == nil { - return func(kv attribute.KeyValue) bool { return true }, nil - } - - included := make(map[attribute.Key]struct{}) - for _, k := range lists.Included { - included[attribute.Key(k)] = struct{}{} - } - excluded := make(map[attribute.Key]struct{}) - for _, k := range lists.Excluded { - if _, ok := included[attribute.Key(k)]; ok { - return nil, fmt.Errorf("attribute cannot be in both include and exclude list: %s", k) - } - excluded[attribute.Key(k)] = struct{}{} - } - return func(kv attribute.KeyValue) bool { - // check if a value is excluded first - if _, ok := excluded[kv.Key]; ok { - return false - } - - if len(included) == 0 { - return true - } - - _, ok := included[kv.Key] - return ok - }, nil -} - -func prometheusReader(ctx context.Context, prometheusConfig *Prometheus) (sdkmetric.Reader, error) { - if prometheusConfig.Host == nil { - return nil, errors.New("host must be specified") - } - if prometheusConfig.Port == nil { - return nil, errors.New("port must be specified") - } - - opts, err := prometheusReaderOpts(prometheusConfig) - if err != nil { - return nil, err - } - - reg := prometheus.NewRegistry() - opts = append(opts, otelprom.WithRegisterer(reg)) - - reader, err := otelprom.New(opts...) - if err != nil { - return nil, fmt.Errorf("error creating otel prometheus exporter: %w", err) - } - - mux := http.NewServeMux() - mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{Registry: reg})) - server := http.Server{ - // Timeouts are necessary to make a server resilient to attacks. 
- // We use values from this example: https://blog.cloudflare.com/exposing-go-on-the-internet/#:~:text=There%20are%20three%20main%20timeouts - ReadTimeout: 5 * time.Second, - WriteTimeout: 10 * time.Second, - IdleTimeout: 120 * time.Second, - Handler: mux, - } - - // Remove surrounding "[]" from the host definition to allow users to define the host as "[::1]" or "::1". - host := *prometheusConfig.Host - if len(host) > 2 && host[0] == '[' && host[len(host)-1] == ']' { - host = host[1 : len(host)-1] - } - - addr := net.JoinHostPort(host, strconv.Itoa(*prometheusConfig.Port)) - lis, err := net.Listen("tcp", addr) - if err != nil { - return nil, errors.Join( - fmt.Errorf("binding address %s for Prometheus exporter: %w", addr, err), - reader.Shutdown(ctx), - ) - } - - // Only for testing reasons, add the address to the http Server, will not be used. - server.Addr = lis.Addr().String() - - go func() { - if err := server.Serve(lis); err != nil && !errors.Is(err, http.ErrServerClosed) { - otel.Handle(fmt.Errorf("the Prometheus HTTP server exited unexpectedly: %w", err)) - } - }() - - return readerWithServer{reader, &server}, nil -} - -func prometheusReaderOpts(prometheusConfig *Prometheus) ([]otelprom.Option, error) { - var opts []otelprom.Option - if prometheusConfig.WithoutScopeInfo != nil && *prometheusConfig.WithoutScopeInfo { - opts = append(opts, otelprom.WithoutScopeInfo()) - } - if prometheusConfig.WithoutTypeSuffix != nil && *prometheusConfig.WithoutTypeSuffix { - opts = append(opts, otelprom.WithoutCounterSuffixes()) - } - if prometheusConfig.WithoutUnits != nil && *prometheusConfig.WithoutUnits { - opts = append(opts, otelprom.WithoutUnits()) - } - if prometheusConfig.WithResourceConstantLabels != nil { - f, err := newIncludeExcludeFilter(prometheusConfig.WithResourceConstantLabels) - if err != nil { - return nil, err - } - opts = append(opts, otelprom.WithResourceAsConstantLabels(f)) - } - - return opts, nil -} - -type readerWithServer struct { - 
sdkmetric.Reader - server *http.Server -} - -func (rws readerWithServer) Shutdown(ctx context.Context) error { - return errors.Join( - rws.Reader.Shutdown(ctx), - rws.server.Shutdown(ctx), - ) -} - -func view(v View) (sdkmetric.View, error) { - if v.Selector == nil { - return nil, errors.New("view: no selector provided") - } - - inst, err := instrument(*v.Selector) - if err != nil { - return nil, err - } - - s, err := stream(v.Stream) - if err != nil { - return nil, err - } - return sdkmetric.NewView(inst, s), nil -} - -func instrument(vs ViewSelector) (sdkmetric.Instrument, error) { - kind, err := instrumentKind(vs.InstrumentType) - if err != nil { - return sdkmetric.Instrument{}, fmt.Errorf("view_selector: %w", err) - } - inst := sdkmetric.Instrument{ - Name: strOrEmpty(vs.InstrumentName), - Unit: strOrEmpty(vs.Unit), - Kind: kind, - Scope: instrumentation.Scope{ - Name: strOrEmpty(vs.MeterName), - Version: strOrEmpty(vs.MeterVersion), - SchemaURL: strOrEmpty(vs.MeterSchemaUrl), - }, - } - - if instrumentIsEmpty(inst) { - return sdkmetric.Instrument{}, errors.New("view_selector: empty selector not supporter") - } - return inst, nil -} - -func stream(vs *ViewStream) (sdkmetric.Stream, error) { - if vs == nil { - return sdkmetric.Stream{}, nil - } - - f, err := newIncludeExcludeFilter(vs.AttributeKeys) - if err != nil { - return sdkmetric.Stream{}, err - } - return sdkmetric.Stream{ - Name: strOrEmpty(vs.Name), - Description: strOrEmpty(vs.Description), - Aggregation: aggregation(vs.Aggregation), - AttributeFilter: f, - }, nil -} - -func aggregation(aggr *ViewStreamAggregation) sdkmetric.Aggregation { - if aggr == nil { - return nil - } - - if aggr.Base2ExponentialBucketHistogram != nil { - return sdkmetric.AggregationBase2ExponentialHistogram{ - MaxSize: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxSize), - MaxScale: int32OrZero(aggr.Base2ExponentialBucketHistogram.MaxScale), - // Need to negate because config has the positive action RecordMinMax. 
- NoMinMax: !boolOrFalse(aggr.Base2ExponentialBucketHistogram.RecordMinMax), - } - } - if aggr.Default != nil { - // TODO: Understand what to set here. - return nil - } - if aggr.Drop != nil { - return sdkmetric.AggregationDrop{} - } - if aggr.ExplicitBucketHistogram != nil { - return sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: aggr.ExplicitBucketHistogram.Boundaries, - // Need to negate because config has the positive action RecordMinMax. - NoMinMax: !boolOrFalse(aggr.ExplicitBucketHistogram.RecordMinMax), - } - } - if aggr.LastValue != nil { - return sdkmetric.AggregationLastValue{} - } - if aggr.Sum != nil { - return sdkmetric.AggregationSum{} - } - return nil -} - -func instrumentKind(vsit *ViewSelectorInstrumentType) (sdkmetric.InstrumentKind, error) { - if vsit == nil { - // Equivalent to instrumentKindUndefined. - return instrumentKindUndefined, nil - } - - switch *vsit { - case ViewSelectorInstrumentTypeCounter: - return sdkmetric.InstrumentKindCounter, nil - case ViewSelectorInstrumentTypeUpDownCounter: - return sdkmetric.InstrumentKindUpDownCounter, nil - case ViewSelectorInstrumentTypeHistogram: - return sdkmetric.InstrumentKindHistogram, nil - case ViewSelectorInstrumentTypeObservableCounter: - return sdkmetric.InstrumentKindObservableCounter, nil - case ViewSelectorInstrumentTypeObservableUpDownCounter: - return sdkmetric.InstrumentKindObservableUpDownCounter, nil - case ViewSelectorInstrumentTypeObservableGauge: - return sdkmetric.InstrumentKindObservableGauge, nil - } - - return instrumentKindUndefined, errors.New("instrument_type: invalid value") -} - -func instrumentIsEmpty(i sdkmetric.Instrument) bool { - return i.Name == "" && - i.Description == "" && - i.Kind == instrumentKindUndefined && - i.Unit == "" && - i.Scope == zeroScope -} - -func boolOrFalse(pBool *bool) bool { - if pBool == nil { - return false - } - return *pBool -} - -func int32OrZero(pInt *int) int32 { - if pInt == nil { - return 0 - } - i := *pInt - if i > 
math.MaxInt32 { - return math.MaxInt32 - } - if i < math.MinInt32 { - return math.MinInt32 - } - return int32(i) // nolint: gosec // Overflow and underflow checked above. -} - -func strOrEmpty(pStr *string) string { - if pStr == nil { - return "" - } - return *pStr -} diff --git a/config/v0.3.0/metric_test.go b/config/v0.3.0/metric_test.go deleted file mode 100644 index cc5c26f1d25..00000000000 --- a/config/v0.3.0/metric_test.go +++ /dev/null @@ -1,1390 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "errors" - "fmt" - "net/http" - "path/filepath" - "reflect" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp" - otelprom "go.opentelemetry.io/otel/exporters/prometheus" - "go.opentelemetry.io/otel/exporters/stdout/stdoutmetric" - "go.opentelemetry.io/otel/metric" - "go.opentelemetry.io/otel/metric/noop" - "go.opentelemetry.io/otel/sdk/instrumentation" - sdkmetric "go.opentelemetry.io/otel/sdk/metric" - "go.opentelemetry.io/otel/sdk/resource" -) - -func TestMeterProvider(t *testing.T) { - tests := []struct { - name string - cfg configOptions - wantProvider metric.MeterProvider - wantErr error - }{ - { - name: "no-meter-provider-configured", - wantProvider: noop.NewMeterProvider(), - }, - { - name: "error-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - MeterProvider: &MeterProvider{ - Readers: []MetricReader{ - { - Periodic: &PeriodicMetricReader{}, - Pull: &PullMetricReader{}, - }, - }, - }, - }, - }, - wantProvider: noop.NewMeterProvider(), - wantErr: errors.Join(errors.New("must not specify multiple metric reader type")), - }, - { - name: "multiple-errors-in-config", - cfg: configOptions{ - 
opentelemetryConfig: OpenTelemetryConfiguration{ - MeterProvider: &MeterProvider{ - Readers: []MetricReader{ - { - Periodic: &PeriodicMetricReader{}, - Pull: &PullMetricReader{}, - }, - { - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - Console: Console{}, - OTLP: &OTLPMetric{}, - }, - }, - }, - }, - }, - }, - }, - wantProvider: noop.NewMeterProvider(), - wantErr: errors.Join(errors.New("must not specify multiple metric reader type"), errors.New("must not specify multiple exporters")), - }, - } - for _, tt := range tests { - mp, shutdown, err := meterProvider(tt.cfg, resource.Default()) - require.Equal(t, tt.wantProvider, mp) - assert.Equal(t, tt.wantErr, err) - require.NoError(t, shutdown(context.Background())) - } -} - -func TestReader(t *testing.T) { - consoleExporter, err := stdoutmetric.New( - stdoutmetric.WithPrettyPrint(), - ) - require.NoError(t, err) - ctx := context.Background() - otlpGRPCExporter, err := otlpmetricgrpc.New(ctx) - require.NoError(t, err) - otlpHTTPExporter, err := otlpmetrichttp.New(ctx) - require.NoError(t, err) - promExporter, err := otelprom.New() - require.NoError(t, err) - testCases := []struct { - name string - reader MetricReader - args any - wantErr string - wantReader sdkmetric.Reader - }{ - { - name: "no reader", - wantErr: "no valid metric reader", - }, - { - name: "pull/no-exporter", - reader: MetricReader{ - Pull: &PullMetricReader{}, - }, - wantErr: "no valid metric exporter", - }, - { - name: "pull/prometheus-no-host", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: PullMetricExporter{ - Prometheus: &Prometheus{}, - }, - }, - }, - wantErr: "host must be specified", - }, - { - name: "pull/prometheus-no-port", - reader: MetricReader{ - Pull: &PullMetricReader{ - Exporter: PullMetricExporter{ - Prometheus: &Prometheus{ - Host: ptr("localhost"), - }, - }, - }, - }, - wantErr: "port must be specified", - }, - { - name: "pull/prometheus", - reader: MetricReader{ - Pull: &PullMetricReader{ - 
Exporter: PullMetricExporter{ - Prometheus: &Prometheus{ - Host: ptr("localhost"), - Port: ptr(0), - WithoutScopeInfo: ptr(true), - WithoutUnits: ptr(true), - WithoutTypeSuffix: ptr(true), - WithResourceConstantLabels: &IncludeExclude{ - Included: []string{"include"}, - Excluded: []string{"exclude"}, - }, - }, - }, - }, - }, - wantReader: readerWithServer{promExporter, nil}, - }, - { - name: "periodic/otlp-exporter-invalid-protocol", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/invalid"), - }, - }, - }, - }, - wantErr: "unsupported protocol \"http/invalid\"", - }, - { - name: "periodic/otlp-grpc-exporter", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("http://localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-exporter-with-path", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("http://localhost:4318/path/123"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-good-ca-certificate", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("https://localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "ca.crt")), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: 
"periodic/otlp-grpc-bad-ca-certificate", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("https://localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not create certificate authority chain from certificate", - }, - { - name: "periodic/otlp-grpc-bad-client-certificate", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - ClientCertificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - ClientKey: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", - }, - { - name: "periodic/otlp-grpc-bad-headerslist", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - HeadersList: ptr("==="), - }, - }, - }, - }, - wantErr: "invalid headers list: invalid key: \"\"", - }, - { - name: "periodic/otlp-grpc-exporter-no-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-exporter-socket-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: 
ptr("unix:collector.sock"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-exporter-no-scheme", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-invalid-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr(" "), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "parse \" \": invalid URI for request", - }, - { - name: "periodic/otlp-grpc-none-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-delta-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("delta"), - }, - }, - }, - }, - wantReader: 
sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-cumulative-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("cumulative"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-lowmemory-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("lowmemory"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpGRPCExporter), - }, - { - name: "periodic/otlp-grpc-invalid-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("invalid"), - }, - }, - }, - }, - wantErr: "unsupported temporality preference \"invalid\"", - }, - { - name: "periodic/otlp-grpc-invalid-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "unsupported compression \"invalid\"", - }, - { - name: "periodic/otlp-http-exporter", - reader: 
MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("http://localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-good-ca-certificate", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("https://localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "ca.crt")), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-bad-ca-certificate", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("https://localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not create certificate authority chain from certificate", - }, - { - name: "periodic/otlp-http-bad-client-certificate", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - ClientCertificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - ClientKey: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", - }, - { - name: "periodic/otlp-http-bad-headerslist", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - 
OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - HeadersList: ptr("==="), - }, - }, - }, - }, - wantErr: "invalid headers list: invalid key: \"\"", - }, - { - name: "periodic/otlp-http-exporter-with-path", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("http://localhost:4318/path/123"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-exporter-no-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-exporter-no-scheme", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-invalid-endpoint", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr(" "), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "parse \" \": invalid URI for request", - }, - { - 
name: "periodic/otlp-http-none-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-cumulative-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("cumulative"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-lowmemory-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("lowmemory"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-delta-temporality", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("delta"), - }, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(otlpHTTPExporter), - }, - { - name: "periodic/otlp-http-invalid-temporality", - reader: MetricReader{ - 
Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - TemporalityPreference: ptr("invalid"), - }, - }, - }, - }, - wantErr: "unsupported temporality preference \"invalid\"", - }, - { - name: "periodic/otlp-http-invalid-compression", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - OTLP: &OTLPMetric{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "unsupported compression \"invalid\"", - }, - { - name: "periodic/no-exporter", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{}, - }, - }, - wantErr: "no valid metric exporter", - }, - { - name: "periodic/console-exporter", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Exporter: PushMetricExporter{ - Console: Console{}, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader(consoleExporter), - }, - { - name: "periodic/console-exporter-with-extra-options", - reader: MetricReader{ - Periodic: &PeriodicMetricReader{ - Interval: ptr(30_000), - Timeout: ptr(5_000), - Exporter: PushMetricExporter{ - Console: Console{}, - }, - }, - }, - wantReader: sdkmetric.NewPeriodicReader( - consoleExporter, - sdkmetric.WithInterval(30_000*time.Millisecond), - sdkmetric.WithTimeout(5_000*time.Millisecond), - ), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := metricReader(context.Background(), tt.reader) - if tt.wantErr != "" { - require.Error(t, err) - require.Equal(t, tt.wantErr, err.Error()) - } else { - require.NoError(t, err) - } - if tt.wantReader == nil { - require.Nil(t, got) - } 
else { - require.Equal(t, reflect.TypeOf(tt.wantReader), reflect.TypeOf(got)) - var fieldName string - switch reflect.TypeOf(tt.wantReader).String() { - case "*metric.PeriodicReader": - fieldName = "exporter" - case "config.readerWithServer": - fieldName = "Reader" - default: - fieldName = "e" - } - wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantReader)).FieldByName(fieldName).Elem().Type() - gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() - require.Equal(t, wantExporterType.String(), gotExporterType.String()) - require.NoError(t, got.Shutdown(context.Background())) - } - }) - } -} - -func TestView(t *testing.T) { - testCases := []struct { - name string - view View - args any - wantErr string - matchInstrument *sdkmetric.Instrument - wantStream sdkmetric.Stream - wantResult bool - }{ - { - name: "no selector", - wantErr: "view: no selector provided", - }, - { - name: "selector/invalid_type", - view: View{ - Selector: &ViewSelector{ - InstrumentType: (*ViewSelectorInstrumentType)(ptr("invalid_type")), - }, - }, - wantErr: "view_selector: instrument_type: invalid value", - }, - { - name: "selector/invalid_type", - view: View{ - Selector: &ViewSelector{}, - }, - wantErr: "view_selector: empty selector not supporter", - }, - { - name: "all selectors match", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{Name: "test_name", Unit: "test_unit"}, - wantResult: true, - }, - { - name: 
"all selectors no match name", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "not_match", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match unit", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "not_match", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match kind", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("histogram")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all 
selectors no match meter name", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "not_match", - Version: "test_meter_version", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match meter version", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "not_match", - SchemaURL: "test_schema_url", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "all selectors no match meter schema url", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - InstrumentType: (*ViewSelectorInstrumentType)(ptr("counter")), - Unit: ptr("test_unit"), - MeterName: ptr("test_meter_name"), - MeterVersion: ptr("test_meter_version"), - MeterSchemaUrl: ptr("test_schema_url"), - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Unit: "test_unit", - Kind: sdkmetric.InstrumentKindCounter, - Scope: instrumentation.Scope{ - Name: "test_meter_name", - Version: "test_meter_version", - SchemaURL: "not_match", - }, - }, - wantStream: sdkmetric.Stream{}, - wantResult: false, - }, - { - name: "with 
stream", - view: View{ - Selector: &ViewSelector{ - InstrumentName: ptr("test_name"), - Unit: ptr("test_unit"), - }, - Stream: &ViewStream{ - Name: ptr("new_name"), - Description: ptr("new_description"), - AttributeKeys: ptr(IncludeExclude{Included: []string{"foo", "bar"}}), - Aggregation: &ViewStreamAggregation{Sum: make(ViewStreamAggregationSum)}, - }, - }, - matchInstrument: &sdkmetric.Instrument{ - Name: "test_name", - Description: "test_description", - Unit: "test_unit", - }, - wantStream: sdkmetric.Stream{ - Name: "new_name", - Description: "new_description", - Unit: "test_unit", - Aggregation: sdkmetric.AggregationSum{}, - }, - wantResult: true, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := view(tt.view) - if tt.wantErr != "" { - require.EqualError(t, err, tt.wantErr) - require.Nil(t, got) - } else { - require.NoError(t, err) - gotStream, gotResult := got(*tt.matchInstrument) - // Remove filter, since it cannot be compared - gotStream.AttributeFilter = nil - require.Equal(t, tt.wantStream, gotStream) - require.Equal(t, tt.wantResult, gotResult) - } - }) - } -} - -func TestInstrumentType(t *testing.T) { - testCases := []struct { - name string - instType *ViewSelectorInstrumentType - wantErr error - wantKind sdkmetric.InstrumentKind - }{ - { - name: "nil", - wantKind: sdkmetric.InstrumentKind(0), - }, - { - name: "counter", - instType: (*ViewSelectorInstrumentType)(ptr("counter")), - wantKind: sdkmetric.InstrumentKindCounter, - }, - { - name: "up_down_counter", - instType: (*ViewSelectorInstrumentType)(ptr("up_down_counter")), - wantKind: sdkmetric.InstrumentKindUpDownCounter, - }, - { - name: "histogram", - instType: (*ViewSelectorInstrumentType)(ptr("histogram")), - wantKind: sdkmetric.InstrumentKindHistogram, - }, - { - name: "observable_counter", - instType: (*ViewSelectorInstrumentType)(ptr("observable_counter")), - wantKind: sdkmetric.InstrumentKindObservableCounter, - }, - { - name: 
"observable_up_down_counter", - instType: (*ViewSelectorInstrumentType)(ptr("observable_up_down_counter")), - wantKind: sdkmetric.InstrumentKindObservableUpDownCounter, - }, - { - name: "observable_gauge", - instType: (*ViewSelectorInstrumentType)(ptr("observable_gauge")), - wantKind: sdkmetric.InstrumentKindObservableGauge, - }, - { - name: "invalid", - instType: (*ViewSelectorInstrumentType)(ptr("invalid")), - wantErr: errors.New("instrument_type: invalid value"), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := instrumentKind(tt.instType) - if tt.wantErr != nil { - require.Equal(t, tt.wantErr, err) - require.Zero(t, got) - } else { - require.NoError(t, err) - require.Equal(t, tt.wantKind, got) - } - }) - } -} - -func TestAggregation(t *testing.T) { - testCases := []struct { - name string - aggregation *ViewStreamAggregation - wantAggregation sdkmetric.Aggregation - }{ - { - name: "nil", - wantAggregation: nil, - }, - { - name: "empty", - aggregation: &ViewStreamAggregation{}, - wantAggregation: nil, - }, - { - name: "Base2ExponentialBucketHistogram empty", - aggregation: &ViewStreamAggregation{ - Base2ExponentialBucketHistogram: &ViewStreamAggregationBase2ExponentialBucketHistogram{}, - }, - wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ - MaxSize: 0, - MaxScale: 0, - NoMinMax: true, - }, - }, - { - name: "Base2ExponentialBucketHistogram", - aggregation: &ViewStreamAggregation{ - Base2ExponentialBucketHistogram: &ViewStreamAggregationBase2ExponentialBucketHistogram{ - MaxSize: ptr(2), - MaxScale: ptr(3), - RecordMinMax: ptr(true), - }, - }, - wantAggregation: sdkmetric.AggregationBase2ExponentialHistogram{ - MaxSize: 2, - MaxScale: 3, - NoMinMax: false, - }, - }, - { - name: "Default", - aggregation: &ViewStreamAggregation{ - Default: make(ViewStreamAggregationDefault), - }, - wantAggregation: nil, - }, - { - name: "Drop", - aggregation: &ViewStreamAggregation{ - Drop: 
make(ViewStreamAggregationDrop), - }, - wantAggregation: sdkmetric.AggregationDrop{}, - }, - { - name: "ExplicitBucketHistogram empty", - aggregation: &ViewStreamAggregation{ - ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{}, - }, - wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: nil, - NoMinMax: true, - }, - }, - { - name: "ExplicitBucketHistogram", - aggregation: &ViewStreamAggregation{ - ExplicitBucketHistogram: &ViewStreamAggregationExplicitBucketHistogram{ - Boundaries: []float64{1, 2, 3}, - RecordMinMax: ptr(true), - }, - }, - wantAggregation: sdkmetric.AggregationExplicitBucketHistogram{ - Boundaries: []float64{1, 2, 3}, - NoMinMax: false, - }, - }, - { - name: "LastValue", - aggregation: &ViewStreamAggregation{ - LastValue: make(ViewStreamAggregationLastValue), - }, - wantAggregation: sdkmetric.AggregationLastValue{}, - }, - { - name: "Sum", - aggregation: &ViewStreamAggregation{ - Sum: make(ViewStreamAggregationSum), - }, - wantAggregation: sdkmetric.AggregationSum{}, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got := aggregation(tt.aggregation) - require.Equal(t, tt.wantAggregation, got) - }) - } -} - -func TestNewIncludeExcludeFilter(t *testing.T) { - testCases := []struct { - name string - attributeKeys *IncludeExclude - wantPass []string - wantFail []string - }{ - { - name: "empty", - attributeKeys: nil, - wantPass: []string{"foo", "bar"}, - wantFail: nil, - }, - { - name: "filter-with-include", - attributeKeys: ptr(IncludeExclude{ - Included: []string{"foo"}, - }), - wantPass: []string{"foo"}, - wantFail: []string{"bar"}, - }, - { - name: "filter-with-exclude", - attributeKeys: ptr(IncludeExclude{ - Excluded: []string{"foo"}, - }), - wantPass: []string{"bar"}, - wantFail: []string{"foo"}, - }, - { - name: "filter-with-include-and-exclude", - attributeKeys: ptr(IncludeExclude{ - Included: []string{"bar"}, - Excluded: []string{"foo"}, - }), - wantPass: 
[]string{"bar"}, - wantFail: []string{"foo"}, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := newIncludeExcludeFilter(tt.attributeKeys) - require.NoError(t, err) - for _, pass := range tt.wantPass { - require.True(t, got(attribute.KeyValue{Key: attribute.Key(pass), Value: attribute.StringValue("")})) - } - for _, fail := range tt.wantFail { - require.False(t, got(attribute.KeyValue{Key: attribute.Key(fail), Value: attribute.StringValue("")})) - } - }) - } -} - -func TestNewIncludeExcludeFilterError(t *testing.T) { - _, err := newIncludeExcludeFilter(ptr(IncludeExclude{ - Included: []string{"foo"}, - Excluded: []string{"foo"}, - })) - require.Equal(t, fmt.Errorf("attribute cannot be in both include and exclude list: foo"), err) -} - -func TestPrometheusReaderOpts(t *testing.T) { - testCases := []struct { - name string - cfg Prometheus - wantOptions int - }{ - { - name: "no options", - cfg: Prometheus{}, - wantOptions: 0, - }, - { - name: "all set", - cfg: Prometheus{ - WithoutScopeInfo: ptr(true), - WithoutTypeSuffix: ptr(true), - WithoutUnits: ptr(true), - WithResourceConstantLabels: &IncludeExclude{}, - }, - wantOptions: 4, - }, - { - name: "all set false", - cfg: Prometheus{ - WithoutScopeInfo: ptr(false), - WithoutTypeSuffix: ptr(false), - WithoutUnits: ptr(false), - WithResourceConstantLabels: &IncludeExclude{}, - }, - wantOptions: 1, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - opts, err := prometheusReaderOpts(&tt.cfg) - require.NoError(t, err) - require.Len(t, opts, tt.wantOptions) - }) - } -} - -func TestPrometheusIPv6(t *testing.T) { - tests := []struct { - name string - host string - }{ - { - name: "IPv6", - host: "::1", - }, - { - name: "[IPv6]", - host: "[::1]", - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - port := 0 - cfg := Prometheus{ - Host: &tt.host, - Port: &port, - WithoutScopeInfo: ptr(true), - WithoutTypeSuffix: ptr(true), - 
WithoutUnits: ptr(true), - WithResourceConstantLabels: &IncludeExclude{}, - } - - rs, err := prometheusReader(context.Background(), &cfg) - t.Cleanup(func() { - require.NoError(t, rs.Shutdown(context.Background())) - }) - require.NoError(t, err) - - hServ := rs.(readerWithServer).server - assert.True(t, strings.HasPrefix(hServ.Addr, "[::1]:")) - - resp, err := http.DefaultClient.Get("http://" + hServ.Addr + "/metrics") - t.Cleanup(func() { - require.NoError(t, resp.Body.Close()) - }) - require.NoError(t, err) - assert.Equal(t, http.StatusOK, resp.StatusCode) - }) - } -} diff --git a/config/v0.3.0/resource.go b/config/v0.3.0/resource.go deleted file mode 100644 index ddb0af83343..00000000000 --- a/config/v0.3.0/resource.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.3.0" - -import ( - "fmt" - "strconv" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" -) - -func keyVal(k string, v any) attribute.KeyValue { - switch val := v.(type) { - case bool: - return attribute.Bool(k, val) - case int64: - return attribute.Int64(k, val) - case uint64: - return attribute.String(k, strconv.FormatUint(val, 10)) - case float64: - return attribute.Float64(k, val) - case int8: - return attribute.Int64(k, int64(val)) - case uint8: - return attribute.Int64(k, int64(val)) - case int16: - return attribute.Int64(k, int64(val)) - case uint16: - return attribute.Int64(k, int64(val)) - case int32: - return attribute.Int64(k, int64(val)) - case uint32: - return attribute.Int64(k, int64(val)) - case float32: - return attribute.Float64(k, float64(val)) - case int: - return attribute.Int(k, val) - case uint: - return attribute.String(k, strconv.FormatUint(uint64(val), 10)) - case string: - return attribute.String(k, val) - default: - return attribute.String(k, fmt.Sprint(v)) - } -} - -func newResource(res *Resource) 
*resource.Resource { - if res == nil { - return resource.Default() - } - - var attrs []attribute.KeyValue - for _, v := range res.Attributes { - attrs = append(attrs, keyVal(v.Name, v.Value)) - } - - if res.SchemaUrl == nil { - return resource.NewSchemaless(attrs...) - } - return resource.NewWithAttributes(*res.SchemaUrl, attrs...) -} diff --git a/config/v0.3.0/resource_test.go b/config/v0.3.0/resource_test.go deleted file mode 100644 index f4af030ef4c..00000000000 --- a/config/v0.3.0/resource_test.go +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/sdk/resource" - semconv "go.opentelemetry.io/otel/semconv/v1.26.0" -) - -type mockType struct{} - -func TestNewResource(t *testing.T) { - other := mockType{} - tests := []struct { - name string - config *Resource - wantResource *resource.Resource - }{ - { - name: "no-resource-configuration", - wantResource: resource.Default(), - }, - { - name: "resource-no-attributes", - config: &Resource{}, - wantResource: resource.NewSchemaless(), - }, - { - name: "resource-with-schema", - config: &Resource{ - SchemaUrl: ptr(semconv.SchemaURL), - }, - wantResource: resource.NewWithAttributes(semconv.SchemaURL), - }, - { - name: "resource-with-attributes", - config: &Resource{ - Attributes: []AttributeNameValue{ - {Name: "service.name", Value: "service-a"}, - }, - }, - wantResource: resource.NewWithAttributes("", - semconv.ServiceName("service-a"), - ), - }, - { - name: "resource-with-attributes-and-schema", - config: &Resource{ - Attributes: []AttributeNameValue{ - {Name: "service.name", Value: "service-a"}, - }, - SchemaUrl: ptr(semconv.SchemaURL), - }, - wantResource: resource.NewWithAttributes(semconv.SchemaURL, - semconv.ServiceName("service-a"), - ), - }, - { - name: 
"resource-with-additional-attributes-and-schema", - config: &Resource{ - Attributes: []AttributeNameValue{ - {Name: "service.name", Value: "service-a"}, - {Name: "attr-bool", Value: true}, - {Name: "attr-int64", Value: int64(-164)}, - {Name: "attr-uint64", Value: uint64(164)}, - {Name: "attr-float64", Value: float64(64.0)}, - {Name: "attr-int8", Value: int8(-18)}, - {Name: "attr-uint8", Value: uint8(18)}, - {Name: "attr-int16", Value: int16(-116)}, - {Name: "attr-uint16", Value: uint16(116)}, - {Name: "attr-int32", Value: int32(-132)}, - {Name: "attr-uint32", Value: uint32(132)}, - {Name: "attr-float32", Value: float32(32.0)}, - {Name: "attr-int", Value: int(-1)}, - {Name: "attr-uint", Value: uint(1)}, - {Name: "attr-string", Value: "string-val"}, - {Name: "attr-default", Value: other}, - }, - SchemaUrl: ptr(semconv.SchemaURL), - }, - wantResource: resource.NewWithAttributes(semconv.SchemaURL, - semconv.ServiceName("service-a"), - attribute.Bool("attr-bool", true), - attribute.String("attr-uint64", fmt.Sprintf("%d", 164)), - attribute.Int64("attr-int64", int64(-164)), - attribute.Float64("attr-float64", float64(64.0)), - attribute.Int64("attr-int8", int64(-18)), - attribute.Int64("attr-uint8", int64(18)), - attribute.Int64("attr-int16", int64(-116)), - attribute.Int64("attr-uint16", int64(116)), - attribute.Int64("attr-int32", int64(-132)), - attribute.Int64("attr-uint32", int64(132)), - attribute.Float64("attr-float32", float64(32.0)), - attribute.Int64("attr-int", int64(-1)), - attribute.String("attr-uint", fmt.Sprintf("%d", 1)), - attribute.String("attr-string", "string-val"), - attribute.String("attr-default", fmt.Sprintf("%v", other))), - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := newResource(tt.config) - assert.Equal(t, tt.wantResource, got) - }) - } -} diff --git a/config/v0.3.0/trace.go b/config/v0.3.0/trace.go deleted file mode 100644 index b977348a6ee..00000000000 --- a/config/v0.3.0/trace.go +++ /dev/null @@ -1,219 
+0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config // import "go.opentelemetry.io/contrib/config/v0.3.0" - -import ( - "context" - "errors" - "fmt" - "net/url" - "time" - - "google.golang.org/grpc/credentials" - - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" -) - -func tracerProvider(cfg configOptions, res *resource.Resource) (trace.TracerProvider, shutdownFunc, error) { - if cfg.opentelemetryConfig.TracerProvider == nil { - return noop.NewTracerProvider(), noopShutdown, nil - } - opts := []sdktrace.TracerProviderOption{ - sdktrace.WithResource(res), - } - var errs []error - for _, processor := range cfg.opentelemetryConfig.TracerProvider.Processors { - sp, err := spanProcessor(cfg.ctx, processor) - if err == nil { - opts = append(opts, sdktrace.WithSpanProcessor(sp)) - } else { - errs = append(errs, err) - } - } - if len(errs) > 0 { - return noop.NewTracerProvider(), noopShutdown, errors.Join(errs...) - } - tp := sdktrace.NewTracerProvider(opts...) 
- return tp, tp.Shutdown, nil -} - -func spanExporter(ctx context.Context, exporter SpanExporter) (sdktrace.SpanExporter, error) { - if exporter.Console != nil && exporter.OTLP != nil { - return nil, errors.New("must not specify multiple exporters") - } - - if exporter.Console != nil { - return stdouttrace.New( - stdouttrace.WithPrettyPrint(), - ) - } - if exporter.OTLP != nil && exporter.OTLP.Protocol != nil { - switch *exporter.OTLP.Protocol { - case protocolProtobufHTTP: - return otlpHTTPSpanExporter(ctx, exporter.OTLP) - case protocolProtobufGRPC: - return otlpGRPCSpanExporter(ctx, exporter.OTLP) - default: - return nil, fmt.Errorf("unsupported protocol %q", *exporter.OTLP.Protocol) - } - } - return nil, errors.New("no valid span exporter") -} - -func spanProcessor(ctx context.Context, processor SpanProcessor) (sdktrace.SpanProcessor, error) { - if processor.Batch != nil && processor.Simple != nil { - return nil, errors.New("must not specify multiple span processor type") - } - if processor.Batch != nil { - exp, err := spanExporter(ctx, processor.Batch.Exporter) - if err != nil { - return nil, err - } - return batchSpanProcessor(processor.Batch, exp) - } - if processor.Simple != nil { - exp, err := spanExporter(ctx, processor.Simple.Exporter) - if err != nil { - return nil, err - } - return sdktrace.NewSimpleSpanProcessor(exp), nil - } - return nil, errors.New("unsupported span processor type, must be one of simple or batch") -} - -func otlpGRPCSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { - var opts []otlptracegrpc.Option - - if otlpConfig.Endpoint != nil { - u, err := url.ParseRequestURI(*otlpConfig.Endpoint) - if err != nil { - return nil, err - } - // ParseRequestURI leaves the Host field empty when no - // scheme is specified (i.e. localhost:4317). This check is - // here to support the case where a user may not specify a - // scheme. 
The code does its best effort here by using - // otlpConfig.Endpoint as-is in that case. - if u.Host != "" { - opts = append(opts, otlptracegrpc.WithEndpoint(u.Host)) - } else { - opts = append(opts, otlptracegrpc.WithEndpoint(*otlpConfig.Endpoint)) - } - - if u.Scheme == "http" || (u.Scheme != "https" && otlpConfig.Insecure != nil && *otlpConfig.Insecure) { - opts = append(opts, otlptracegrpc.WithInsecure()) - } - } - - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlptracegrpc.WithCompressor(*otlpConfig.Compression)) - case compressionNone: - // none requires no options - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlptracegrpc.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) - if err != nil { - return nil, err - } - if len(headersConfig) > 0 { - opts = append(opts, otlptracegrpc.WithHeaders(headersConfig)) - } - - tlsConfig, err := createTLSConfig(otlpConfig.Certificate, otlpConfig.ClientCertificate, otlpConfig.ClientKey) - if err != nil { - return nil, err - } - opts = append(opts, otlptracegrpc.WithTLSCredentials(credentials.NewTLS(tlsConfig))) - - return otlptracegrpc.New(ctx, opts...) 
-} - -func otlpHTTPSpanExporter(ctx context.Context, otlpConfig *OTLP) (sdktrace.SpanExporter, error) { - var opts []otlptracehttp.Option - - if otlpConfig.Endpoint != nil { - u, err := url.ParseRequestURI(*otlpConfig.Endpoint) - if err != nil { - return nil, err - } - opts = append(opts, otlptracehttp.WithEndpoint(u.Host)) - - if u.Scheme == "http" { - opts = append(opts, otlptracehttp.WithInsecure()) - } - if len(u.Path) > 0 { - opts = append(opts, otlptracehttp.WithURLPath(u.Path)) - } - } - if otlpConfig.Compression != nil { - switch *otlpConfig.Compression { - case compressionGzip: - opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.GzipCompression)) - case compressionNone: - opts = append(opts, otlptracehttp.WithCompression(otlptracehttp.NoCompression)) - default: - return nil, fmt.Errorf("unsupported compression %q", *otlpConfig.Compression) - } - } - if otlpConfig.Timeout != nil && *otlpConfig.Timeout > 0 { - opts = append(opts, otlptracehttp.WithTimeout(time.Millisecond*time.Duration(*otlpConfig.Timeout))) - } - headersConfig, err := createHeadersConfig(otlpConfig.Headers, otlpConfig.HeadersList) - if err != nil { - return nil, err - } - if len(headersConfig) > 0 { - opts = append(opts, otlptracehttp.WithHeaders(headersConfig)) - } - - tlsConfig, err := createTLSConfig(otlpConfig.Certificate, otlpConfig.ClientCertificate, otlpConfig.ClientKey) - if err != nil { - return nil, err - } - opts = append(opts, otlptracehttp.WithTLSClientConfig(tlsConfig)) - - return otlptracehttp.New(ctx, opts...) 
-} - -func batchSpanProcessor(bsp *BatchSpanProcessor, exp sdktrace.SpanExporter) (sdktrace.SpanProcessor, error) { - var opts []sdktrace.BatchSpanProcessorOption - if bsp.ExportTimeout != nil { - if *bsp.ExportTimeout < 0 { - return nil, fmt.Errorf("invalid export timeout %d", *bsp.ExportTimeout) - } - opts = append(opts, sdktrace.WithExportTimeout(time.Millisecond*time.Duration(*bsp.ExportTimeout))) - } - if bsp.MaxExportBatchSize != nil { - if *bsp.MaxExportBatchSize < 0 { - return nil, fmt.Errorf("invalid batch size %d", *bsp.MaxExportBatchSize) - } - opts = append(opts, sdktrace.WithMaxExportBatchSize(*bsp.MaxExportBatchSize)) - } - if bsp.MaxQueueSize != nil { - if *bsp.MaxQueueSize < 0 { - return nil, fmt.Errorf("invalid queue size %d", *bsp.MaxQueueSize) - } - opts = append(opts, sdktrace.WithMaxQueueSize(*bsp.MaxQueueSize)) - } - if bsp.ScheduleDelay != nil { - if *bsp.ScheduleDelay < 0 { - return nil, fmt.Errorf("invalid schedule delay %d", *bsp.ScheduleDelay) - } - opts = append(opts, sdktrace.WithBatchTimeout(time.Millisecond*time.Duration(*bsp.ScheduleDelay))) - } - return sdktrace.NewBatchSpanProcessor(exp, opts...), nil -} diff --git a/config/v0.3.0/trace_test.go b/config/v0.3.0/trace_test.go deleted file mode 100644 index a9e3b270913..00000000000 --- a/config/v0.3.0/trace_test.go +++ /dev/null @@ -1,701 +0,0 @@ -// Copyright The OpenTelemetry Authors -// SPDX-License-Identifier: Apache-2.0 - -package config - -import ( - "context" - "errors" - "path/filepath" - "reflect" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" - "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp" - "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" - "go.opentelemetry.io/otel/sdk/resource" - sdktrace "go.opentelemetry.io/otel/sdk/trace" - "go.opentelemetry.io/otel/trace" - "go.opentelemetry.io/otel/trace/noop" -) - -func TestTracerPovider(t 
*testing.T) { - tests := []struct { - name string - cfg configOptions - wantProvider trace.TracerProvider - wantErr error - }{ - { - name: "no-tracer-provider-configured", - wantProvider: noop.NewTracerProvider(), - }, - { - name: "error-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - TracerProvider: &TracerProvider{ - Processors: []SpanProcessor{ - { - Batch: &BatchSpanProcessor{}, - Simple: &SimpleSpanProcessor{}, - }, - }, - }, - }, - }, - wantProvider: noop.NewTracerProvider(), - wantErr: errors.Join(errors.New("must not specify multiple span processor type")), - }, - { - name: "multiple-errors-in-config", - cfg: configOptions{ - opentelemetryConfig: OpenTelemetryConfiguration{ - TracerProvider: &TracerProvider{ - Processors: []SpanProcessor{ - { - Batch: &BatchSpanProcessor{}, - Simple: &SimpleSpanProcessor{}, - }, - { - Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - OTLP: &OTLP{}, - }, - }, - }, - }, - }, - }, - }, - wantProvider: noop.NewTracerProvider(), - wantErr: errors.Join(errors.New("must not specify multiple span processor type"), errors.New("must not specify multiple exporters")), - }, - } - for _, tt := range tests { - tp, shutdown, err := tracerProvider(tt.cfg, resource.Default()) - require.Equal(t, tt.wantProvider, tp) - assert.Equal(t, tt.wantErr, err) - require.NoError(t, shutdown(context.Background())) - } -} - -func TestSpanProcessor(t *testing.T) { - consoleExporter, err := stdouttrace.New( - stdouttrace.WithPrettyPrint(), - ) - require.NoError(t, err) - ctx := context.Background() - otlpGRPCExporter, err := otlptracegrpc.New(ctx) - require.NoError(t, err) - otlpHTTPExporter, err := otlptracehttp.New(ctx) - require.NoError(t, err) - testCases := []struct { - name string - processor SpanProcessor - args any - wantErr string - wantProcessor sdktrace.SpanProcessor - }{ - { - name: "no processor", - wantErr: "unsupported span processor type, must be one of simple or batch", - 
}, - { - name: "multiple processor types", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{}, - }, - Simple: &SimpleSpanProcessor{}, - }, - wantErr: "must not specify multiple span processor type", - }, - { - name: "batch processor invalid exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{}, - }, - }, - wantErr: "no valid span exporter", - }, - { - name: "batch processor invalid batch size console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(-1), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: "invalid batch size -1", - }, - { - name: "batch processor invalid export timeout console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - ExportTimeout: ptr(-2), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: "invalid export timeout -2", - }, - { - name: "batch processor invalid queue size console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxQueueSize: ptr(-3), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: "invalid queue size -3", - }, - { - name: "batch processor invalid schedule delay console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - ScheduleDelay: ptr(-4), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantErr: "invalid schedule delay -4", - }, - { - name: "batch processor with multiple exporters", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - OTLP: &OTLP{}, - }, - }, - }, - wantErr: "must not specify multiple exporters", - }, - { - name: "batch processor console exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - 
wantProcessor: sdktrace.NewBatchSpanProcessor(consoleExporter), - }, - { - name: "batch/otlp-exporter-invalid-protocol", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/invalid"), - }, - }, - }, - }, - wantErr: "unsupported protocol \"http/invalid\"", - }, - { - name: "batch/otlp-grpc-exporter-no-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("http://localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-exporter-socket-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("unix:collector.sock"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: 
sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-good-ca-certificate", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "ca.crt")), - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-bad-ca-certificate", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not create certificate authority chain from certificate", - }, - { - name: "batch/otlp-grpc-bad-client-certificate", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - ClientCertificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - ClientKey: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", - }, - { - name: "batch/otlp-grpc-bad-headerslist", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - HeadersList: ptr("==="), - }, - }, - }, - }, - wantErr: "invalid headers list: invalid key: \"\"", - }, - { - name: "batch/otlp-grpc-exporter-no-scheme", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: 
ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpGRPCExporter), - }, - { - name: "batch/otlp-grpc-invalid-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr(" "), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "parse \" \": invalid URI for request", - }, - { - name: "batch/otlp-grpc-invalid-compression", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("grpc"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "unsupported compression \"invalid\"", - }, - { - name: "batch/otlp-http-exporter", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("http://localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-good-ca-certificate", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ 
- Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "ca.crt")), - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-bad-ca-certificate", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Certificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not create certificate authority chain from certificate", - }, - { - name: "batch/otlp-http-bad-client-certificate", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - ClientCertificate: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - ClientKey: ptr(filepath.Join("..", "testdata", "bad_cert.crt")), - }, - }, - }, - }, - wantErr: "could not use client certificate: tls: failed to find any PEM data in certificate input", - }, - { - name: "batch/otlp-http-bad-headerslist", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4317"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - HeadersList: ptr("==="), - }, - }, - }, - }, - wantErr: "invalid headers list: invalid key: \"\"", - }, - { - name: "batch/otlp-http-exporter-with-path", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: 
ptr("http://localhost:4318/path/123"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-exporter-no-scheme", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-endpoint", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr(" "), - Compression: ptr("gzip"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "parse \" \": invalid URI for request", - }, - { - name: "batch/otlp-http-none-compression", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: 
ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("none"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantProcessor: sdktrace.NewBatchSpanProcessor(otlpHTTPExporter), - }, - { - name: "batch/otlp-http-invalid-compression", - processor: SpanProcessor{ - Batch: &BatchSpanProcessor{ - MaxExportBatchSize: ptr(0), - ExportTimeout: ptr(0), - MaxQueueSize: ptr(0), - ScheduleDelay: ptr(0), - Exporter: SpanExporter{ - OTLP: &OTLP{ - Protocol: ptr("http/protobuf"), - Endpoint: ptr("localhost:4318"), - Compression: ptr("invalid"), - Timeout: ptr(1000), - Headers: []NameStringValuePair{ - {Name: "test", Value: ptr("test1")}, - }, - }, - }, - }, - }, - wantErr: "unsupported compression \"invalid\"", - }, - { - name: "simple/no-exporter", - processor: SpanProcessor{ - Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{}, - }, - }, - wantErr: "no valid span exporter", - }, - { - name: "simple/console-exporter", - processor: SpanProcessor{ - Simple: &SimpleSpanProcessor{ - Exporter: SpanExporter{ - Console: Console{}, - }, - }, - }, - wantProcessor: sdktrace.NewSimpleSpanProcessor(consoleExporter), - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, err := spanProcessor(context.Background(), tt.processor) - if tt.wantErr != "" { - require.Error(t, err) - require.Equal(t, tt.wantErr, err.Error()) - } else { - require.NoError(t, err) - } - if tt.wantProcessor == nil { - require.Nil(t, got) - } else { - require.Equal(t, reflect.TypeOf(tt.wantProcessor), reflect.TypeOf(got)) - var fieldName string - switch reflect.TypeOf(tt.wantProcessor).String() { - case "*trace.simpleSpanProcessor": - fieldName = "exporter" - default: - fieldName = "e" - } - wantExporterType := reflect.Indirect(reflect.ValueOf(tt.wantProcessor)).FieldByName(fieldName).Elem().Type() - 
gotExporterType := reflect.Indirect(reflect.ValueOf(got)).FieldByName(fieldName).Elem().Type() - require.Equal(t, wantExporterType.String(), gotExporterType.String()) - } - }) - } -} diff --git a/versions.yaml b/versions.yaml index 7e03021cd14..eae3cd2771b 100644 --- a/versions.yaml +++ b/versions.yaml @@ -71,7 +71,6 @@ module-sets: experimental-config: version: v0.15.0 modules: - - go.opentelemetry.io/contrib/config - go.opentelemetry.io/contrib/otelconf experimental-bridge: version: v0.10.0 From d6e810fc2167699c648852737a11dc810fe2165c Mon Sep 17 00:00:00 2001 From: Mike Blum Date: Fri, 7 Mar 2025 14:13:52 -0600 Subject: [PATCH 2/2] Update CHANGELOG.md --- CHANGELOG.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8f758eb10e1..c073db1e11e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -10,12 +10,8 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm ### Removed -<<<<<<< HEAD - Drop support for [Go 1.22]. (#6853) -||||||| parent of 666f71d5 (remove deprecated config module) -======= -- The deprecated `go.opentelemetry.io/contrib/config` package is removed. (#6894) ->>>>>>> 666f71d5 (remove deprecated config module) +- The deprecated `go.opentelemetry.io/contrib/config` package is removed, use `go.opentelemetry.io/contrib/otelconf` instead. (#6894)